Dataset schema (one row per repository snapshot; ranges and cardinalities as reported by the dataset viewer):

repo_name           string         length 5 to 114
repo_url            string         length 24 to 133
snapshot_id         string         fixed length 40
revision_id         string         fixed length 40
directory_id        string         fixed length 40
branch_name         string         209 distinct values
visit_date          timestamp[ns]
revision_date       timestamp[ns]
committer_date      timestamp[ns]
github_id           int64          9.83k to 683M
star_events_count   int64          0 to 22.6k
fork_events_count   int64          0 to 4.15k
gha_license_id      string         17 distinct values
gha_created_at      timestamp[ns]
gha_updated_at      timestamp[ns]
gha_pushed_at       timestamp[ns]
gha_language        string         115 distinct values
files               list           length 1 to 13.2k
num_files           int64          1 to 13.2k
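A minimal sketch of loading rows with the schema above, assuming the rows are stored as Parquet; the file name "repo_snapshots.parquet" is a hypothetical placeholder, not the dataset's actual location:

```python
# Sketch only: assumes a local Parquet copy of the dataset.
# "repo_snapshots.parquet" is a hypothetical placeholder path.
import pandas as pd

df = pd.read_parquet("repo_snapshots.parquet")

row = df.iloc[0]  # one repository snapshot per row
print(row["repo_name"], row["num_files"])

# `files` holds one record per file in the snapshot.
for f in row["files"]:
    print(f["path"], f["language"], f["length_bytes"])
```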
repo_name: almayor/minishell
repo_url: https://github.com/almayor/minishell
snapshot_id: 797e7ff771295bbdc7593526d6d64c6b4e78793e
revision_id: 75043e331d493bb52fe2039bdb54bab7290c46a1
directory_id: a5b21463531dcf07dc361bda7be8e0ef5f2e7254
branch_name: refs/heads/master
visit_date: 2022-12-24T14:42:01.823430
revision_date: 2020-09-27T05:20:07
committer_date: 2020-09-27T05:20:07
github_id: 294,998,739
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.414924293756485, "alphanum_fraction": 0.4311463534832001, "avg_line_length": 23.54867172241211, "blob_id": "a456d21e19c54ba85d9c1dbcb2efd05bcd6a252d", "content_id": "bcb740070b185e0d3636d7121466a21650a1a976", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2774, "license_type": "permissive", "max_line_length": 80, "num_lines": 113, "path": "/src/msh_execute.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* msh_execute.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 00:41:23 by unite #+# #+# */\n/* Updated: 2020/09/26 19:40:17 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nint\t\t\tg_gid;\n\nstatic char\t*g_builtin_name[] = {\n\t\"cd\",\n\t\"exit\",\n\t\"echo\",\n\t\"setenv\",\n\t\"unsetenv\",\n\t\"env\",\n};\n\nstatic int\t(*g_builtin_func[]) (char *const *) = {\n\t&msh_cd,\n\t&msh_exit,\n\t&msh_echo,\n\t&msh_setenv,\n\t&msh_unsetenv,\n\t&msh_env\n};\n\nstatic int\tmsh_num_builtins(void)\n{\n\treturn (sizeof(g_builtin_name) / sizeof(char *));\n}\n\nstatic char\t*join_path(const char *dir, const char *file, char *buf)\n{\n\tsize_t\tdir_len;\n\n\tdir_len = ft_strlcpy(buf, dir, PATH_MAX);\n\tif (buf[dir_len] != '/')\n\t\tft_strlcat(buf, \"/\", PATH_MAX);\n\tft_strlcat(buf, file, PATH_MAX);\n\treturn (buf);\n}\n\nstatic char\t*locate_exec(const char *name, char *buf)\n{\n\tstatic char\tpath[PATH_MAX];\n\tchar\t\t*dir;\n\n\tif (*name == '/' && access(name, F_OK) == 0)\n\t\treturn (ft_strcpy(buf, name));\n\telse if (access(name, F_OK) == 0)\n\t{\n\t\tgetcwd(buf, PATH_MAX);\n\t\tft_strlcat(buf, \"/\", PATH_MAX);\n\t\tft_strlcat(buf, name, PATH_MAX);\n\t\treturn (buf);\n\t}\n\tft_strlcpy(path, ft_getenv(\"PATH\") ? 
ft_getenv(\"PATH\") : \"\", PATH_MAX);\n\tdir = ft_strtok(path, \":\");\n\twhile (dir != NULL)\n\t{\n\t\tjoin_path(dir, name, buf);\n\t\tif (access(buf, F_OK) == 0)\n\t\t\treturn (buf);\n\t\tdir = ft_strtok(NULL, \":\");\n\t}\n\treturn (NULL);\n}\n\nstatic int\tmsh_launch(char *const *argv)\n{\n\tstatic char\texec[PATH_MAX];\n\n\tg_pid_child = fork();\n\tif (g_pid_child < 0)\n\t\tft_terminate(MSH_ERR_FORK, 1);\n\telse if (g_pid_child == 0)\n\t{\n\t\tif (!locate_exec(argv[0], exec))\n\t\t\tft_terminate(MSH_ERR_CMD, 1);\n\t\telse if (access(exec, X_OK))\n\t\t\tft_terminate(MSH_ERR_PERM, 1);\n\t\telse if (execve(exec, argv, g_environ))\n\t\t\tft_terminate(MSH_ERR_EXEC, 1);\n\t}\n\telse\n\t\twait(NULL);\n\tg_pid_child = 0;\n\treturn (1);\n}\n\nint\t\t\tmsh_execute(char *const *argv)\n{\n\tsize_t\ti;\n\n\tif (argv[0] == NULL)\n\t\treturn (1);\n\ti = 0;\n\twhile (i < msh_num_builtins())\n\t{\n\t\tif (ft_strcmp(argv[0], g_builtin_name[i]) == 0)\n\t\t\treturn ((*g_builtin_func[i])(argv));\n\t\ti++;\n\t}\n\treturn (msh_launch(argv));\n}\n" }, { "alpha_fraction": 0.47765052318573, "alphanum_fraction": 0.4905799925327301, "avg_line_length": 28.74725341796875, "blob_id": "ff6649824bd0d51c3b58f17ae846c35e399d880b", "content_id": "d238b52fe94f22a947c58eac07434407ffb43691", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2707, "license_type": "permissive", "max_line_length": 80, "num_lines": 91, "path": "/include/minishell.h", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* minishell.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 21:09:23 by unite #+# #+# */\n/* Updated: 2020/09/27 00:01:10 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef MINISHELL_H\n\n# define MINISHELL_H\n\n# include <limits.h>\n# include <readline/readline.h>\n# include <readline/history.h>\n# include <signal.h>\n# include <stdio.h>\n# include <stdlib.h>\n# include <sys/stat.h>\n# include <sys/types.h>\n# include <sys/wait.h>\n# include <unistd.h>\n\n# include \"libftprintfgnl.h\"\n# include \"ansi.h\"\n\n# define MSH_NAME\t\t\t\"msh\"\n# define MSH_TOK_DELIM\t\t\" \\t\\r\\n\\a\"\n# define MSH_TOK_BUFSIZE\t4096\n# define MSH_PROMPT_BUFSIZE\t128\n\n# define MSH_ERR_MALLOC\t\t\"malloc error\"\n# define MSH_ERR_FORK\t\t\"failed to fork\"\n# define MSH_ERR_PERM\t\t\"permission denied\"\n# define MSH_ERR_EXEC\t\t\"failed to execute\"\n# define MSH_ERR_CMD\t\t\"command not found\"\n# define MSH_ERR_SIGHNDL\t\"failed to set a signal handler\"\n\nextern char\t**g_environ;\nextern int\tg_pid_child;\n\n/*\n** driver functions\n*/\n\nint\t\t\tmsh_execute(char *const *argv);\nchar\t\t*msh_expand(const char *s);\nvoid\t\tmsh_loop(void);\nconst char\t*msh_prompt(void);\nint\t\t\tmsh_statement(const char *statement);\nvoid\t\tmsh_cleanup(void);\n\n/*\n** signal handlers\n*/\n\nvoid\t\thandler_sigint(int sig);\nvoid\t\thandler_sigterm(int sig);\n\n/*\n** builtin commands\n*/\n\nint\t\t\tmsh_cd(char *const *argv);\nint\t\t\tmsh_echo(char *const *argv);\nint\t\t\tmsh_exit(char *const *argv);\nint\t\t\tmsh_setenv(char *const *argv);\nint\t\t\tmsh_unsetenv(char *const *argv);\nint\t\t\tmsh_env(char *const *argv);\n\n/*\n** utils\n*/\n\nvoid\t\tft_error(const char *mes);\nchar\t\t*ft_getenv(const char 
*name);\nint\t\t\tft_setenv(const char *name, const char *value, int overwrite);\nchar\t\t*ft_strtok(char *str, const char *sep);\nchar\t\t*ft_strtok_r(char *str, const char *sep, char **lasts);\nvoid\t\tft_tabdel(char **tab);\nsize_t\t\tft_tablen(char *const *tab);\nvoid\t\tft_terminate(const char *mes, int rc);\nint\t\t\tft_unsetenv(const char *name);\nvoid\t\t*ft_xcalloc(size_t count, size_t size);\nvoid\t\t*ft_xmalloc(size_t size);\n\n#endif\n" }, { "alpha_fraction": 0.33133840560913086, "alphanum_fraction": 0.3520130515098572, "avg_line_length": 31.821428298950195, "blob_id": "14c5b638ecb24aef2a9f9d154cd9fb576c2f6e28", "content_id": "79dced218f9beee3b257b97f62e8a731ec048572", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1838, "license_type": "permissive", "max_line_length": 80, "num_lines": 56, "path": "/src/builtin/msh_cd.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* msh_cd.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 03:59:33 by unite #+# #+# */\n/* Updated: 2020/09/26 22:34:08 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nstatic int\tcheck_directory(char *const dir)\n{\n\tstruct stat\tstt;\n\n\tif (access(dir, F_OK))\n\t\tft_error(\"cd: no such file or directory\");\n\telse if (stat(dir, &stt))\n\t\tft_error(\"cd: stat error\");\n\telse if (!S_ISDIR(stt.st_mode))\n\t\tft_error(\"cd: not a directory\");\n\telse if (access(dir, X_OK))\n\t\tft_error(\"cd: permission denied\");\n\telse\n\t\treturn (0);\n\treturn (1);\n}\n\nint\t\t\tmsh_cd(char *const *argv)\n{\n\tchar\t\t*dir;\n\tchar\t\t*pwd;\n\n\tdir = argv[1];\n\tif (ft_tablen(argv) > 2)\n\t\tft_error(\"cd: too many arguments\");\n\telse if (dir == NULL && !(dir = ft_getenv(\"HOME\")))\n\t\tft_error(\"cd: HOME not set\");\n\telse if (ft_strcmp(dir, \"-\") == 0 && !(dir = ft_getenv(\"OLDPWD\")))\n\t\tft_error(\"cd: OLDPWD not set\");\n\telse if (check_directory(dir))\n\t\treturn (1);\n\telse if (chdir(dir))\n\t\tft_error(\"cd: unknown error\");\n\telse\n\t{\n\t\tpwd = getcwd(NULL, 0);\n\t\tft_setenv(\"OLDPWD\", ft_getenv(\"PWD\"), 1);\n\t\tft_setenv(\"PWD\", pwd, 1);\n\t\tfree(pwd);\n\t}\n\treturn (1);\n}\n" }, { "alpha_fraction": 0.3598369061946869, "alphanum_fraction": 0.37971457839012146, "avg_line_length": 26.25, "blob_id": "3bde7f70b147d3cd27d3a8f951dbd881ee3de012", "content_id": "9417babe2cdeefb19e7e32f15f5aa4cefbc731b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1962, "license_type": "permissive", "max_line_length": 80, "num_lines": 72, "path": "/src/msh_statement.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* msh_statement.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/26 20:24:12 by unite #+# #+# */\n/* Updated: 2020/09/27 00:38:45 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nstatic size_t\tcount_tokens(const char *s)\n{\n\tsize_t count;\n\tsize_t i;\n\n\tcount 
= 1;\n\ti = 0;\n\twhile (s[i])\n\t{\n\t\tif (ft_strchr(MSH_TOK_DELIM, s[i]))\n\t\t\tcount++;\n\t\ti++;\n\t}\n\treturn (count);\n}\n\nstatic char\t\t**msh_tokenize(char *line)\n{\n\tchar\t**tokens;\n\tchar\t*token;\n\tchar\t*ptr;\n\tsize_t\tposition;\n\n\ttokens = ft_xcalloc(sizeof(char *), count_tokens(line) + 1);\n\ttoken = ft_strtok_r(line, MSH_TOK_DELIM, &ptr);\n\tposition = 0;\n\twhile (token != NULL)\n\t{\n\t\tif (ft_strlen(token) > 0)\n\t\t\ttokens[position++] = msh_expand(token);\n\t\ttoken = ft_strtok_r(NULL, MSH_TOK_DELIM, &ptr);\n\t}\n\ttokens[position++] = NULL;\n\treturn (tokens);\n}\n\nint\t\t\t\tmsh_statement(const char *statement)\n{\n\tchar\t*statement1;\n\tchar\t*cmd;\n\tchar\t**argv;\n\tchar\t*ptr;\n\tint\t\tstatus;\n\n\tif (!(statement1 = ft_strdup(statement)))\n\t\tft_terminate(MSH_ERR_MALLOC, 2);\n\tcmd = ft_strtok_r(statement1, \";\", &ptr);\n\tstatus = 1;\n\twhile (cmd && status)\n\t{\n\t\targv = msh_tokenize(cmd);\n\t\tstatus = msh_execute(argv);\n\t\tft_tabdel(argv);\n\t\tcmd = ft_strtok_r(NULL, \";\", &ptr);\n\t}\n\tfree(statement1);\n\treturn (status);\n}\n" }, { "alpha_fraction": 0.39122867584228516, "alphanum_fraction": 0.459798127412796, "avg_line_length": 24.201753616333008, "blob_id": "0005595d43046bd3f09a7620476a6733ee6e3872", "content_id": "341830a7f88a01ebcbb2991c4d27a38501cc5510", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2873, "license_type": "permissive", "max_line_length": 80, "num_lines": 114, "path": "/include/ansi.h", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ansi.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/13 20:57:29 by unite #+# #+# */\n/* Updated: 2020/09/13 21:00:39 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef ANSI_H\n\n# define ANSI_H\n\n/*\n** Regular text\n*/\n\n# define ANSI_BLK \"\\e[0;30m\"\n# define ANSI_RED \"\\e[0;31m\"\n# define ANSI_GRN \"\\e[0;32m\"\n# define ANSI_YEL \"\\e[0;33m\"\n# define ANSI_BLU \"\\e[0;34m\"\n# define ANSI_MAG \"\\e[0;35m\"\n# define ANSI_CYN \"\\e[0;36m\"\n# define ANSI_WHT \"\\e[0;37m\"\n\n/*\n** Regular bold text\n*/\n\n# define ANSI_BBLK \"\\e[1;30m\"\n# define ANSI_BRED \"\\e[1;31m\"\n# define ANSI_BGRN \"\\e[1;32m\"\n# define ANSI_BYEL \"\\e[1;33m\"\n# define ANSI_BBLU \"\\e[1;34m\"\n# define ANSI_BMAG \"\\e[1;35m\"\n# define ANSI_BCYN \"\\e[1;36m\"\n# define ANSI_BWHT \"\\e[1;37m\"\n\n/*\n** Regular underline text\n*/\n\n# define ANSI_UBLK \"\\e[4;30m\"\n# define ANSI_URED \"\\e[4;31m\"\n# define ANSI_UGRN \"\\e[4;32m\"\n# define ANSI_UYEL \"\\e[4;33m\"\n# define ANSI_UBLU \"\\e[4;34m\"\n# define ANSI_UMAG \"\\e[4;35m\"\n# define ANSI_UCYN \"\\e[4;36m\"\n# define ANSI_UWHT \"\\e[4;37m\"\n\n/*\n** Regular background\n*/\n\n# define ANSI_BLKB \"\\e[40m\"\n# define ANSI_REDB \"\\e[41m\"\n# define ANSI_GRNB \"\\e[42m\"\n# define ANSI_YELB \"\\e[43m\"\n# define ANSI_BLUB \"\\e[44m\"\n# define ANSI_MAGB \"\\e[45m\"\n# define ANSI_CYNB \"\\e[46m\"\n# define ANSI_WHTB \"\\e[47m\"\n\n/*\n** High intensity background\n*/\n\n# define ANSI_BLKHB \"\\e[0;100m\"\n# define ANSI_REDHB \"\\e[0;101m\"\n# define ANSI_GRNHB \"\\e[0;102m\"\n# define ANSI_YELHB \"\\e[0;103m\"\n# define ANSI_BLUHB \"\\e[0;104m\"\n# define ANSI_MAGHB \"\\e[0;105m\"\n# define 
ANSI_CYNHB \"\\e[0;106m\"\n# define ANSI_WHTHB \"\\e[0;107m\"\n\n/*\n** High intensity text\n*/\n\n# define ANSI_HBLK \"\\e[0;90m\"\n# define ANSI_HRED \"\\e[0;91m\"\n# define ANSI_HGRN \"\\e[0;92m\"\n# define ANSI_HYEL \"\\e[0;93m\"\n# define ANSI_HBLU \"\\e[0;94m\"\n# define ANSI_HMAG \"\\e[0;95m\"\n# define ANSI_HCYN \"\\e[0;96m\"\n# define ANSI_HWHT \"\\e[0;97m\"\n\n/*\n** Bold high intensity text\n*/\n\n# define ANSI_BHBLK \"\\e[1;90m\"\n# define ANSI_BHRED \"\\e[1;91m\"\n# define ANSI_BHGRN \"\\e[1;92m\"\n# define ANSI_BHYEL \"\\e[1;93m\"\n# define ANSI_BHBLU \"\\e[1;94m\"\n# define ANSI_BHMAG \"\\e[1;95m\"\n# define ANSI_BHCYN \"\\e[1;96m\"\n# define ANSI_BHWHT \"\\e[1;97m\"\n\n/*\n** Reset\n*/\n\n# define ANSI_RESET \"\\e[0m\"\n\n#endif\n" }, { "alpha_fraction": 0.37633490562438965, "alphanum_fraction": 0.3981204628944397, "avg_line_length": 29.012821197509766, "blob_id": "40d8c0643d3d7f7e554262edfb674222694ba749", "content_id": "2dcc78b198f755139a14d2278bd372143cfb5de7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2341, "license_type": "permissive", "max_line_length": 80, "num_lines": 78, "path": "/src/main.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* main.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 01:04:23 by unite #+# #+# */\n/* Updated: 2020/09/27 00:08:33 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nchar\t**g_environ;\nint\t\tg_pid_child;\n\nstatic void\tcopy_to_environ(char *const *envp)\n{\n\tsize_t\ti;\n\n\tg_environ = ft_xcalloc(sizeof(char *), ft_tablen(envp) + 1);\n\ti = 0;\n\twhile (envp[i])\n\t{\n\t\tif (!(g_environ[i] = ft_strdup(envp[i])))\n\t\t\tft_terminate(MSH_ERR_MALLOC, 1);\n\t\ti++;\n\t}\n}\n\nstatic void\tinit_environ(void)\n{\n\tstatic char\tshlvl[10];\n\tint\t\t\tshlvl_d;\n\n\tshlvl_d = ft_getenv(\"SHLVL\") ? ft_atoi(ft_getenv(\"SHLVL\")) : 1;\n\tft_sprintf(shlvl, \"%i\", shlvl_d < 0 || shlvl_d > 99 ? 
1 : shlvl_d + 1);\n\tft_setenv(\"SHLVL\", shlvl, 1);\n}\n\nstatic void\tparse_arguments(int argc, char *const *argv)\n{\n\tsize_t\ti;\n\n\ti = 0;\n\twhile (i < argc)\n\t{\n\t\tif (ft_strcmp(argv[i], \"-h\") == 0 || ft_strcmp(argv[i], \"--help\") == 0)\n\t\t\tft_terminate(\"options\\n\"\n\t\t\t\t\t\t\"-h Display help\\n\"\n\t\t\t\t\t\t\"-d Disable TAB autocompletion\\n\"\n\t\t\t\t\t\t\"-c <command> Read commands from a string\", 0);\n\t\tif (ft_strcmp(argv[i], \"-d\") == 0)\n\t\t\trl_bind_key('\\t', rl_insert);\n\t\telse if (ft_strcmp(argv[i], \"-c\") == 0 && i == argc - 1)\n\t\t\tft_terminate(\"-c option requires an argument\", 1);\n\t\telse if (ft_strcmp(argv[i], \"-c\") == 0)\n\t\t{\n\t\t\tmsh_statement(argv[++i]);\n\t\t\tmsh_cleanup();\n\t\t\texit(0);\n\t\t}\n\t\ti++;\n\t}\n}\n\nint\t\t\tmain(int argc, char *const *argv, char *const *envp)\n{\n\tif (signal(SIGINT, &handler_sigint) ||\n\t\tsignal(SIGTERM, &handler_sigterm))\n\t\tft_terminate(MSH_ERR_SIGHNDL, 2);\n\tcopy_to_environ(envp);\n\tinit_environ();\n\tparse_arguments(argc, argv);\n\tmsh_loop();\n\tmsh_cleanup();\n}\n" }, { "alpha_fraction": 0.3118736743927002, "alphanum_fraction": 0.3221985995769501, "avg_line_length": 25.772357940673828, "blob_id": "e9a0fdc45c1eb9272ba19f6f1aa72c8b8db728c9", "content_id": "ea2e989a231ce912773d9c53e5c79eba0a452e77", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3293, "license_type": "permissive", "max_line_length": 80, "num_lines": 123, "path": "/Makefile", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "# **************************************************************************** #\n# #\n# ::: :::::::: #\n# Makefile :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: unite <[email protected]> +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/09/05 18:17:42 by unite #+# #+# #\n# Updated: 2020/09/27 00:01:20 by unite ### ########.fr #\n# #\n# **************************************************************************** #\n\nNAME = minishell\n\nSRC_NAME = \\\nbuiltin/msh_cd.c \\\nbuiltin/msh_echo.c \\\nbuiltin/msh_env.c \\\nbuiltin/msh_exit.c \\\nbuiltin/msh_setenv.c \\\nbuiltin/msh_unsetenv.c \\\nutils/ft_error.c \\\nutils/ft_getenv.c \\\nutils/ft_setenv.c \\\nutils/ft_strtok.c \\\nutils/ft_strtok_r.c \\\nutils/ft_tabdel.c \\\nutils/ft_tablen.c \\\nutils/ft_terminate.c \\\nutils/ft_unsetenv.c \\\nutils/ft_xcalloc.c \\\nutils/ft_xmalloc.c \\\nmsh_execute.c \\\nmsh_expand.c \\\nmsh_loop.c \\\nmsh_prompt.c \\\nmsh_signal.c \\\nmsh_statement.c \\\nmsh_cleanup.c \\\nmain.c \\\n\n################################################################################\n\nPATHS = src\nPATHO = obj\nPATHI = include libftprintfgnl\nPATHL = libftprintfgnl\n\n################################################################################\n\nCC = gcc\nRM = rm\nMKDIR = /bin/mkdir\n\n################################################################################\n\nCOMPILE = $(CC) -c\nCFLAGS += -Werror\nCFLAGS += -O3 -std=gnu11 -ffast-math -march=native\nCFLAGS += -MMD\nCFLAGS += $(foreach path, $(PATHI), -I$(path))\n\nLINK = $(CC)\nLFLAGS += -lreadline -lftprintfgnl -L libftprintfgnl\n\n################################################################################\n\nifeq ($(DEBUG), 1) \n\tCOMPILE += -g\nendif\n\nifeq ($(PROFILE), 1)\n\tCOMPILE += -pg\n\tLINK += -pg\nendif\n\n################################################################################\n\nSRC = $(patsubst %.c, $(PATHS)/%.c, $(SRC_NAME))\nOBJ = $(patsubst %.c, 
$(PATHO)/%.o, $(SRC_NAME))\n\n################################################################################\n\n$(NAME) : $(OBJ) $(PATHL)/libftprintfgnl.a\n\t$(LINK) $(OBJ) -o $@ $(LFLAGS)\n\n################################################################################\n\n$(PATHO)/%.o : $(PATHS)/%.c\n\t$(MKDIR) -p $(@D)\n\t$(COMPILE) $(CFLAGS) $< -o $@\n\n################################################################################\n\nDEP += $(patsubst %.c, $(PATHO)/%.d, $(SRC_NAME))\n\n-include $(DEP)\n\n################################################################################\n\n.DEFAULT_GOAL = all\n\n.PHONY : all clean fclean re libftprintfgnl test\n\nall : libftprintfgnl $(NAME)\n\nfclean : clean\n\t$(RM) -f $(NAME)\n\nclean :\n\t$(RM) -rf $(PATHO)\n\t$(MAKE) -C $(PATHL) fclean\n\nre : fclean all\n\nlibftprintfgnl :\n\t$(MAKE) -C $(PATHL)\n\ntest : all\n\t@echo \"\\n============TESTS============\\n\"\n\t@python3 test/test.py\n\n################################################################################\n" }, { "alpha_fraction": 0.6184166669845581, "alphanum_fraction": 0.6257038712501526, "avg_line_length": 27.75238037109375, "blob_id": "de97651172281a68618a2cf326d8d651e7399224", "content_id": "c4404ff10546c98543669dd084bc58b93c1e01ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6044, "license_type": "permissive", "max_line_length": 120, "num_lines": 210, "path": "/test/test.py", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport inspect\nimport os\nimport subprocess\nimport sys\n\n\nclass Test:\n\n\tdef __init__(self, cmd, out=\"\", err=\"\"):\n\t\tself.cmd = cmd\n\t\tself.expected_out = out\n\t\tself.expected_err = err\n\n\t\tif self.expected_out and self.expected_out[-1] != '\\n':\n\t\t\tself.expected_out += '\\n'\n\t\tif self.expected_err and self.expected_err[-1] != '\\n':\n\t\t\tself.expected_err += '\\n'\n\t\t\t\n\tdef run(self):\n\t\tp = subprocess.Popen(\n\t\t\t[\"./minishell\", \"-c\", self.cmd],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.PIPE\n\t\t)\n\n\t\tp.stdin.close()\n\t\tp.wait()\n\n\t\tself.returncode = p.returncode\n\t\tself.actual_out = p.stdout.read().decode()\n\t\tself.actual_err = p.stderr.read().decode()\n\t\t\n\t\treturn (\n\t\t\tself.returncode == 0 and\n\t\t\tself.actual_out == self.expected_out and\n\t\t\tself.actual_err == self.expected_err\n\t\t)\n\n\t@property\n\tdef trace(self):\n\t\treturn (\n\t\t\tf\" > {self.cmd}\\n\"\n\t\t\tf\" returned {self.returncode}\\n\"\n\t\t\tf\" stdout: {repr(self.actual_out):9s} (expected {repr(self.expected_out)})\\n\"\n\t\t\tf\" stderr: {repr(self.actual_err):9s} (expected {repr(self.expected_err)})\\n\\n\"\n\t\t)\n\n\nclass CompareTest(Test):\n\n\tdef __init__(self, cmd, cmd_sh=None):\n\t\tsuper().__init__(cmd)\n\t\tself.cmd_sh = cmd_sh if cmd_sh else cmd\n\n\tdef run(self):\n\t\tself.get_expected()\n\t\tsuper().run()\n\n\tdef get_expected(self):\n\t\tp = subprocess.Popen(\n\t\t\t[\"sh\", \"-c\", self.cmd_sh],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.PIPE\n\t\t)\n\n\t\tp.stdin.close()\n\t\tp.wait()\n\n\t\tself.expected_out = p.stdout.read().decode()\n\t\tself.expected_err = p.stderr.read().decode()\n\n\nclass CustomTest(Test):\n\n\tdef __init__(self, cmd, fun):\n\t\tself.fun = fun\n\t\tsuper().__init__(cmd)\n\n\tdef run(self):\n\t\tsuper().run()\n\t\treturn (\n\t\t\tself.returncode == 0 
and\n\t\t\tself.fun(self.actual_out, self.actual_err)\n\t\t)\n\n\t@property\n\tdef trace(self):\n\t\treturn (\n\t\t\tf\" > {self.cmd}\\n\"\n\t\t\tf\" returned {self.returncode}\\n\"\n\t\t\tf\" stdout: {repr(self.actual_out):9s}\\n\"\n\t\t\tf\" stderr: {repr(self.actual_err):9s}\\n\"\n\t\t\tf\" function: {inspect.getsource(self.fun)}\\n\\n\"\n\t\t)\n\t\n\nclass Suite:\n\t\n\tsuites = list()\n\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.tests = list()\n\t\tself.nsuccess = 0\n\t\tSuite.suites.append(self)\n\t\n\tdef add(self, test):\n\t\tself.tests.append(test)\n\t\n\tdef run(self):\n\t\tprint (f\"Running '{self.name}': \")\n\t\n\t\tfor test in self.tests:\n\t\t\tif test.run() is False:\n\t\t\t\tprint('✗', end=\"\")\n\t\t\t\tfor _ in range(len(self.tests) - 1 - self.nsuccess):\n\t\t\t\t\tprint('⚬', end=\"\")\n\t\t\t\n\t\t\t\tprint(f\"\\n\\nTrace:\\n{test.trace}\")\n\t\t\t\treturn False\n\t\t\t\n\t\t\tprint(u'✓', end='')\n\t\t\tself.nsuccess += 1\n\t\t\n\t\tprint(\"\\n\")\n\t\treturn True\n\t\n\t@classmethod\n\tdef run_suites(cls):\n\t\tnsuccess = 0\n\t\tfor suite in cls.suites:\n\t\t\tnsuccess += suite.run()\n\t\t\n\t\tnfail = len(cls.suites) - nsuccess\n\t\tntest_total = sum(len(suite.tests) for suite in cls.suites)\n\t\tprint(f\"ran {ntest_total} tests, {nsuccess} suites succeeded, {nfail} suites failed\")\n\t\tif nfail == 0:\n\t\t\tprint (\"\\033[92mSUCCESS\\033[0m\\n\")\n\t\telse:\n\t\t\tprint (\"\\033[93mFAIL\\033[0m\\n\")\n\t\t\n\t\tcls.suites = list()\n\t\treturn nfail == 0\n\n\ndef prepare_tests():\n\n\tsuite = Suite(\"fork and execve\")\n\tsuite.add(Test('foo', err='msh: command not found'))\n\tsuite.add(CompareTest('ls'))\n\tsuite.add(CompareTest('ls -laF'))\n\tsuite.add(CompareTest('ls -l -a -F'))\n\tsuite.add(CompareTest('\t\tls -l -a -F'))\n\tsuite.add(CompareTest('\t\t\t\tls \t\t'))\n\tsuite.add(CompareTest(' ls -l\t\t-aF '))\n\tsuite.add(Test(''))\n\tsuite.add(Test(' '))\n\tsuite.add(Test('\t\t'))\n\n\tsuite = Suite(\"builtins\")\n\tsuite.add(Test('exit', out=\"\", err=\"\"))\n\tsuite.add(Test('echo It works', out=\"It works\"))\n\tsuite.add(Test('echo \"Hello\"', out=\"Hello\"))\n\tsuite.add(Test('echo \"', out='\"'))\n\tsuite.add(Test('cd /usr/bin ; /bin/pwd', out='/usr/bin'))\n\tsuite.add(Test('cd /usr ; cd bin ; /bin/pwd', out='/usr/bin'))\n\tsuite.add(Test('cd ; /bin/pwd', out=os.getenv(\"HOME\")))\n\tsuite.add(Test('cd /usr/bin ; cd /tmp ; cd - ; /bin/pwd', out='/usr/bin'))\n\tsuite.add(Test('cd /usr/bin /tmp', err=\"msh: cd: too many arguments\"))\n\n\tsuite = Suite(\"environment\")\n\tsuite.add(CompareTest('env'))\n\tsuite.add(Test('env a b', err=\"msh: env: too many arguments\"))\n\tsuite.add(CustomTest('setenv FOO bar ; env', lambda out, err: out.count('FOO=bar') == 1))\n\tsuite.add(CustomTest('setenv FOO bar ; /usr/bin/env', lambda out, err: out.count('FOO=bar') == 1))\n\tsuite.add(CustomTest('setenv FOO bar ; unsetenv FOO ; env', lambda out, err: out.count('FOO=bar') == 0))\n\tsuite.add(CustomTest('setenv FOO bar ; unsetenv FOO ; unsetenv FOO ; env', lambda out, err: out.count('FOO=bar') == 0))\n\tsuite.add(CustomTest('setenv FOO bar ; unsetenv FOO ; /usr/bin/env', lambda out, err: out.count('FOO=bar') == 0))\n\tsuite.add(CustomTest('setenv FOO bar ; setenv FOO baz ; env', lambda out, err: out.count('FOO=bar') == 0))\n\tsuite.add(CustomTest('setenv FOO bar ; setenv FOO baz ; env', lambda out, err: out.count('FOO=') == 1))\n\tsuite.add(CustomTest('setenv FOO bar ; setenv FOO baz ; env', lambda out, err: out.count('FOO=baz') == 
1))\n\tsuite.add(Test('setenv a b c', err=\"msh: setenv: too many arguments\"))\n\tsuite.add(Test(\"setenv PATH /bin:/usr/bin\", \"\", \"\"))\n\n\tsuite = Suite(\"variable expansion\")\n\tsuite.add(Test('setenv FOO bar ; echo $FOO', out=\"bar\"));\n\tsuite.add(Test('setenv FOO bar ; echo blabla-$FOO/hello', out=\"blabla-bar/hello\"));\n\tsuite.add(Test('echo $HOME', out=os.getenv(\"HOME\")));\n\tsuite.add(Test('echo ~', out=os.getenv(\"HOME\")));\n\tsuite.add(Test('echo ~/hello', out=f'{os.getenv(\"HOME\")}/hello'));\n\tsuite.add(Test('setenv FOO bar ; echo ~/$FOO', out=f'{os.getenv(\"HOME\")}/bar'));\n\n\tsuite = Suite(\"PATH management\")\n\tsuite.add(CompareTest('unsetenv PATH ; setenv PATH \"/bin:/usr/bin\" ; ls', '\"PATH=/bin:/usr/bin\" ls'))\n\tsuite.add(Test('unsetenv PATH ; ls', err=\"msh: command not found\"))\n\tsuite.add(CompareTest('unsetenv PATH ; /bin/ls', \"ls\"))\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 2:\n\t\tsys.chdir(sys.argv[1])\n\tif len(sys.argv) > 2 or not os.path.exists(\"./minishell\"):\n\t\tsys.exit(f\"Usage: {__file__} <path-to-project-directory>\")\n\n\tprepare_tests()\n\tSuite.run_suites()\n" }, { "alpha_fraction": 0.38522636890411377, "alphanum_fraction": 0.4058776795864105, "avg_line_length": 26.670330047607422, "blob_id": "7a740edad934e1aefc4bbd94701d6eec91eea9c4", "content_id": "eafb933c82a129ccfb33974b65b0ed501e619875", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2518, "license_type": "permissive", "max_line_length": 80, "num_lines": 91, "path": "/src/msh_expand.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* msh_expand.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 04:46:20 by unite #+# #+# */\n/* Updated: 2020/09/18 17:50:34 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nstatic const char\t*expand_tilde(const char *s)\n{\n\tstatic char\tbuf[MSH_TOK_BUFSIZE];\n\n\tif (ft_strcmp(s, \"~\") == 0)\n\t\tft_strlcpy(buf, ft_getenv(\"HOME\"), MSH_TOK_BUFSIZE);\n\telse if (ft_strncmp(s, \"~/\", 2) == 0)\n\t{\n\t\tft_strlcpy(buf, ft_getenv(\"HOME\"), MSH_TOK_BUFSIZE);\n\t\tft_strlcat(buf, s + 1, MSH_TOK_BUFSIZE);\n\t}\n\telse\n\t\treturn (s);\n\treturn (buf);\n}\n\nstatic char\t\t\t*extract_variable(const char *s)\n{\n\tsize_t\ti;\n\tchar\t*var;\n\n\ti = 0;\n\twhile (s[i] && s[i] != '_' && ft_isalnum(s[i]))\n\t\ti++;\n\tif (!(var = ft_strndup(s, i)))\n\t\tft_terminate(MSH_ERR_MALLOC, 2);\n\treturn (var);\n}\n\nstatic const char\t*expand_parameters(const char *s)\n{\n\tstatic char\tbuf[MSH_TOK_BUFSIZE];\n\tconst char\t*start;\n\tconst char\t*end;\n\tchar\t\t*var;\n\n\tbuf[0] = 0;\n\tstart = s;\n\twhile ((end = ft_strchr(start, '$')))\n\t{\n\t\tif (*start != '$')\n\t\t\tft_strncat(buf, start, end - start);\n\t\tvar = extract_variable(end + 1);\n\t\tft_strlcat(buf, ft_getenv(var) ? 
ft_getenv(var) : \"\", MSH_TOK_BUFSIZE);\n\t\tstart = end + 1 + ft_strlen(var);\n\t\tfree(var);\n\t}\n\tft_strlcat(buf, start, MSH_TOK_BUFSIZE);\n\treturn (buf);\n}\n\nstatic const char\t*remove_quotes(const char *s)\n{\n\tstatic char\tbuf[MSH_TOK_BUFSIZE];\n\tsize_t\t\tlen;\n\tsize_t\t\ti;\n\n\tlen = ft_strlen(s);\n\tif (len > 1 && s[0] == '\"' && s[len - 1] == '\"')\n\t\treturn (ft_strncpy(buf, s + 1, len - 2));\n\tif (len > 1 && s[0] == '\\'' && s[len - 1] == '\\'')\n\t\treturn (ft_strncpy(buf, s + 1, len - 2));\n\telse\n\t\treturn (s);\n}\n\nchar\t\t\t\t*msh_expand(const char *s)\n{\n\tchar\t*s1;\n\n\ts = remove_quotes(s);\n\ts = expand_tilde(s);\n\ts = expand_parameters(s);\n\tif (!(s1 = ft_strdup(s)))\n\t\tft_terminate(MSH_ERR_MALLOC, 2);\n\treturn (s1);\n}\n" }, { "alpha_fraction": 0.25748929381370544, "alphanum_fraction": 0.2845934331417084, "avg_line_length": 34.04999923706055, "blob_id": "5cc9331978fddf5f25b2686d5abeac2c99e4cdea", "content_id": "e5bc71b681454fc4270e58352d7a0e293c855faf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1402, "license_type": "permissive", "max_line_length": 80, "num_lines": 40, "path": "/src/builtin/msh_setenv.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* msh_setenv.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 04:22:56 by unite #+# #+# */\n/* Updated: 2020/09/26 22:35:22 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nstatic int\tis_valid_str(const char *s)\n{\n\tsize_t\ti;\n\n\ti = 0;\n\twhile (s[i])\n\t{\n\t\tif (!ft_isalnum(s[i]))\n\t\t\treturn (0);\n\t\ti++;\n\t}\n\treturn (1);\n}\n\nint\t\t\tmsh_setenv(char *const *argv)\n{\n\tif (argv[1] == NULL)\n\t\tft_error(\"setenv: variable name not specified\");\n\telse if (ft_tablen(argv) > 3)\n\t\tft_error(\"setenv: too many arguments\");\n\telse if (!is_valid_str(argv[1]))\n\t\tft_error(\"setenv: variable name must contain alphanumeric characters\");\n\telse\n\t\tft_setenv(argv[1], argv[2], 1);\n\treturn (1);\n}\n" }, { "alpha_fraction": 0.6830266118049622, "alphanum_fraction": 0.6980232000350952, "avg_line_length": 27.764705657958984, "blob_id": "bc3d320dc5eb2a2a7885c17db1bf8c20af437441", "content_id": "de6a3563edca3162098ca660ce589fc12c337de2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1467, "license_type": "permissive", "max_line_length": 259, "num_lines": 51, "path": "/README.md", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "# minishell\n\n*This project is part of the official curriculum at [School 42](https://en.wikipedia.org/wiki/42_(school)).*\n\n## Disclaimer\n\n> In [School 42](https://en.wikipedia.org/wiki/42_(school)), almost every project must be written to comply with a coding standard (also known as the \"[Norm](./docs/subjects/norme.en.pdf)\"). 
As a result, the implementation of certain parts may appear strange.\n\n\n## Overview\n\n* [Official instructions](docs/subjects/minishell.en.pdf)\n* A personal implementation of a Linux shell with minimal functionality\n\t* builtins: `cd`, `env`, `setenv`, `unsetenv`, `echo`, `exit`\n\t* separation of commands with `;`\n\t* management of `SIGINT` and `SIGTERM`\n\t* an informative user prompt\n* Only the following functions are allowed\n\t* `malloc`, `free`\n\t* `access`\n\t* `open`, `close`, `read`, `write`\n\t* `opendir`, `readdir`, `closedir`\n\t* `getcwd`, `chdir`\n\t* `stat`, `lstat`, `fstat`\n\t* `fork`, `execve`\n\t* `wait`, `waitpid`, `wait3`, `wait4`\n\t* `signal`, `kill`\n\n## Run\n\nTo compile, run\n\n```sh\ngit clone https://github.com/almayor/minishell\ncd minishell\ngit submodule --init --recursive\nmake\n```\nYou can run unit tests with `make test`\n\n## Resources\n\n* <https://brennan.io/2015/01/16/write-a-shell-in-c/>\n\n## Acknowledgements\n\nWe are grateful to the entire team behind School 42 and its [Moscow branch](https://21-school.ru\n), as well as to my fellow students for help and support.\n\n---\nIf you have any questions, please contact me on Github.\n" }, { "alpha_fraction": 0.3031727373600006, "alphanum_fraction": 0.3243243098258972, "avg_line_length": 26.901639938354492, "blob_id": "16725094897920fe4355d0c007261ef91ea9d389", "content_id": "05b0e8427e480f582c778de6153f742c004964fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1702, "license_type": "permissive", "max_line_length": 80, "num_lines": 61, "path": "/src/utils/ft_setenv.c", "repo_name": "almayor/minishell", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_setenv.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: unite <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2020/09/12 04:16:04 by unite #+# #+# */\n/* Updated: 2020/09/18 04:00:17 by unite ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"minishell.h\"\n\nstatic char\t*get_string(const char *name, const char *value)\n{\n\tchar\t*s;\n\n\tif (ft_asprintf(&s, \"%s=%s\", name, value ? value : \"\") < 0)\n\t\tft_terminate(MSH_ERR_MALLOC, 2);\n\treturn (s);\n}\n\nstatic void\tenviron_append(char *s)\n{\n\tchar\t**env;\n\tsize_t\ti;\n\n\tenv = ft_xcalloc(sizeof(char *), ft_tablen(g_environ) + 2);\n\ti = 0;\n\twhile (g_environ[i])\n\t{\n\t\tenv[i] = g_environ[i];\n\t\ti++;\n\t}\n\tenv[i] = s;\n\tfree(g_environ);\n\tg_environ = env;\n}\n\nint\t\t\tft_setenv(const char *name, const char *value, int overwrite)\n{\n\tsize_t\ti;\n\n\ti = 0;\n\twhile (g_environ[i])\n\t{\n\t\tif (ft_strncmp(g_environ[i], name, ft_strlen(name)) == 0)\n\t\t{\n\t\t\tif (overwrite)\n\t\t\t{\n\t\t\t\tfree(g_environ[i]);\n\t\t\t\tg_environ[i] = get_string(name, value);\n\t\t\t}\n\t\t\treturn (0);\n\t\t}\n\t\ti++;\n\t}\n\tenviron_append(get_string(name, value));\n\treturn (0);\n}\n" } ]
num_files: 12
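Each entry in `files` carries the full file text along with its path and encoding, so a snapshot can be rebuilt on disk. A sketch using only field names visible in the records above (`path`, `text`, `src_encoding`); the `row` variable and destination directory are assumptions:

```python
import os

def write_snapshot(row, dest="snapshot"):
    """Write each file record of one dataset row to disk (sketch)."""
    for f in row["files"]:
        # Paths in the records are repo-absolute, e.g. "/src/main.c".
        out = os.path.join(dest, f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(out), exist_ok=True)
        with open(out, "w", encoding=f["src_encoding"]) as fh:
            fh.write(f["text"])
```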
repo_name: wandersonpereiradev/ac03-arquitetura
repo_url: https://github.com/wandersonpereiradev/ac03-arquitetura
snapshot_id: edc3849a53aaf42ee373210dda1c40ac28bb5d47
revision_id: 2d50555ff39fb9f50efa3b1837b6dc6a5ce3a87f
directory_id: 556ae6585b4169a612f3cd689d3e1b7389392649
branch_name: refs/heads/master
visit_date: 2023-08-11T09:03:17.482311
revision_date: 2021-09-19T17:33:02
committer_date: 2021-09-19T17:33:02
github_id: 407,955,502
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6041055917739868, "alphanum_fraction": 0.6187683343887329, "avg_line_length": 30.090909957885742, "blob_id": "bb95b47f93d7df9ae19774454f7f194318eb42f7", "content_id": "12fc73533f7e7f1f4aff1a780a9973df6d4c4828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/Calculadora.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "from FabricaOperacoes import *\n\nclass Calculadora(object):\n def calcular(self, valor1, valor2, operacao):\n fabrica = FabricaOperacoes()\n operacao = fabrica.criar(operacao)\n if (operacao == None):\n return 0\n else:\n resultado = operacao.operacao(valor1, valor2)\n return resultado" }, { "alpha_fraction": 0.6133056282997131, "alphanum_fraction": 0.6133056282997131, "avg_line_length": 28.5625, "blob_id": "6bc16f6e6d0a1f5a67ffd83129d6b85f0cf130a7", "content_id": "b9c9d079e2835c2dd399dfeb69d80ba1eee5b07b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/FabricaOperacoes.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "from Adicao import Adicao\nfrom Subtracao import Subtracao\nfrom Multiplicacao import Multiplicacao\nfrom Divisao import Divisao\n\nclass FabricaOperacoes(object):\n \n def criar(self, operador):\n if (operador == 'soma'):\n return Adicao()\n elif (operador == 'subtracao'):\n return Subtracao()\n elif (operador == 'multiplicacao'):\n return Multiplicacao()\n elif (operador == 'divisao'):\n return Divisao()\n " }, { "alpha_fraction": 0.6479290127754211, "alphanum_fraction": 0.665680468082428, "avg_line_length": 29.727272033691406, "blob_id": "6ad4ed3977c9b8c4c0f0ba2e1c274ad339a786aa", "content_id": "b9c399875014f8c732d52a978a3908b33cc068f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 65, "num_lines": 22, "path": "/TestMultiplicacao.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "import unittest\nfrom Calculadora import Calculadora\n\nclass TestMultiplicacao(unittest.TestCase):\n def teste_com_sucesso(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(7, 3, 'multiplicacao')\n self.assertEqual(resultado, 21)\n\n def teste_com_erro(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(5, 3, 'multiplicacao')\n self.assertEqual(resultado, 11)\n\n def teste_com_type_error(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(8, \"3\", 'multiplicacao')\n return self.assertEqual(resultado, 11)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.713178277015686, "avg_line_length": 17.428571701049805, "blob_id": "3b929a8f3f6a7b1a241fd4c964e208db2abb6e75", "content_id": "b3c16d4622e0b6f0d9bf1f91178633133369c407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/Main.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport abc\nfrom Calculadora import *\n\nconta = Calculadora()\na = conta.calcular(2, 3, 
'subtracao')\nprint(a)\n" }, { "alpha_fraction": 0.6486486196517944, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 23.83333396911621, "blob_id": "b7a0074f970670f5380ad8e0f988724e278cc9a3", "content_id": "f073ac7d9fefcb618cc1d702fcef8fc454649842", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/Adicao.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "from Operacao import *\n\nclass Adicao(Operacao):\n def operacao(self, valor1, valor2):\n resultado = valor1 + valor2\n return resultado" }, { "alpha_fraction": 0.6330274939537048, "alphanum_fraction": 0.6544342637062073, "avg_line_length": 28.727272033691406, "blob_id": "97353439eab37e19dfb777663dfc29e560b057f2", "content_id": "d6d79179f66c0eb2a1c303a78e0b5490001bf0fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/TestDivisao.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "import unittest\nfrom Calculadora import Calculadora\n\nclass TestDivisao(unittest.TestCase):\n def teste_com_sucesso(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(15, 5, 'divisao')\n self.assertEqual(resultado, 3)\n\n def teste_com_erro(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(15, 5, 'divisao')\n self.assertEqual(resultado, 11)\n\n def teste_com_type_error(self):\n calculadora = Calculadora()\n resultado = calculadora.calcular(15, \"3\", 'divisao')\n return self.assertEqual(resultado, 11)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6692307591438293, "alphanum_fraction": 0.6846153736114502, "avg_line_length": 17.714284896850586, "blob_id": "8a64c1e515f4eea42368fdcfc5beb7993d80205b", "content_id": "7579a79409541c4a39cd786e757fbd7dd43fa66e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/Operacao.py", "repo_name": "wandersonpereiradev/ac03-arquitetura", "src_encoding": "UTF-8", "text": "import abc\n\n\nclass Operacao(metaclass = abc.ABCMeta):\n @abc.abstractmethod\n def operacao(self, valor1, valor2):\n pass" } ]
num_files: 7
repo_name: Nchia-Emmanuela/Calculator
repo_url: https://github.com/Nchia-Emmanuela/Calculator
snapshot_id: b662b448c6bfced4febadd8116f225f85929550e
revision_id: 10d7279ac2e15751c152f2aeee251cdfd8ed19b0
directory_id: eb24de821124370fbf78c6b837f70a6c3ca88f81
branch_name: refs/heads/main
visit_date: 2023-07-30T04:10:27.349070
revision_date: 2021-09-15T14:35:36
committer_date: 2021-09-15T14:35:36
github_id: 406,712,854
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5970260500907898, "alphanum_fraction": 0.6171003580093384, "avg_line_length": 25.739999771118164, "blob_id": "5e980b66698ab6a10b3b97ceba20996ac4bef4e1", "content_id": "fa34c91af47c306b771ed3011f86b3060b0eefdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 117, "num_lines": 50, "path": "/main.py", "repo_name": "Nchia-Emmanuela/Calculator", "src_encoding": "UTF-8", "text": "from art import logo\n# addition\ndef add(n1, n2):\n return n1 + n2\n\n# subtraction\ndef subtract(n1, n2):\n return n1 - n2\n\n# division\ndef divide(n1, n2):\n return n1 / n2\n\n# multiplication\ndef multiply(n1, n2):\n return n1 * n2\ndef exponent(n1, n2):\n return n1**n2\n# dictionary to be able to access each operation\noperations = {\n \"+\" : add,\n \"-\" : subtract,\n \"/\" : divide,\n \"*\" : multiply,\n \"^\" : exponent\n}\n\n# this calculator function is a recursive function that calls it self \n# it takes no input and has no output.\ndef calculator():\n print(logo)\n numb1 = float(input(\"what's the first number?: \"))\n for operator in operations:\n print(operator)\n should_continue = True\n while should_continue:\n operation_symbol = input(\"pick an operation: \")\n numb2 = float(input(\"what's the next number?: \"))\n\n calculation_function = operations[operation_symbol]\n answer = calculation_function(numb1, numb2)\n print(f\"{numb1} {operation_symbol} {numb2} = {answer}\")\n if input(f\"type 'y' to continue calculating with {answer}, or type 'n' to start a new calculation: \") == 'y':\n numb1 = answer\n else:\n should_continue = False\n # this calculator call will take us back to the beginning\n calculator()\n\ncalculator()\n " }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6892856955528259, "avg_line_length": 30.11111068725586, "blob_id": "8471ea5b3b643d16ff2fa66f3842b9c2c3fcee0c", "content_id": "c07e39b01203c1f91254c16aa5a26b265e3249f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 57, "num_lines": 9, "path": "/README.md", "repo_name": "Nchia-Emmanuela/Calculator", "src_encoding": "UTF-8", "text": "# Calculator\n### Calculator in python:\n> Simple calculator that performs mathematical operations\n> using mathematical operators like +, - / *\n ## instructions\n > Enter the first number,\n > pick and operation and\n > Ennter the second number.\n ### e.g 3+3 = 6, 6-3 =3, 6/3 = 2 etc\n" } ]
num_files: 2
repo_name: Plut021/PongGame
repo_url: https://github.com/Plut021/PongGame
snapshot_id: d41c9cac3fa194f4ed9ad7c381b017d4dc3413d7
revision_id: 7780d8b8b5b3ae0b376e57fc26816b7561438cbe
directory_id: 74e81f52716ee3ac6a265968ca6d41fb2a725e74
branch_name: refs/heads/main
visit_date: 2023-04-29T06:00:41.064828
revision_date: 2021-05-13T15:47:25
committer_date: 2021-05-13T15:47:25
github_id: 367,063,204
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.60426926612854, "alphanum_fraction": 0.6345296502113342, "avg_line_length": 21.55555534362793, "blob_id": "62b778d9f35530a5e2c432fbd636f2dbca46d63e", "content_id": "5e58b505eb0fed543512cc510037ebf79e825522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4263, "license_type": "no_license", "max_line_length": 109, "num_lines": 189, "path": "/main.py", "repo_name": "Plut021/PongGame", "src_encoding": "UTF-8", "text": "import pygame\nimport os\nimport random\n\n\npygame.font.init()\n\n\nWIDTH, HEIGHT = 2560, 1440\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\n\n# colors\nWHITE = (255,255,255)\n\npygame.display.set_caption(\"pong\")\n\n# load images \nBG = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"back_ground.png\")), (WIDTH,HEIGHT))\nBALL = pygame.image.load(os.path.join(\"assets\", \"ball.png\"))\nPLAYER = pygame.image.load(os.path.join(\"assets\", \"player.png\"))\nWALL = pygame.image.load(os.path.join(\"assets\", \"wall.png\"))\n\n\n\ndef main():\n\trun = True\n\tFPS = 60\n\tmain_font = pygame.font.SysFont(\"comicsans\", 200)\n\twait_start = True\n\tspeed_count = 0\n\n\tclock = pygame.time.Clock()\n\t\n\n\tclass Player:\n\t\tdef __init__(self, img, score_position_x, score_position_y, x, y = HEIGHT/2, vel = 10, starting_score = 0):\n\t\t\tself.img = img \n\t\t\tself.y = y \n\t\t\tself.x = x \n\t\t\tself.vel = vel\n\t\t\tself.mask = pygame.mask.from_surface(self.img)\n\t\t\tself.score = starting_score = 0\n\t\t\tself.score_position_x = score_position_x\n\t\t\tself.score_position_y = score_position_y\n\n\t\tdef draw_player(self, window=WIN):\n\t\t\tself.score_label = main_font.render(f\"{self.score}\", 1, WHITE)\n\n\t\t\tif self.score_position_x < WIDTH/2 - 10:\n\t\t\t\tself.score_position = (self.score_position_x - self.score_label.get_width(), self.score_position_y)\n\t\t\telse:\n\t\t\t\tself.score_position = (self.score_position_x, self.score_position_y)\n\n\t\t\twindow.blit(self.img, (self.x, self.y))\n\t\t\twindow.blit(self.score_label, self.score_position)\n\t\t\t\n\n\tclass Ball:\n\t\tdef __init__(self, img, x=WIDTH/2, y=HEIGHT/2, x_vel = -10, angle_range = (-10, 10)):\n\t\t\tself.img = img\n\t\t\tself.x = x \n\t\t\tself.y = y \n\t\t\tself.x_vel = x_vel\n\t\t\tself.y_vel = random.randint(angle_range[0],angle_range[1])\n\t\t\tself.mask = pygame.mask.from_surface(self.img)\n\t\t\tself.angle = abs(self.y_vel)/abs(self.x_vel)\n\n\n\n\t\tdef draw_ball(self, window=WIN):\n\t\t\twindow.blit(self.img, (self.x, self.y))\n\n\n\tclass Wall:\n\t\tdef __init__(self, img, x, y):\n\t\t\tself.img = img\n\t\t\tself.x = x\n\t\t\tself.y = y\n\t\t\tself.mask = pygame.mask.from_surface(self.img)\n\n\t\tdef draw_wall(self, window=WIN):\n\t\t\twindow.blit(self.img, (self.x,self.y))\n\n\n\n\n\tplayer1 = Player(PLAYER, WIDTH/2 - 100 , 70, 100)\n\tplayer2 = Player(PLAYER, WIDTH/2 + 100, 70, WIDTH - 100)\n\tball = Ball(BALL)\n\twall1 = Wall(WALL, 100, 30)\n\twall2 = Wall(WALL, 100, HEIGHT-70)\n\n\n\tdef player_ball_check(players = [player1, player2], ball=ball):\n\t\tfor player in players:\n\t\t\toffset_x = int(ball.x - player.x)\n\t\t\toffset_y = int(ball.y - player.y)\n\n\t\t\tif player.mask.overlap(ball.mask, (offset_x, offset_y)) != None:\n\t\t\t\tball.x_vel = ball.x_vel * -1\n\n\t\t\n\tdef wall_ball_check(walls = [wall1, wall2], ball=ball):\n\t\tfor wall in walls:\n\t\t\toffset_x = int(ball.x - wall.x)\n\t\t\toffset_y = int(ball.y - wall.y)\n\n\t\t\tif wall.mask.overlap(ball.mask, (offset_x, 
offset_y)) != None:\n\t\t\t\tball.y_vel = ball.y_vel * -1\n\n\tdef move_ball():\n\t\tball.x += ball.x_vel\n\t\tball.y += ball.y_vel\n\n\n\tdef check_for_score(ball=ball):\n\t\tif ball.x < 100:\n\t\t\tplayer2.score += 1\n\t\t\tball.x = WIDTH/2\n\t\t\tball.y = HEIGHT/2\n\t\t\tball.x_vel = 10\n\n\t\tif ball.x > WIDTH -100:\n\t\t\tplayer1.score += 1\n\t\t\tball.x = WIDTH/2\n\t\t\tball.y = HEIGHT/2\n\t\t\tball.x_vel = -10\n\n\n\n\n\n\tdef redraw_window():\n\t\tWIN.blit(BG, (0,0))\n\n\t\tmove_ball()\n\t\t#collision check\n\t\tplayer_ball_check()\n\t\twall_ball_check()\n\n\t\tcheck_for_score()\n\n\t\tball.draw_ball()\n\t\tplayer1.draw_player()\n\t\tplayer2.draw_player()\n\t\twall1.draw_wall()\n\t\twall2.draw_wall()\n\n\t\t\n\t\tpygame.display.update()\n\n\twhile run:\n\t\tclock.tick(FPS)\n\t\tredraw_window()\n\t\tspeed_count += 1\n\n\t\tif speed_count > 5*FPS and abs(ball.x_vel) < 30:\n\t\t\tspeed_count = 0\n\t\t\tif ball.x_vel > 0:\n\t\t\t\tball.x_vel += 5\n\n\t\t\telif ball.x_vel < 0:\n\t\t\t\tball.x_vel -= 5\n\n\t\t\tif ball.y_vel > 0:\n\t\t\t\tball.y_vel = int(ball.angle * abs(ball.x_vel))\n\n\t\t\telif ball.y_vel < 0:\n\t\t\t\tball.y_vel = int(-ball.angle * abs(ball.x_vel))\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\n\t\tkeys = pygame.key.get_pressed()\n\t\tif keys[pygame.K_w] and player1.y > 0:\n\t\t\tplayer1.y -= player1.vel\n\n\t\tif keys[pygame.K_s] and player1.y < HEIGHT - PLAYER.get_height():\n\t\t\tplayer1.y += player1.vel\n\n\t\tif keys[pygame.K_UP] and player2.y> 0:\n\t\t\tplayer2.y -= player2.vel\n\t\t\n\t\tif keys[pygame.K_DOWN] and player2.y < HEIGHT - PLAYER.get_height():\n\t\t\tplayer2.y += player2.vel \n\n\nmain()\n" } ]
num_files: 1
repo_name: pastorenue/sani-app
repo_url: https://github.com/pastorenue/sani-app
snapshot_id: 5282b35a04463e0a67af9b6ba940ae75cbf8f42e
revision_id: ac6151a644c6c4c39aee009d106f3c86f55a41b4
directory_id: 5aa346ff3799e093af32ae0140fba9d792d77ba6
branch_name: refs/heads/master
visit_date: 2020-02-17T02:13:14.945232
revision_date: 2019-06-28T15:41:08
committer_date: 2019-06-28T15:41:08
github_id: 125,048,555
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_created_at: 2018-03-13T12:31:47
gha_updated_at: 2019-06-28T15:41:23
gha_pushed_at: 2020-06-05T18:16:27
gha_language: CSS
files:
[ { "alpha_fraction": 0.580110490322113, "alphanum_fraction": 0.6044198870658875, "avg_line_length": 34.490196228027344, "blob_id": "3083431a58e5a1b161e0881fd26dc74a203a5491", "content_id": "b83f756634f608663fd53d363696174557bf7086", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1810, "license_type": "permissive", "max_line_length": 124, "num_lines": 51, "path": "/apps/reports/migrations/0002_auto_20180523_0138.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('students', '0001_initial'),\n ('institutions', '0002_auto_20180523_0138'),\n ('reports', '0001_initial'),\n ('staff', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='report',\n name='student',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='students.Student'),\n ),\n migrations.AddField(\n model_name='report',\n name='student_class',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.StudentClass'),\n ),\n migrations.AddField(\n model_name='broadsheet',\n name='batch',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='reports.ReportBatch'),\n ),\n migrations.AddField(\n model_name='broadsheet',\n name='modified_by',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='staff.Teacher'),\n ),\n migrations.AddField(\n model_name='broadsheet',\n name='school',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='broadsheet',\n name='student_class',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='institutions.StudentClass'),\n ),\n ]\n" }, { "alpha_fraction": 0.5202991366386414, "alphanum_fraction": 0.5256410241127014, "avg_line_length": 29.95041275024414, "blob_id": "9cd5473aad74c0606ba0373da4db7ec6a01cf52c", "content_id": "3cfa30e53fc8f5a194308a64c9ca8dd2b94fe426", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3744, "license_type": "permissive", "max_line_length": 124, "num_lines": 121, "path": "/sani_app/static/js/admin_dashboard.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "var jsonUrl = \"/results/average/json\";\nvar dJsonUrl = \"/results/demographics/json\";\n\nvar avgChartOptions = {\n credits: {\n enabled: false\n },\n chart: {\n renderTo: 'average_container',\n type: 'column',\n height: 300,\n zoomType: 'xy'\n },\n legend: {enabled: true},\n title: {text: \"Class Average\"},\n xAxis: {labels: {rotation: -45}},\n yAxis: {title: {text: \"Percentage Average\"}, max: 100,},\n exporting: {\n buttons: {\n contextButton: {\n enabled: true,\n menuItems: null,\n symbol: \"menu\",\n }\n }\n },\n series: [],\n};\n\nvar demographicCharOptions = {\n credits: {\n enabled: false\n },\n chart: {\n renderTo: 'd_container',\n type: 'column',\n height: 300,\n zoomType: 'xy'\n },\n legend: {enabled: true},\n title: {text: \"Class Demography (%)\"},\n xAxis: {labels: {rotation: -45}},\n yAxis: {\n title: {\n text: \"Demographics (%)\"\n },\n max: 100,\n },\n 
exporting: {\n buttons: {\n contextButton: {\n enabled: true,\n menuItems: null,\n symbol: \"menu\",\n }\n }\n },\n series: [],\n};\n\nfunction loadAverage(path){\n $.getJSON(path,\n function(data) {\n var year_val = '';\n if ($('#c_year_select').val() == 'all') {\n year_val = 'current year in the database';\n } else {\n year_val = $('#c_year_select option:selected').html();\n }\n avgChartOptions.subtitle = {text: $('#c_term_select option:selected').html() + ' class averages for '+year_val,}\n avgChartOptions.xAxis.categories = data['class'];\n avgChartOptions.series[0] = ({\"name\": 'Class Average', \"data\": data['avg']});\n avgChartOptions.series[1] = ({\"name\": 'Highest Score', \"data\": data['best']});\n var charts = new Highcharts.Chart(avgChartOptions);\n });\n}\n\nfunction loadDemographics(path){\n $.getJSON(path,\n function(data) {\n var d_class = '';\n if ($('#d_class_select').val() == 'all') {\n d_class = 'All Class Frequency';\n } else {\n d_class = 'Frequency for '+$('#d_class_select option:selected').html();\n }\n demographicCharOptions.subtitle = {text: d_class,}\n demographicCharOptions.xAxis.categories = data['class'];\n demographicCharOptions.series[0] = ({\"name\": 'Males', \"data\": data['male']});\n demographicCharOptions.series[1] = ({\"name\": 'Females', \"data\": data['female']});\n var charts = new Highcharts.Chart(demographicCharOptions);\n });\n}\n\n//For Class Average\n$('#c_year_select').change(function(e){\n e.preventDefault();\n var year = $('#c_year_select').val();\n var term = $('#c_term_select').val();\n var path = jsonUrl + '?name=class_average&year=' + year +'&term=' + term;\n loadAverage(path);\n});\n\n$('#c_term_select').change(function(e){\n e.preventDefault();\n var year = $('#c_year_select').val();\n var term = $('#c_term_select').val();\n var path = jsonUrl + '?name=class_average&year=' + year +'&term=' + term;\n loadAverage(path);\n});\n\n//For Class Demographic\n$('#d_class_select').change(function(e){\n e.preventDefault();\n var d_class = $('#d_class_select').val();\n var path = dJsonUrl + '?name=class_demographic&class=' + d_class;\n loadDemographics(path);\n});\n\nloadAverage(jsonUrl + '?name=class_average&year=' + $('#c_year_select').val() +'&term=' + $('#c_term_select').val());\nloadDemographics(dJsonUrl + '?name=class_demographic&class=' + $('#d_class_select').val());" }, { "alpha_fraction": 0.7275320887565613, "alphanum_fraction": 0.7275320887565613, "avg_line_length": 29.521739959716797, "blob_id": "3e3dc7d51687b999a7f2958168acbea365720ea8", "content_id": "637270b259db9be6ac7b2b0b3dc3973541f352fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "permissive", "max_line_length": 61, "num_lines": 23, "path": "/apps/results/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n\n# Register your models here.\[email protected](Examination)\nclass ExaminationAdmin(admin.ModelAdmin):\n list_display = ('student', 'subject', 'exam_score')\n\[email protected](Test)\nclass TestAdmin(admin.ModelAdmin):\n list_display = ('student', 'subject', 'test_score')\n\[email protected](Assignment)\nclass AssignmentAdmin(admin.ModelAdmin):\n list_display = ('student', 'subject', 'assignment_score')\n\[email protected](Result)\nclass ResultAdmin(admin.ModelAdmin):\n list_display = ('student', 'test_score', 'signed_by')\n\[email protected](Grading)\nclass GradeAdmin(admin.ModelAdmin):\n 
list_display = ('institution', 'caption', 'start', 'end')" }, { "alpha_fraction": 0.5296703577041626, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.75, "blob_id": "98c902d7f7cbede6cca5c37dacaca5dccaf1e7c1", "content_id": "cdcd57e992faecdf5d06190b75da5d5d7fd1c3e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "permissive", "max_line_length": 53, "num_lines": 20, "path": "/apps/config/migrations/0004_config_plan_changed.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-22 08:55\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('config', '0003_auto_20180619_1837'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='config',\n name='plan_changed',\n field=models.BooleanField(default=False),\n ),\n ]\n" }, { "alpha_fraction": 0.6019967198371887, "alphanum_fraction": 0.6174936890602112, "avg_line_length": 25.4212589263916, "blob_id": "84c96d095124373b53e1cbd1cade3cb7b3b64e5b", "content_id": "d4a4c0f317281b0259574edc7046a828561f119b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6711, "license_type": "permissive", "max_line_length": 164, "num_lines": 254, "path": "/sani_app/settings.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for sani_app project.\n\nGenerated by 'django-admin startproject' using Django 1.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\nimport sys\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.join(BASE_DIR, 'apps'))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SANI_SECRET_KEY', '-ijjfv3xo4qi*m0#u&-pv9$##@1l#f3=h55w!v$-o!%u2t@%_+')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('LOCAL_DEBUG', False)\nALLOWED_HOSTS = ['localhost','178.62.68.90','.sani.com.ng']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'staff',\n 'core',\n 'config',\n 'institutions',\n 'subjects',\n 'students',\n 'results',\n 'reports',\n 'easy_pdf',\n 'states',\n 'sorl.thumbnail',\n 'django_crontab',\n 'awards',\n 'payments',\n 'contacts',\n 'insights',\n 'rest_framework',\n 'import_export',\n 'api',\n 'notifications',\n #'rest_framework_filters'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 
'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'sani_app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(PROJECT_DIR, 'templates'),], \n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'config.context_processors.setup',\n 'config.context_processors.assessment_context',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'sani_app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {}\nif DEBUG:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'sanidb', # Or path to database file if using sqlite3.\n 'USER': 'sanidb',\n 'PASSWORD': 'SaniDB18',\n 'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n 'PORT': '', # Set to empty string for default.\n }\n }\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_DIR, 'static'),\n)\n\n\n#Login Credentials and urls\nLOGIN_URL = 'login' #reverse_lazy('login') ##VERY HARMFUL TO LOGINREQUIRED VIEWS\nLOGIN_REDIRECT_URL = 'dashboard'\nLOGOUT_REDIRECT_URL = LOGIN_URL\n\nPAGE_SIZE = 20\nPAGE_ORPHANS = 5\nTOP_READ_SIZE = 15\n\nTEMPLATE_DEBUG=True\n\n# Choice tuples\nTERM_CHOICES = (\n (1, 'First Term'),\n (2, 'Second Term'),\n (3, 'Third Term')\n)\n\nSTATUS_CHOICES = (\n ('A', ('Active')),\n ('G', ('Graduated')),\n ('S', ('Suspended')),\n ('E', ('Expelled')),\n ('L', ('Left'))\n)\n\nSPECIAL_NEEDS = (\n ('Yes', 'Yes'),\n ('No', 'No')\n)\n\nCRONJOBS = [\n ('0 0 * */3 0', 'sani_app.cron.update_class'),\n]\n\n\nPAYSTACK_API_KEY = os.environ.get('PAYSTACK_API_KEY', None)\nIMPORT_EXPORT_USE_TRANSACTIONS = True\n\n# Anymail setup\nANYMAIL = {\n # (exact settings here depend on your ESP...)\n \"MAILGUN_API_KEY\": os.environ.get(\"MAILGUN_API_KEY\", \"key-5b8244a85dd4969806098365b885bf55\"),\n \"MAILGUN_SENDER_DOMAIN\": 'sani.com.ng', # your Mailgun domain, if needed\n}\nEMAIL_BACKEND = \"anymail.backends.mailgun.EmailBackend\" # or sendgrid.EmailBackend, or...\nEMAIL_HOST = 'smtp.mailgun.org'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = '[email 
protected]'\nEMAIL_HOST_PASSWORD = '6a2325ce9d3215a77a7823540ba0422f'\nEMAIL_USE_TLS = True\n\nTEMPLATE_DEBUG=True\n\n#Newsletter settings\nNEWSLETTER_CONFIRM_EMAIL = False\nSITE_ID=2\nDEFAULT_DOMAIN = \"sani.com.ng\"\n# REST_FRAMEWORK = {\n# 'DEFAULT_FILTER_BACKENDS': (\n# 'rest_framework_filters.backends.DjangoFilterBackend',\n# ),\n# }\n\n# CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n# 'LOCATION': 'cache_table',\n# }\n# }\n\n# LOGGING = {\n# 'version': 1,\n# 'disable_existing_loggers': True,\n# 'handlers': {\n# 'console': {\n# 'class': 'logging.StreamHandler',\n# },\n# },\n# 'loggers': {\n# 'django.db.backends': {\n# 'level': 'DEBUG',\n# 'handlers': ['console'],\n# }\n# },\n# }\n" }, { "alpha_fraction": 0.6140703558921814, "alphanum_fraction": 0.6140703558921814, "avg_line_length": 28.75757598876953, "blob_id": "3ad5b0f99d2ff45b6640e0ff484efc6b123829ee", "content_id": "37a43e798b128682a1cd4b3553a8220d42db2bb9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "permissive", "max_line_length": 78, "num_lines": 33, "path": "/apps/insights/util_managers.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from api.errors import *\n\nclass RecordQueryset(object):\n def __init__(self, school):\n self._school = school\n self._filter = None\n \n def get_queryset(self, *args, **kwargs):\n \"\"\"\n This returns the queryset in json format that is used for the analysis\n DESCRIPTION:\n queryset for the particular analysis type\n ARGS:\n None\n RETURN:\n data (dict): Return a dictionary of data for the analysis\n \"\"\"\n raise NotImplementedError(\"This method needs to be implemented\")\n \n def get_filters(self, *args, **kwargs):\n raise NotImplementedError(\"This method needs to be implemented\") \n\n\nclass AssessmentRecord(RecordQueryset):\n\n def get_queryset(self, *args, **kwargs):\n pass\n \n def get_filters(self, *args, **kwargs):\n pass\n\n def get_subject_performance(subject, year=None):\n results = Result.objects.filter(school=self._school, subject=subject)\n\n\n\n \n \n" }, { "alpha_fraction": 0.6280193328857422, "alphanum_fraction": 0.6292270421981812, "avg_line_length": 50.75, "blob_id": "0ac8b94156031b93d9dea556027f375417ebde6e", "content_id": "80cdc2b00fcdce96690ebef43b8646ccacc47b3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "permissive", "max_line_length": 115, "num_lines": 16, "path": "/apps/contacts/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom contacts.models import Contact\nfrom django.utils.translation import ugettext_lazy as _\n\nclass ContactForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(ContactForm, self).__init__(*args, **kwargs)\n self.fields['name'].widget.attrs = {'placeholder' : _(u'Full name'), 'class': 'form-control' }\n self.fields['email'].widget.attrs = {'placeholder' : _(u'Email'), 'class': 'form-control'}\n self.fields['subject'].widget.attrs = {'placeholder' : _(u'Subject'), 'class': 'form-control'}\n self.fields['message'].widget.attrs = {'placeholder' : _(u'Message'), 'class': 'form-control', 'rows': '6'}\n \n class Meta:\n model = Contact\n fields = ['name', 'email', 'subject', 'message']\n" }, { "alpha_fraction": 0.6261785626411438, "alphanum_fraction": 
0.6317248940467834, "avg_line_length": 38.21739196777344, "blob_id": "cc9111341f3defaa963fb72e7504f59dd972f49c", "content_id": "a3b9a92116ed83aa343c15a3dd67582ae6135f22", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "permissive", "max_line_length": 120, "num_lines": 46, "path": "/apps/payments/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.shortcuts import get_object_or_404\nfrom .models import AccessToken, TokenApplication, AccessCard, Plan\nfrom config.utils import pin_generator\n# Register your models here.\[email protected](AccessToken)\nclass AccessTokenAdmin(admin.ModelAdmin):\n list_display = ('token_application', 'token')\n\n def save_model(self, request, obj, form, change):\n if obj.token:\n pass\n else:\n obj.token = 'TOKEN-'+pin_generator(length=15)\n token_application = get_object_or_404(TokenApplication, application_id=obj.token_application.application_id)\n token_application.status = 'A'\n token_application.save()\n super(AccessTokenAdmin, self).save_model(request, obj, form, change)\n\n\[email protected](TokenApplication)\nclass TokenApplicationAdmin(admin.ModelAdmin):\n list_display = ('school','term','year','application_id', 'status')\n\n def save_model(self, request, obj, form, change):\n if change:\n if obj.is_paid:\n if not AccessToken.objects.filter(token_application=obj).exists():\n access_token = AccessToken.objects.create(\n token_application = obj,\n token = 'TOKEN-'+pin_generator(length=15)\n )\n obj.status = 'A'\n obj.save()\n else:\n pass\n super(TokenApplicationAdmin, self).save_model(request, obj, form, change)\n\n\[email protected](AccessCard)\nclass AccessCardAdmin(admin.ModelAdmin):\n list_display = ('student','access_code','term', 'year','school_token')\n\[email protected](Plan)\nclass PlanAdmin(admin.ModelAdmin):\n list_display = ('name', 'plan_code', 'amount', 'student_limit', 'staff_limit')" }, { "alpha_fraction": 0.6090534925460815, "alphanum_fraction": 0.6090534925460815, "avg_line_length": 39.5, "blob_id": "6860a5bbfd1bdcea5f02f9000fb82493cadb92a9", "content_id": "9982ee2647b0d67aa3e5b917260853681f273167", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "permissive", "max_line_length": 77, "num_lines": 12, "path": "/apps/staff/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', StaffListView.as_view(), name='list'),\n url(r'^register/$', register, name='new-staff'),\n url(r'^profile/(?P<staff_slug>[\\w-]+)$', staff_profile , name='profile'),\n url(r'^edit/$', edit_profile , name='edit'),\n url(r'^officers/$', offices , name='office'),\n url(r'^id-json/$', staff_id_json , name='staff-json'),\n url(r'^admin-json$', staff_admin_json , name='staff-admin-json'),\n]\n" }, { "alpha_fraction": 0.7763496041297913, "alphanum_fraction": 0.7763496041297913, "avg_line_length": 34.45454406738281, "blob_id": "89d573f31e2f0cbe86075e74511882a29e163fd8", "content_id": "4dfebb5c2a33ddade0f43a5f0b7e3fcb803258c9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "permissive", "max_line_length": 53, "num_lines": 11, "path": "/apps/institutions/admin.py", "repo_name": "pastorenue/sani-app", 
"src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import StudentClass, Institution\nfrom config.utils import pin_generator\n# Register your models here.\[email protected](StudentClass)\nclass StudentClassAdmin(admin.ModelAdmin):\n list_display = ('caption', 'nick_name')\n\[email protected](Institution)\nclass InstitutionAdmin(admin.ModelAdmin):\n list_display = ('name', 'location', 'short_code')" }, { "alpha_fraction": 0.5745901465415955, "alphanum_fraction": 0.5975409746170044, "avg_line_length": 30.28205108642578, "blob_id": "a2a062281cbee81637a31a2f0babd1e32d70dcf0", "content_id": "dfaa211e680e8ad98e94800fc9de4581df33d8c3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "permissive", "max_line_length": 123, "num_lines": 39, "path": "/apps/institutions/migrations/0002_auto_20180523_0138.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('states', '0001_initial'),\n ('institutions', '0001_initial'),\n ('staff', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='studentclass',\n name='form_teacher',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='staff.Teacher'),\n ),\n migrations.AddField(\n model_name='studentclass',\n name='school',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='institution',\n name='region',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='states.State'),\n ),\n migrations.AlterUniqueTogether(\n name='studentclass',\n unique_together=set([('caption', 'nick_name')]),\n ),\n ]\n" }, { "alpha_fraction": 0.6692759394645691, "alphanum_fraction": 0.6692759394645691, "avg_line_length": 30.96875, "blob_id": "078b91ea0be1c681a0a7402d12a7abc68528928e", "content_id": "e4dc21340f31bc8cf34e605c3f21133039ab69f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "permissive", "max_line_length": 88, "num_lines": 32, "path": "/apps/api/permissions.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from rest_framework import permissions\n\nclass StaffAdminAccessPermission(permissions.BasePermission):\n \"\"\"\n Custom permission for admin staff of a particular school\n \"\"\"\n \n def has_permission(self, request, view):\n if hasattr(request.user, 'teacher'):\n teacher = request.user.teacher\n return teacher.is_admin\n return False\n\nclass StaffAccessPermission(permissions.BasePermission):\n \"\"\"\n Custom permission for staff of a particular school\n \"\"\"\n \n def has_permission(self, request, view):\n return hasattr(request.user, 'teacher')\n\nclass StudentOwnAccountOrStaffPermission(permissions.BasePermission):\n \"\"\"\n Custom permission to check for access if the user is student with own account or is \n staff\n \"\"\"\n def has_object_permission(self, request, view, obj):\n if hasattr(request.user, 'student'):\n return obj.user == request.user\n if hasattr(request.user, 'teacher'):\n return True\n return False" }, { "alpha_fraction": 
0.692890465259552, "alphanum_fraction": 0.7071678042411804, "avg_line_length": 52.625, "blob_id": "39d542d98d575f7698316b6c9fee97991fdfd09d", "content_id": "907e3500bfa85d094003b2627f9ff4530ebbb32f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3432, "license_type": "permissive", "max_line_length": 222, "num_lines": 64, "path": "/sani_app/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "\"\"\"sani_app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.views.generic.base import TemplateView\nfrom django.contrib.auth import views as auth_views\nfrom django.conf.urls.static import static\nfrom django.core.urlresolvers import reverse\nfrom core.views import dashboard, index, change_password\nfrom django.conf import settings\nfrom django.conf.urls import handler404, handler500\nfrom core.views import error_404, error_500\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n # Custom Apps BaseViews\n url(r'^$', index , name='home'),\n url(r'^staff/', include('staff.urls', namespace='staff')),\n url(r'^core/', include('core.urls', namespace='core')),\n url(r'^config/', include('config.urls', namespace='config')),\n url(r'^institution/', include('institutions.urls', namespace='institution')),\n url(r'^student/', include('students.urls', namespace='students')),\n url(r'^subject/', include('subjects.urls', namespace='subjects')),\n url(r'^results/', include('results.urls', namespace='results')),\n url(r'^reports/', include('reports.urls', namespace='reports')),\n url(r'^payments/', include('payments.urls', namespace='payments')),\n url(r'^contact-us/', include('contacts.urls', namespace='contacts')),\n url(r'^pricing$', TemplateView.as_view(template_name=\"pricing.html\"), name=\"pricing\"),\n url(r'^insights/', include('insights.urls', namespace='insights')),\n url(r'^notifications/', include('notifications.urls', namespace='notifications')),\n url(r'^api/v1.0/', include('api.urls')),\n \n # Auth Views\n url(r'^auth/login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),\n url(r'^auth/password_change$', change_password, name='password-change'),\n url(r'^auth/password_reset$', auth_views.PasswordResetView.as_view(template_name='password_reset.html'), name='password_reset'),\n url(r'^auth/password_reset/done$', auth_views.PasswordResetDoneView .as_view(template_name='password_reset_done.html'), name='password_reset_done'),\n url(r'^auth/reset/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'), name='password_reset_confirm'),\n url(r'^auth/reset/done/$', auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_complete.html'), name='password_reset_complete'),\n url(r'^auth/logout/$', 
auth_views.LogoutView.as_view(), name='logout'),\n url(r'^auth/dashboard/$', dashboard, name='dashboard'),\n]+static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n\nurlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n#Use my custom error pages instead\nhandler404 = error_404\nhandler500 = error_500\n" }, { "alpha_fraction": 0.5323275923728943, "alphanum_fraction": 0.6012930870056152, "avg_line_length": 22.200000762939453, "blob_id": "c8208a2969eec9292b03c3a1c49858b4741d05a5", "content_id": "ffb1f1b924ae56aa9c9755061b7d7e6abeb6bb74", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "permissive", "max_line_length": 58, "num_lines": 20, "path": "/apps/results/migrations/0003_result_teacher_comment.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-10-03 20:22\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('results', '0002_auto_20180523_0138'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='result',\n name='teacher_comment',\n field=models.TextField(blank=True, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.560860812664032, "alphanum_fraction": 0.5803631544113159, "avg_line_length": 37.128204345703125, "blob_id": "17ee62567232e2052da7a401d6848d021c8c3196", "content_id": "ec225d434d9af060a66eacc424713f4e9d5a7302", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1487, "license_type": "permissive", "max_line_length": 169, "num_lines": 39, "path": "/apps/subjects/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0001_initial'),\n ('staff', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Subject',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=150)),\n ('short_code', models.CharField(blank=True, max_length=10, null=True)),\n ('date_entered', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('head_teacher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='head_teacher', to='staff.Teacher')),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ('teachers', models.ManyToManyField(to='staff.Teacher')),\n ],\n options={\n 'ordering': ('name',),\n },\n ),\n migrations.AlterUniqueTogether(\n name='subject',\n unique_together=set([('name', 'short_code')]),\n ),\n ]\n" }, { "alpha_fraction": 0.43142056465148926, "alphanum_fraction": 0.441742479801178, "avg_line_length": 41.656715393066406, "blob_id": "def5dfb24e6d6ee4ff42f83bf03cae5f1fa08920", "content_id": "4bc1aac955e0724abb61b533a3a811d45b40abd4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5716, "license_type": "permissive", "max_line_length": 
195, "num_lines": 134, "path": "/apps/students/templates/students/edit.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'students/list.html' %}\n{% load static thumbnail %}\n{% block extracss %}\n<link rel=\"stylesheet\" href=\"{% static 'css/profiles.css' %}\">\n{% endblock %}\n\n{% block title %} Edit Profile {% endblock %}\n{% block inner-content %}\n<div id=\"class-page-content\">\n\n<div class=\"teachers table\">\n <div class=\"header\">\n <h1>{{student.last_name}}, {{student.first_name}} 's Account</h1>\n </div>\n</div>\n\n<div class=\"choose-section-name\">\n <div class=\"name-entry\">\n <div>\n <a <a href=\"{{student.get_absolute_url}}\"><h1 class=\"title center\">{{student.user}}</h1></a>\n </div>\n \n <div class=\"basicprofile vcard\">\n <div class=\"row\">\n <div class=\"col-md-4 col-sm-12\">\n <div class=\"panel panel-default add-padding\">\n <form class=\"add-padding\" method=\"POST\" id=\"form_update\" action=\"{% url 'students:update-photo' student_slug=student.slug%}\" enctype=\"multipart/form-data\">{% csrf_token %}\n <input id=\"id_photo\" type=\"file\" style=\"position: absolute; top: -300px;\" name=\"photo\">\n <input type=\"hidden\" name=\"user_id\" value=\"{{student.id}}\">\n <input type=\"submit\" id=\"frm_submit\" hidden=\"hidden\">\n <button id=\"photo_change\" class=\"btn btn-sm btn-main\" type=\"button\">Change Photo</button>\n </form>\n {% if student.photo %}\n {% thumbnail student.photo \"130x130\" crop=\"center\" as im %}\n <img class=\"img-circle\" src=\"{{im.url}}\" width=\"{{im.width}}\" height=\"{{im.height}}\">\n {% endthumbnail %}\n {% else %}\n <img class=\"img-circle\" src=\"{% static 'img/profile-icon.jpg' %}\" />\n {% endif %}\n <div id=\"container\" style=\"display: none;\">\n <h5>Image Preview</h5>\n <img id=\"img_preview\" src=\"\" style=\"width: 100px; height: 110px; border-radius: 4px; padding: 3px; border: 2px solid #f1f1f1;\">\n </div>\n </div>\n </div>\n <div class=\"col-md-4 col-sm-12\">\n <div class=\"panel panel-default add-padding\">\n <h3><strong>{{ student.full_name }}</strong></h3>\n <div>\n <h4>{{ student.student_class.caption }}</h4>\n </div>\n <div>\n Account Status: {{student.get_status}}\n <span> <i class=\"icons icons-sm icons-info\" data-toggle=\"popover\" title=\"The student's current status in the school\"></i></span>\n \n </div>\n {% if student.parent_phone_number %}\n <div class=\"tel\">\n <i class=\"icons icons-sm icons-mobile\"></i>\n <span class=\"value\">{{ student.parent_phone_number }}</span>\n </div>\n {% endif %}\n </div>\n </div>\n <div class=\"col-md-4 col-sm-12\">\n <div class=\"panel panel-default add-padding\">\n <h4>{{student.school}}</h4>\n <div class=\"\">\n {%if student.school.logo %}\n {% thumbnail student.school.logo \"127x127\" crop=\"top\" as im %}\n <img class=\"img-circle\" src=\"{{ im.url}}\" />\n {% endthumbnail %}\n {% else %}\n <img class=\"img-circle\" src=\"{% static 'img/school-icon.jpg' %}\" />\n {% endif %}\n </div>\n </div>\n </div>\n </div>\n </div>\n\n <div class=\"row\">\n <form method=\"POST\" action=\"\" enctype=\"multipart/form-data\">{% csrf_token %}\n <div class=\"col-md-6 form-group\">\n {% if user.teacher.is_admin %}\n <h3>Institution Details</h3>\n {{inst_form}}\n {% endif %}\n <h3>Personal Details</h3>\n {{p_form}}\n </div>\n \n <div class=\"col-md-6 form-group\">\n <h3>Basic Details</h3>\n {{b_form}}\n </div>\n <div class=\"col-md-12 form-group\">\n <button id=\"id_save\" type=\"submit\" class=\"btn btn-main\">Update</button>\n </div>\n 
</form>\n </div>\n </div>\n</div>\n{% endblock inner-content %}\n{% block extrajs %}\n<script>\n $(\"#alt_save\").click(function() {\n $(\"#id_save\").click();\n });\n</script>\n\n<script>\n file_input = document.getElementById('id_photo');\n $(\"#photo_change\").click(function() {\n console.log(this);\n file_input.click(); \n\n });\n\n function preview_image(file_input_id, preview_image_id){\n var ofReader = new FileReader();\n ofReader.readAsDataURL(document.getElementById(file_input_id).files[0]);\n ofReader.onload = function(ofReader){\n document.getElementById(preview_image_id).src = ofReader.target.result;\n }\n }\n\n $('#id_photo').change(function(){\n preview_image('id_photo','img_preview');\n $('#container').css('display', 'inline-block');\n $('#frm_submit').click();\n }); \n</script>\n{% endblock %}\n" }, { "alpha_fraction": 0.6777226328849792, "alphanum_fraction": 0.6803342700004578, "avg_line_length": 37.2400016784668, "blob_id": "643292038b77a017ddac9cf7d711af47c9ff40e3", "content_id": "92c31bab72fbb021760ad6ff85c2e08d2f7e59ac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3829, "license_type": "permissive", "max_line_length": 94, "num_lines": 100, "path": "/apps/subjects/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView\nfrom .models import Subject\nfrom django.conf import settings\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\n\nclass SubjectListView(ListView):\n model = Subject\n template_name = 'subjects/list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self):\n queryset = super(SubjectListView, self).get_queryset()\n queryset = queryset.filter(school=self.request.user.teacher.school)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(SubjectListView, self).get_context_data(**kwargs)\n\n queryset = self.get_queryset()\n paginator = Paginator(queryset, self.paginated_by)\n page = self.request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n context['subjects'] = queryset\n context['count'] = self.get_queryset().count()\n return context\n\n @method_decorator(user_passes_test(lambda u: getattr(u.teacher, 'is_admin', None))) \n def dispatch(self, request, *args, **kwargs):\n return super(SubjectListView, self).dispatch(request, *args, **kwargs)\n\n\[email protected]\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef new_subject(request):\n template_name = 'subjects/new_subject.html'\n school = request.user.teacher.school\n if request.method == 'POST':\n form = SubjectCreationForm(school, request.POST)\n\n #we need to get the teachers list manually\n #so we can add them individually\n teachers = request.POST.getlist('teachers')\n if form.is_valid():\n subject = form.save(commit=False)\n subject.school = school\n subject.save()\n\n #assign each teacher manually to the subject\n for i in teachers:\n teacher = get_object_or_404(Teacher, 
pk=int(i))\n subject.teachers.add(teacher)\n subject.save()\n messages.success(request, \"Subject: '%s' was created successfully\" % (subject))\n return HttpResponseRedirect(reverse('subjects:new-subject'))\n else:\n form = SubjectCreationForm(school)\n return render(request, template_name, {'form': form})\n\n\[email protected]\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef edit_subject(request, subject_id):\n template_name = 'subjects/edit.html'\n school = request.user.teacher.school\n subject = Subject.objects.get(pk=subject_id)\n if request.method == \"POST\":\n form = SubjectCreationForm(school, request.POST, instance=subject)\n if form.is_valid():\n form.save()\n messages.success(request, \"The subject '%s' was successfully updated\" % (subject))\n return HttpResponseRedirect(reverse('subjects:list'))\n else:\n form = SubjectCreationForm(school, instance=subject)\n return render(request, template_name, {'form': form, 'subject': subject})\n\n\[email protected]\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef delete_subject(request, subject_id):\n subject = get_object_or_404(Subject, pk=subject_id)\n subject.delete()\n messages.success(request, \"The subject '%s' was successfully deleted\" % (subject))\n return HttpResponseRedirect(reverse('subjects:list'))\n \n" }, { "alpha_fraction": 0.6372013688087463, "alphanum_fraction": 0.6566553115844727, "avg_line_length": 39.69444274902344, "blob_id": "ec704c6801b8da8cfd0c3aea025b4ec9ecd7b190", "content_id": "8adabbd607eeef7b095327223d026d525f3b8f25", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2930, "license_type": "permissive", "max_line_length": 121, "num_lines": 72, "path": "/apps/institutions/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom config.utils import pin_generator\nfrom django.utils.translation import ugettext, ugettext_lazy as _\n\n# Create your models here.\nclass Institution(models.Model):\n name = models.CharField(max_length=150)\n short_code = models.CharField(max_length=10, null=True, blank=True)\n box_address = models.CharField(max_length=20, null=True, blank=True)\n location = models.CharField(max_length=50)\n region = models.ForeignKey(\"states.State\", null=True)\n registration_id = models.CharField(max_length=30, null=True, blank=True)\n email = models.EmailField(null=True)\n logo = models.ImageField(upload_to=\"uploads/%Y/%m/%d\", null=True, blank=True)\n\n def __str__(self):\n return \"%s, %s\" % (self.name, self.location)\n\n\nCRECHE,NURSERY_ONE,NURSERY_TWO,NURSERY_THREE = range(1,5)\nPRIMARY_ONE,PRIMARY_TWO,PRIMARY_THREE,PRIMARY_FOUR,PRIMARY_FIVE,PRIMARY_SIX = range(5,11)\nJSS_1,JSS_2,JSS_3,SS_1,SS_2,SS_3 = range(11,17)\n\nCLASS_CHOICES = (\n (CRECHE, 'Creche/Pre-Nursery'),\n (NURSERY_ONE, 'Nursery 1'),\n (NURSERY_TWO, _('Nursery 2')),\n (NURSERY_THREE, _('Nursery 3')),\n (PRIMARY_ONE, 'Primary 1'),\n (PRIMARY_TWO, _('Primary 2')),\n (PRIMARY_THREE, _('Primary 3')),\n (PRIMARY_FOUR, _('Primary 4')),\n (PRIMARY_FIVE, _('Primary 5')),\n (PRIMARY_SIX, _('Primary 6')),\n (JSS_1, _('JSS 1')),\n (JSS_2, _('JSS 2')),\n (JSS_3, _('JSS 3')),\n (SS_1, _('SS 1')),\n (SS_2, _('SS 2')),\n (SS_3, _('SS 3')),\n)\n\nclass StudentClass(models.Model):\n school = models.ForeignKey(Institution, null=True)\n generic_class = models.PositiveIntegerField(_(\"Generic/Standard Class Equivalent\"), choices=CLASS_CHOICES, null=True)\n caption = 
models.CharField('Class Form', max_length=100)\n nick_name = models.CharField('Class Name', max_length=100, null=True, blank=True)\n class_code = models.CharField(max_length=10, null=True)\n class_icon = models.ImageField(upload_to=\"uploads/%Y/%m/%d/\", null=True, blank=True)\n form_teacher = models.ForeignKey('staff.Teacher')\n max_student = models.PositiveIntegerField('Maximum Number of Students to Accomodate', default=0)\n no_subjects = models.PositiveIntegerField('Number of subjects offered', default=0)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n if self.nick_name is not None:\n return \"{}-{}\".format(self.caption, self.nick_name)\n return \"{}\".format(self.caption)\n\n class Meta:\n ordering = ('generic_class','nick_name')\n unique_together = ('caption', 'nick_name')\n verbose_name = u'Student Class'\n verbose_name_plural = u'Student Classes'\n\n @property\n def get_generic_class(self):\n generic_class_dict = dict(CLASS_CHOICES)\n if self.generic_class:\n return generic_class_dict[self.generic_class]\n return 'Not Set'\n" }, { "alpha_fraction": 0.5432801842689514, "alphanum_fraction": 0.5706150531768799, "avg_line_length": 31.55555534362793, "blob_id": "1b8b76eefb211d6aef375b9354fa7347b9c39ccb", "content_id": "75cb9e4551ed14d16b72887d954a4aebdf4ede48", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 878, "license_type": "permissive", "max_line_length": 228, "num_lines": 27, "path": "/sani_app/templates/password_reset_email.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n <title>SANI: Welcome</title>\n <style type=\"text/css\">\n body{\n color:#fff;\n }\n p{\n background: #542437;\n padding:10px 20px\n }\n \n </style>\n</head>\n<body>\n <p>\n A user, probably you, requested a password reset of {{email}} on SANI.,\n <br/>\n \n To complete this request, clicki the below link to reset your password. 
You may also copy and paste the link below in your browser.\n <a href=\"{{protocol}}://{{domain}}{% url 'password_reset_confirm' uidb64=uid token=token %}\" style=\"border-radius: 30px; padding: 8px 35px; background: #3b5998; color: #fff; text-decoration: none;\">Reset Password Now</a>\n </p>\n <br/><br/><br/>\n <p>{{protocol}}://{{domain}}{% url 'password_reset_confirm' uidb64=uid token=token %}</p>\n</body>\n</html>" }, { "alpha_fraction": 0.5656205415725708, "alphanum_fraction": 0.5772705674171448, "avg_line_length": 57.41666793823242, "blob_id": "8028013fcf58ed7e48a6090a5d7e1a931cc796db", "content_id": "0962d7f28984f03ce510e1fedb230c1affb15af9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4206, "license_type": "permissive", "max_line_length": 196, "num_lines": 72, "path": "/apps/staff/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Position',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100, verbose_name='Office Title')),\n ('description', models.TextField(blank=True)),\n ('reports_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reports', to='staff.Position')),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ],\n options={\n 'ordering': ('name',),\n 'verbose_name_plural': 'Positions',\n 'verbose_name': 'Position',\n },\n ),\n migrations.CreateModel(\n name='Qualification',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('caption', models.CharField(choices=[('PhD', 'PhD'), ('MSc', 'MSc'), ('BSc', 'BSc'), ('HND', 'HND'), ('OND', 'OND'), ('FSLC', 'FSLC')], max_length=10)),\n ('course', models.CharField(max_length=200)),\n ('specify', models.CharField(blank=True, max_length=200, null=True, verbose_name='Please specify if others')),\n ('year_of_degree', models.DateField()),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('institution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ],\n ),\n migrations.CreateModel(\n name='Teacher',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, choices=[('Mr.', 'Mr'), ('Mrs', 'Mrs'), ('Miss', 'Miss'), ('Dr.', 'Dr'), ('Prof', 'Prof'), ('Mallam', 'Mallam')], max_length=20, null=True)),\n ('staff_id', models.CharField(blank=True, max_length=50, null=True)),\n ('first_name', models.CharField(blank=True, max_length=50, null=True)),\n ('last_name', models.CharField(blank=True, max_length=50, null=True)),\n ('photo', models.ImageField(blank=True, null=True, upload_to='uploads/%Y/%m/%d')),\n ('gender', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=2, null=True)),\n 
('marital_status', models.CharField(blank=True, choices=[('S', 'Single'), ('M', 'Married'), ('W', 'Widowed'), ('D', 'Divorced')], max_length=2, null=True)),\n ('email', models.EmailField(blank=True, max_length=254, null=True)),\n ('grade_level', models.PositiveIntegerField(blank=True, null=True)),\n ('phone_number', models.CharField(blank=True, max_length=20, null=True)),\n ('is_admin', models.BooleanField(default=False)),\n ('slug', models.SlugField(blank=True, max_length=250, null=True, unique=True)),\n ('position', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staff.Position')),\n ('qualification', models.ManyToManyField(blank=True, to='staff.Qualification')),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('first_name',),\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6899224519729614, "alphanum_fraction": 0.6899224519729614, "avg_line_length": 35, "blob_id": "d31729a0bc4db773a869cbd9d7ac3ab52856cb39", "content_id": "bb192d0c7fa0549dc91b813370b8b91680df7f33", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "permissive", "max_line_length": 85, "num_lines": 7, "path": "/apps/contacts/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom contacts.models import Contact\n\[email protected](Contact)\nclass ContactAdmin(admin.ModelAdmin):\n list_display = ('name', 'email', 'subject', 'message', 'created_on', 'status', )\n list_editable = ('status',)\n \n" }, { "alpha_fraction": 0.5915032625198364, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 24.58333396911621, "blob_id": "b342dc84da1f91e1ad761d56a1cf5d1b2c2a1d52", "content_id": "7b39c7a17f6aecc0b8b10b439a9ea9c26278a101", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 306, "license_type": "permissive", "max_line_length": 56, "num_lines": 12, "path": "/apps/insights/templates/insights/compare.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'insights/analytics.html' %}\n{% load static %}\n\n{% block inner-content %}\n<div class=\"analytics-main scroll col-md-10\"> \n <div id=\"container\" style=\"width: 100%;\">\n </div> \n</div>\n{% endblock %}\n{% block inner_js %}\n<script src=\"{% static 'js/comparative.js' %}\"></script>\n{% endblock %}" }, { "alpha_fraction": 0.6384928822517395, "alphanum_fraction": 0.6408689618110657, "avg_line_length": 56.764705657958984, "blob_id": "06a48974ec1670707c818b61699fbdd84f9b6ad4", "content_id": "0f030df3485891fcfe8027eb153a40453ddee454", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2946, "license_type": "permissive", "max_line_length": 160, "num_lines": 51, "path": "/apps/institutions/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import StudentClass, Institution\nfrom staff.models import Teacher\n\n\nclass ClassCreationForm(forms.ModelForm):\n\n def __init__(self, school, *args, **kwargs):\n super(ClassCreationForm, self).__init__(*args, **kwargs)\n self.fields['caption'].widget.attrs = {'placeholder' : 'Enter Class Name e.g. 
Primary 5', 'class': 'form-control'}\n self.fields['generic_class'].widget.attrs = {'class': 'form-control'}\n self.fields['nick_name'].widget.attrs = {'placeholder' : 'Class Nick/Fun Name e.g. Banana', 'class': 'form-control'}\n self.fields['class_code'].widget.attrs = {'placeholder' : 'Short Class ID (maximum of 5 characters) e.g. NUR2', 'class': 'form-control'}\n self.fields['max_student'].widget.attrs = {'placeholder' : '', 'class': 'form-control'}\n self.fields['form_teacher'].widget.attrs = {'placeholder' : 'Class Form Teacher'}\n self.fields['form_teacher'].queryset = Teacher.objects.filter(school=school)\n self.fields['class_icon'].widget.attrs = {}\n\n class Meta:\n model = StudentClass\n exclude = ('school',)\n\nclass SchoolCreationForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(SchoolCreationForm, self).__init__(*args, **kwargs)\n self.fields['name'].widget.attrs = {'placeholder' : 'Name of School e.g Nigeria World School', 'class': 'form-control'}\n self.fields['short_code'].widget.attrs = {'placeholder' : 'Short Code e.g NWS', 'class': 'form-control'}\n self.fields['box_address'].widget.attrs = {'placeholder' : 'Box Address e.g P.O Box 45', 'class': 'form-control'}\n self.fields['location'].widget.attrs = {'placeholder' : 'School Location e.g Lagos', 'class': 'form-control'}\n self.fields['email'].widget.attrs = {'placeholder' : \"School or Admin User's Email e.g [email protected]\", 'class': 'form-control'}\n self.fields['registration_id'].widget.attrs = {'placeholder' : 'Registration ID. Please click the \"Generate\" button for an ID', 'class': 'form-control'}\n\n class Meta:\n model = Institution\n exclude = ('logo',)\n\n\nclass SchoolForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(SchoolForm, self).__init__(*args, **kwargs)\n self.fields['name'].widget.attrs = {'placeholder' : 'Name of School e.g Nigeria World School', 'class': 'form-control'}\n self.fields['short_code'].widget.attrs = {'placeholder' : 'Short Code e.g NWS', 'class': 'form-control'}\n self.fields['box_address'].widget.attrs = {'placeholder' : 'Box Address e.g P.O Box 45', 'class': 'form-control'}\n self.fields['location'].widget.attrs = {'placeholder' : 'School Location e.g Lagos', 'class': 'form-control'}\n self.fields['email'].widget.attrs = {'placeholder' : 'School Email e.g [email protected]', 'class': 'form-control'}\n\n class Meta:\n model = Institution\n exclude = ('registration_id',)\n" }, { "alpha_fraction": 0.6023166179656982, "alphanum_fraction": 0.6032818555831909, "avg_line_length": 34.74137878417969, "blob_id": "170176341b7501fd35bcf822615d77bb71aee1d3", "content_id": "e52beba9fca00cb6f524e063d4c76046499b25e0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2072, "license_type": "permissive", "max_line_length": 125, "num_lines": 58, "path": "/apps/staff/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom config.utils import create_user\nfrom .models import Teacher, Position\nfrom django.db import transaction\nfrom django.utils.text import slugify\nimport uuid\n\nclass StaffCreationForm(forms.ModelForm):\n \n def __init__(self, *args, **kwargs):\n super(StaffCreationForm, self).__init__(*args, **kwargs)\n self.fields['email'].widget.attrs = {'placeholder' : 'Email e.g. 
[email protected]', 'class': 'form-control'}\n self.fields['last_name'].widget.attrs = {'placeholder' : 'Staff Surname', 'class': 'form-control'}\n self.fields['first_name'].widget.attrs = {'placeholder' : 'First Name', 'class': 'form-control'}\n self.fields['staff_id'].widget.attrs = {'placeholder' : 'Staff ID', 'class': 'form-control', 'required': 'required'}\n self.fields['gender'].widget.attrs = {'class': 'form-control'} \n\n class Meta:\n model = Teacher\n fields = (\n 'email',\n 'last_name',\n 'first_name',\n 'gender',\n 'staff_id',\n )\n\n\n @transaction.atomic\n def save(self, commit=True):\n user = create_user(self.cleaned_data['last_name'], self.cleaned_data['first_name'])\n # Set default password to this user's username and birth date (if provided):\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['email'])\n user.save()\n\n instance = super(StaffCreationForm, self).save(commit=False)\n instance.user = user\n orig = slugify(instance.last_name)\n if Teacher.objects.filter(slug=instance.slug).exists():\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n else:\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n\n instance.save()\n return instance\n\n\nclass StaffForm(forms.ModelForm):\n class Meta:\n model = Teacher\n exclude = ('school', 'user', 'slug', 'qualification', 'is_admin', 'position')\n\n\nclass PositionForm(forms.ModelForm):\n class Meta:\n model = Position\n exclude = ('school',)" }, { "alpha_fraction": 0.7670251131057739, "alphanum_fraction": 0.7670251131057739, "avg_line_length": 26.899999618530273, "blob_id": "536771b2ac5b7145f87b8fc1955fb9374879e4fb", "content_id": "d05ab6320965822c3c30d66b5466c6255ec435b3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "permissive", "max_line_length": 55, "num_lines": 10, "path": "/apps/results/templatetags/dict_filters.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import template\nregister = template.Library()\n\[email protected]\ndef get_dict_values(dict_collections, key, default=''):\n\treturn dict_collections[key]\n\[email protected]_tag('_pagination.html')\ndef render_paginator(object_list):\n\treturn {'object_list': object_list}\n" }, { "alpha_fraction": 0.5466464161872864, "alphanum_fraction": 0.5560309290885925, "avg_line_length": 45.4487190246582, "blob_id": "16e3f3b8893a8bf18704018750e0476196874fd8", "content_id": "5e1452a6b95550c71976340f20cf89fa9bdaff5b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7246, "license_type": "permissive", "max_line_length": 173, "num_lines": 156, "path": "/apps/insights/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-07-26 07:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0005_auto_20180622_1055'),\n ('students', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Assessment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('title', 
models.CharField(max_length=300, null=True)),\n ('assessment_type', models.PositiveIntegerField(choices=[(1, 'Grade Assessment'), (2, 'Demographic Assessment'), (3, 'Performance Assessment')], default=1)),\n ('assessment_year', models.DateField()),\n ('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school', to='institutions.Institution')),\n ('student_class', models.ManyToManyField(to='institutions.StudentClass')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='AssignGroupAssessment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='DataSheet',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=200, null=True)),\n ('assessment_type', models.PositiveIntegerField(choices=[(1, 'Grade Assessment'), (2, 'Demographic Assessment'), (3, 'Performance Assessment')], null=True)),\n ('file_type', models.CharField(choices=[('json', 'json'), ('csv', 'csv'), ('excel', 'excel')], max_length=20, null=True)),\n ('upload_file', models.FileField(null=True, upload_to='uploads/%Y/%m/%d')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Evaluation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='GroupAssessment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=255, null=True, unique=True)),\n ('description', models.TextField(blank=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Measure',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('assessment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insights.Assessment')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='MLModels',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ml_category', models.PositiveIntegerField(choices=[(1, 'Regression'), (2, 'Classification'), (3, 'Clustering')], null=True)),\n ('ml_type', models.CharField(max_length=100, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='PerformanceMeasure',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=100, null=True)),\n ('measure_type', models.PositiveIntegerField(choices=[(1, 'Attendance'), (2, 'Academics'), (3, 'Ratios'), (4, 'Faculty'), (5, 'Psychomotive')], null=True)),\n ],\n 
),\n migrations.CreateModel(\n name='StashAssessment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('assessment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insights.Assessment')),\n ('measures', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insights.Measure')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='measure',\n name='measures',\n field=models.ManyToManyField(to='insights.PerformanceMeasure'),\n ),\n migrations.AddField(\n model_name='measure',\n name='school',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='evaluation',\n name='ml_models',\n field=models.ManyToManyField(to='insights.MLModels'),\n ),\n migrations.AddField(\n model_name='evaluation',\n name='school',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='assigngroupassessment',\n name='group',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insights.GroupAssessment'),\n ),\n migrations.AddField(\n model_name='assigngroupassessment',\n name='student',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='students.Student'),\n ),\n ]\n" }, { "alpha_fraction": 0.5898617506027222, "alphanum_fraction": 0.5898617506027222, "avg_line_length": 23.11111068725586, "blob_id": "7a1a86b23bd26d460f1ec44cce006cdc58e161b7", "content_id": "ba3ab2bdc10e51e6829d0ed9d31f99046c7ba98f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "permissive", "max_line_length": 56, "num_lines": 18, "path": "/apps/config/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Config\n\nclass SchoolSetupForm(forms.ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SchoolSetupForm, self).__init__(*args, **kwargs)\n\n\tclass Meta:\n\t\tmodel = Config\n\t\texclude = (\n 'school',\n 'current_performance',\n 'previous_performance',\n 'current_class_average',\n 'previous_class_average',\n\t\t\t'plan_changed'\n )\n" }, { "alpha_fraction": 0.6725663542747498, "alphanum_fraction": 0.6725663542747498, "avg_line_length": 18, "blob_id": "1a4b056597ea008f9c62b190b6aac2f19534d9f4", "content_id": "e142356c089ed144acdf3f3e2b5498fc2d8893ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "permissive", "max_line_length": 40, "num_lines": 6, "path": "/apps/config/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^setup$', setup, name='setup'),\n]" }, { "alpha_fraction": 0.500727117061615, "alphanum_fraction": 0.5123606324195862, "avg_line_length": 33.38333511352539, "blob_id": "830523037dbf1b14c8008bbd2d6d4d8c0ac8982f", "content_id": "cb23570ce322dff42782936f0e92d35a197a8c3f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "permissive", "max_line_length": 133, 
"num_lines": 60, "path": "/apps/states/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-03-22 11:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('code', models.CharField(max_length=3)),\n ('name', models.CharField(max_length=50)),\n ],\n options={\n 'verbose_name': 'Country',\n 'verbose_name_plural': 'Countries',\n 'ordering': ('name',),\n },\n ),\n migrations.CreateModel(\n name='LGA',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30)),\n ],\n options={\n 'verbose_name': 'Nigerian Local Government Area',\n 'verbose_name_plural': 'Nigerian Local Government Areas',\n 'ordering': ('state', 'name'),\n },\n ),\n migrations.CreateModel(\n name='State',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('code', models.CharField(max_length=3)),\n ('name', models.CharField(max_length=30)),\n ],\n options={\n 'verbose_name': 'Nigerian State',\n 'verbose_name_plural': 'Nigerian States',\n 'ordering': ('name',),\n },\n ),\n migrations.AddField(\n model_name='lga',\n name='state',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='local_govt_areas', to='states.State'),\n ),\n ]\n" }, { "alpha_fraction": 0.5341726541519165, "alphanum_fraction": 0.5517585873603821, "avg_line_length": 49.040000915527344, "blob_id": "441fb271125acb201344c74b7a7ac568420f6039", "content_id": "d4a7cb0d6c8cfb2c5c829e8c25842ad04a7c9d4b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5004, "license_type": "permissive", "max_line_length": 139, "num_lines": 100, "path": "/apps/results/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0002_auto_20180523_0138'),\n ('staff', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Assignment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('session', models.CharField(blank=True, max_length=10, null=True)),\n ('date_created', models.DateField(auto_now_add=True, null=True)),\n ('date_modified', models.DateTimeField(auto_now=True, null=True)),\n ('assignment_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6, null=True)),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n migrations.CreateModel(\n name='Examination',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 
'Second Term'), (3, 'Third Term')], null=True)),\n ('session', models.CharField(blank=True, max_length=10, null=True)),\n ('date_created', models.DateField(auto_now_add=True, null=True)),\n ('date_modified', models.DateTimeField(auto_now=True, null=True)),\n ('exam_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6, null=True)),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n migrations.CreateModel(\n name='Grading',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('caption', models.CharField(max_length=15, null=True, unique=True)),\n ('grade_points', models.DecimalField(decimal_places=1, max_digits=2, null=True)),\n ('start', models.IntegerField(default=0, null=True)),\n ('end', models.PositiveIntegerField(default=100, null=True)),\n ],\n options={\n 'ordering': ('caption',),\n 'verbose_name_plural': 'Gradings',\n 'verbose_name': 'Grading',\n },\n ),\n migrations.CreateModel(\n name='Result',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('session', models.CharField(blank=True, max_length=10, null=True)),\n ('date_created', models.DateField(auto_now_add=True, null=True)),\n ('date_modified', models.DateTimeField(auto_now=True, null=True)),\n ('exam_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('test_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('assignment_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ],\n options={\n 'ordering': ('date_created',),\n },\n ),\n migrations.CreateModel(\n name='ResultBatch',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ],\n ),\n migrations.CreateModel(\n name='Test',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('session', models.CharField(blank=True, max_length=10, null=True)),\n ('date_created', models.DateField(auto_now_add=True, null=True)),\n ('date_modified', models.DateTimeField(auto_now=True, null=True)),\n ('test_score', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staff.Teacher')),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6381418108940125, "alphanum_fraction": 0.645476758480072, "avg_line_length": 27.571428298950195, "blob_id": "701dc336f735243b2c579e81899a0b3c201a2eb0", "content_id": "ff2533eb723ffd4ca3a71b6aea94038a5c565fc4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "permissive", "max_line_length": 69, "num_lines": 14, "path": "/apps/awards/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Award(models.Model):\n title = models.CharField(max_length=50, null=True)\n icon = models.ImageField(upload_to='uploads/%Y/%m/%d', 
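The `Grading` model above stores a caption plus an inclusive `start`/`end` band, which is all a score-to-grade lookup needs. A toy illustration of that banding (the cut-offs here are invented; the repo resolves them through `grade()` in `results/utils.py`):

```python
# Toy grade bands; each tuple is (caption, start, end), end inclusive,
# matching how Grading.start/Grading.end are used later in the dump.
bands = [('A', 70, 100), ('B', 60, 69), ('C', 50, 59), ('F', 0, 49)]

def caption_for(score):
    return next(c for c, start, end in bands if start <= score <= end)

assert caption_for(64) == 'B'
```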
null=True)\n description = models.TextField()\n rating = models.PositiveIntegerField(default=0)\n\n def __str__(self):\n return self.title\n \n class Meta:\n ordering = ('rating',)\n \n " }, { "alpha_fraction": 0.6736723780632019, "alphanum_fraction": 0.6742353439331055, "avg_line_length": 31.29697036743164, "blob_id": "313ef4ef6952ba2c3e2cb27638f55cbefdbb9d0f", "content_id": "b6e6e370831e35e4c9c94c6c4047c21051ce1f8c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5329, "license_type": "permissive", "max_line_length": 112, "num_lines": 165, "path": "/apps/api/serializers.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom students.models import Student\nfrom institutions.models import StudentClass, Institution\nfrom results.models import Result\nfrom subjects.models import Subject\nfrom django.core import serializers as django_serializer\nfrom reports.models import Report\nfrom insights.models import Assessment, GroupAssessment\nfrom states.models import *\nfrom staff.models import *\nimport uuid\nfrom rest_framework.fields import CurrentUserDefault # This will really be cool, to get\n # the current logged in user\n\nclass SchoolSerializer(serializers.ModelSerializer):\n class Meta:\n model = Institution\n fields = \"__all__\"\n\n\nclass QualificationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Qualification\n fields = \"__all__\"\n\n\nclass TeacherSerializer(serializers.ModelSerializer):\n qualification = QualificationSerializer(read_only=True, many=True)\n class Meta:\n model = Teacher\n fields = \"__all__\"\n\n\nclass StudentClassSerializer(serializers.ModelSerializer):\n form_teacher = TeacherSerializer(read_only=True)\n class Meta:\n model = StudentClass\n fields = '__all__'\n\n\nclass GeoLocationSerializer(serializers.Serializer):\n country = serializers.CharField()\n cardinal_coordinate = serializers.CharField()\n nigeria_geozone = serializers.CharField()\n\n\nclass CountrySerializer(serializers.ModelSerializer):\n class Meta:\n model = Country\n fields = \"__all__\"\n\n \nclass StateSerializer(serializers.ModelSerializer):\n class Meta:\n model = State\n fields = \"__all__\"\n \n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'username', 'id']\n\n\nclass StudentSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n student_class = StudentClassSerializer(read_only=True)\n school = SchoolSerializer(read_only=True)\n state_of_residence = StateSerializer(read_only=True, many=True)\n country = CountrySerializer(read_only=True, many=True)\n state_of_origin = StateSerializer(read_only=True, many=True)\n \n class Meta:\n model = Student\n fields = \"__all__\"\n\n\nclass SubjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Subject\n fields = \"__all__\"\n\n\nclass ResultSerializer(serializers.ModelSerializer):\n subject = SubjectSerializer(read_only=True)\n class Meta:\n model = Result\n fields = (\"student\", \"subject\", \"exam_score\", \"test_score\", \"assignment_score\")\n \n\nclass PerformanceSerializer(serializers.Serializer):\n student = serializers.CharField()\n student_class = serializers.CharField()\n age = serializers.IntegerField()\n percentile = serializers.DecimalField(max_digits=4, decimal_places=2)\n ranking = serializers.IntegerField()\n\n\nclass 
ReportSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Report\n        fields = \"__all__\"\n\n\nclass SubjectEnrollmentByClassSerializer(serializers.Serializer):\n    student_class = serializers.CharField()\n    subject = serializers.CharField()\n    number_of_enrollments = serializers.IntegerField()\n\n\nclass AssessmentSerializer(serializers.ModelSerializer):\n    students = serializers.SerializerMethodField()\n    reports = serializers.SerializerMethodField()\n    performances = serializers.SerializerMethodField()\n\n    def get_students(self, obj):\n        request = self.context['request']\n        user = None\n        teacher = None\n        if request and hasattr(request, 'user'):\n            user = request.user\n        if hasattr(user, 'teacher'):\n            teacher = user.teacher\n        if teacher is None:\n            return []\n        # student_class is a ManyToMany, so filter with __in over its queryset\n        students = Student.objects.filter(school=teacher.school, student_class__in=obj.student_class.all())\n        student_serializer = StudentSerializer(students, many=True)\n        return student_serializer.data\n\n    def get_reports(self, obj):\n        return []\n\n    def get_performances(self, obj):\n        return []\n\n    class Meta:\n        model = Assessment\n        fields = (\"id\", \"uuid\", \"title\", \"assessment_type\",\n                \"assessment_year\", \"school\", \"student_class\",\n                \"reports\", \"performances\",\n                \"students\")\n\n    def create(self, validated_data):\n        class_list = validated_data.pop('student_class')  # an array of class ids\n        school = validated_data.pop('school')\n        assessment = Assessment.objects.create(school=school, **validated_data)\n        assessment.uuid = \"\".join(\"{}\".format(uuid.uuid4()).split('-'))\n        assessment.save()\n        for klass in class_list:\n            assessment.student_class.add(klass)\n        return assessment\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n    counter = serializers.SerializerMethodField()\n\n    class Meta:\n        model = GroupAssessment\n        fields = (\"id\", \"name\", \"description\", \"school\", \"counter\")\n\n    def get_counter(self, obj):\n        return obj.students.count()\n\n    def create(self, validated_data):\n        return GroupAssessment.objects.create(**validated_data)\n" }, { "alpha_fraction": 0.6358974575996399, "alphanum_fraction": 0.6427350640296936, "avg_line_length": 31.5, "blob_id": "7504ac080ecd9dca33989e05e35396ce230b1e44", "content_id": "5d07c766acf0501261c74d83f059043b085d5ea4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "permissive", "max_line_length": 98, "num_lines": 18, "path": "/apps/staff/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from .models import Teacher\n\n\ndef generate_staff_id(school):\n    '''\n    Generate staff_ids for staff using their\n    school short code\n    '''\n    all_teachers = Teacher.objects.filter(school=school)\n    id_list = [int(teacher.staff_id[-3:]) for teacher in all_teachers if teacher.staff_id is not None]\n    id_list.append(0)  # just in case the list is empty\n    last_index = max(id_list)\n    next_index = \"{}\".format(last_index + 1).zfill(3)\n    school_code = school.short_code.upper()\n\n    chars = [school_code, \"STAFF\", next_index]\n    staff_id = '/'.join(chars)\n    return staff_id\n" }, { "alpha_fraction": 0.5657708644866943, "alphanum_fraction": 0.5719943642616272, "avg_line_length": 35.44329833984375, "blob_id": "b2126de044f5c57ce27d9ab66b09ef1d9fc558f8", "content_id": "383d73693c6f65223da30cd63ade5f7639998a7e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3535, "license_type": 
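`generate_staff_id` above builds ids of the form `<SCHOOL_CODE>/STAFF/<3-digit counter>`, and `generate_reg_number` further down in this record does the same with a year-scoped 4-digit counter. A quick standalone illustration of the padding and join (all inputs invented):

```python
# Same zfill/join steps as generate_staff_id (and generate_reg_number
# further down); 'GHS', 7 and 41 are made-up inputs.
staff_id = '/'.join(["GHS", "STAFF", "{}".format(7 + 1).zfill(3)])
reg_number = '/'.join(["GHS", "2018", "{}".format(41 + 1).zfill(4)])
assert (staff_id, reg_number) == ("GHS/STAFF/008", "GHS/2018/0042")
```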
"permissive", "max_line_length": 138, "num_lines": 97, "path": "/apps/students/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nimport uuid\nfrom django.db import transaction\nfrom .models import Student\nfrom institutions.models import StudentClass\nfrom config.models import StudentConfig\nimport datetime\nfrom config.utils import Limit\n\[email protected]\ndef import_student_from_csv(csv_file, teacher):\n \"\"\"\n Import student CSV data.\n\n \"\"\"\n school = teacher.school\n limit = Limit(school.config.plan, school)\n csv_data = []\n ifile = csv_file.read().decode(\"utf-8\")\n for row in ifile.split('\\n'):\n csv_data.append(row.split(','))\n\n result_objects = []\n # Check if headers exists. Skip the first entry if true.\n header_check = ['reg_number', 'surname', 'first_name', 'sex', 'level', 'department', 'year_of_admission', 'course_duration']\n first_row = [i.lower().strip() for i in csv_data[0]]\n # if all(i in first_row for i in header_check):\n csv_data = csv_data[1:]\n\n new_value = 0\n existing_value = 0 # To get the number of records entered\n for row in csv_data:\n # Let's do an extra check to make sure the row is not empty.\n if row:\n student_class = StudentClass.objects.get(class_code=row[4])\n if Student.objects.filter(reg_number=row[0]).exists():\n existing_value+=1\n else:\n if limit.limit_reached(Student):\n raise ValueError(\"You have reached the maximum number of students you can add. Ugrade your plan to add more students\")\n else:\n reg_number = None\n if row[0] == 'NULL':\n reg_number = generate_reg_number(teacher.school)\n else:\n reg_number = row[0]\n user = User.objects.create_user(reg_number, password=reg_number, first_name=row[1], last_name=row[2])\n user.save()\n\n student = Student.objects.create(\n reg_number =reg_number,\n user=user,\n last_name = row[1],\n first_name = row[2],\n school=teacher.school,\n student_class=student_class,\n gender=row[3],\n )\n setup = StudentConfig(student=student)\n setup.save()\n #Create a slug for the student\n create_slug(student)\n new_value+=1\n else:\n pass\n return new_value, existing_value\n\n\ndef create_slug(instance):\n #import slugify\n from django.utils.text import slugify\n\n orig = slugify(instance.last_name)\n if Student.objects.filter(slug=instance.slug).exists():\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n else:\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n\n instance.save()\n\n\ndef generate_reg_number(school):\n '''\n Generate reg_numbers for students using their\n school short code and year of registration\n '''\n year = \"{}\".format(datetime.date.today().year)\n all_students = Student.objects.filter(school=school, date_created__year=year)\n id_list = [int(student.reg_number[-4:]) for student in all_students]\n id_list.append(0) #Just if the list is empty\n last_index = max(id_list)\n next_index = \"{}\".format(last_index+1).zfill(4)\n school_code = school.short_code.upper()\n\n chars = [school_code,year,next_index]\n reg_number = '/'.join(chars)\n return reg_number\n" }, { "alpha_fraction": 0.6472727060317993, "alphanum_fraction": 0.6472727060317993, "avg_line_length": 60.11111068725586, "blob_id": "273704e2de6a9ff629c91d9f7c71436b53ae5d60", "content_id": "4adfa4a40647f18bb01051cd8ace947ae22b8b8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "permissive", "max_line_length": 110, "num_lines": 27, "path": 
"/apps/reports/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n #Reports\n url(r'^batch/(?P<batch_id>\\d+)$', reports, name='list'),\n #url(r'^student/(?P<batch_id>\\d+)$', student_reports, name='student-report'),\n url(r'^my-report-cards$', student_reports, name='own-report'),\n url(r'^remarks/(?P<batch_id>\\d+)$', batch_remark, name='remark'),\n url(r'^delete/(?P<batch_id>\\d+)$', delete_batch, name='delete-batch'),\n url(r'^delete/broadsheet/(?P<broadsheet_id>\\d+)$', delete_broadsheet, name='delete-broadsheet'),\n url(r'^delete/report/(?P<report_id>\\d+)$', delete_report, name='delete-report'),\n url(r'^batch-list$', ReportBatchListView.as_view(), name='batch'),\n url(r'^broadsheets$', BroadSheetListView.as_view(), name='broadsheet'),\n url(r'^new/report$', new_report, name='new-report'),\n url(r'^new/broadsheet$', new_broadsheet, name='new-broadsheet'),\n url(r'^generate/report$', generate_report, name='generate-report'),\n url(r'^generate/broadsheet$', generate_broadsheet, name='generate-broadsheet'),\n url(r'^pdf-report/(?P<student_id>\\d+)/(?P<term>\\d+)/(?P<class_id>\\d+)$', print_report, name='pdf-report'),\n url(r'^broadsheet/(?P<broadsheet_id>\\d+)$', print_broadsheet, name='broadsheet-report'),\n url(r'^remark/(?P<card_id>\\d+)$', single_remark, name='single-remark'),\n url(r'^export$', export_main, name='export'),\n url(r'^psychomotors$', psycho_domain, name='cognitive-domain'),\n url(r'^api/cognitive-data$', load_cog_data, name='cognitive-data'),\n url(r'^export/data$', export_filters, name='export-data'),\n url(r'^api/load-token$', load_token, name='load-token')\n]\n" }, { "alpha_fraction": 0.681649923324585, "alphanum_fraction": 0.688524603843689, "avg_line_length": 30.008195877075195, "blob_id": "655cb827ac64f1d705cf5cef4131dcc4937dad24", "content_id": "27cbd3ded479b27fc436ae940838c4f2bfef2c8c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3782, "license_type": "permissive", "max_line_length": 85, "num_lines": 122, "path": "/apps/insights/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Constants and model choices\nGRADES, DEMOGRAPHICS, PERFORMANCE = range(1,4)\nASSESSMENT_TYPE = (\n (GRADES, 'Grade Assessment'),\n (DEMOGRAPHICS, 'Demographic Assessment'),\n (PERFORMANCE, 'Performance Assessment')\n)\n\nREGRESSION, CLASSIFICATION, CLUSTER = range(1,4)\nML_CHOICES = (\n (REGRESSION, 'Regression'),\n (CLASSIFICATION, 'Classification'),\n (CLUSTER, 'Clustering')\n)\n\nATTENDANCE, ACADEMICS, RATIOS, FACULTY, PSYCHOMOTIVE = range(1,6)\nMEASURE_TYPE = (\n (ATTENDANCE, 'Attendance'),\n (ACADEMICS, 'Academics'),\n (RATIOS, 'Ratios'),\n (FACULTY, 'Faculty'),\n (PSYCHOMOTIVE, 'Psychomotive'),\n)\n\nJSON, CSV, EXCEL = ('json', 'csv', 'excel')\nFILE_TYPE = (\n (JSON, 'json'),\n (CSV, 'csv'),\n (EXCEL, 'excel')\n)\n\n\nclass TimeStampModel(models.Model):\n school = models.ForeignKey('institutions.Institution', null=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\n# Insight Models\nclass Assessment(TimeStampModel):\n uuid = models.CharField(max_length=50, null=True)\n title = models.CharField(max_length=300, null=True)\n assessment_type = models.PositiveIntegerField(choices=ASSESSMENT_TYPE, default=1)\n assessment_year = 
models.DateField()\n student_class = models.ManyToManyField('institutions.StudentClass')\n\n def __str__(self):\n return \"{}\".format(self.title)\n\n def get_assessment_year(self):\n return self.assessment_year.strftime('%Y')\n \n def get_assessment_type(self):\n assessment_dict = dict(ASSESSMENT_TYPE)\n return assessment_dict[self.assessment_type]\n \n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('insights:load-assessment', kwargs={'uuid': self.uuid})\n\n\nclass Evaluation(TimeStampModel):\n ml_models = models.ManyToManyField('insights.MLModels')\n\n def __str__(self):\n return \"Evaluation for {}\".format(self.date_created)\n\n\nclass MLModels(models.Model):\n ml_category = models.PositiveIntegerField(choices=ML_CHOICES, null=True)\n ml_type = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return \"{}-{}\".format(self.ml_type, self.ml_category)\n \n\nclass Measure(TimeStampModel):\n assessment = models.ForeignKey('insights.Assessment', null=True)\n measures = models.ManyToManyField('insights.PerformanceMeasure')\n\n def __str__(self):\n return \"Measures for {}\".format(self.assessment)\n\nclass PerformanceMeasure(models.Model):\n title = models.CharField(max_length=100, null=True)\n measure_type = models.PositiveIntegerField(choices=MEASURE_TYPE, null=True)\n\n def __str__(self):\n return \"{}-{}\".format(self.title, self.measure_type)\n\n\nclass DataSheet(TimeStampModel):\n name = models.CharField(max_length=200, null=True)\n assessment_type = models.PositiveIntegerField(choices=ASSESSMENT_TYPE, null=True)\n file_type = models.CharField(max_length=20, choices=FILE_TYPE, null=True)\n upload_file = models.FileField(upload_to='uploads/%Y/%m/%d', null=True)\n\n def __str__(self):\n return \"{} - {}\".format(self.name, self.assessment_type)\n\n\n\nclass StashAssessment(TimeStampModel):\n assessment = models.ForeignKey('insights.Assessment', null=True)\n measures = models.ForeignKey('insights.Measure', null=True)\n\n def __str__(self):\n return \"Stashed Assessment for {}\".format(self.assessment)\n\n\nclass GroupAssessment(TimeStampModel):\n students = models.ManyToManyField('students.Student', blank=True, null=True)\n name = models.CharField(max_length=255, null=True, unique=True)\n description = models.TextField(blank=True)\n\n def __str__(self):\n return self.name" }, { "alpha_fraction": 0.5503957867622375, "alphanum_fraction": 0.5551451444625854, "avg_line_length": 33.8834342956543, "blob_id": "56ed5bfa6463e7366b60703a4aeb14df644ea311", "content_id": "6f6a582c3798d5be5d96e11af1f707ae68d40893", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5685, "license_type": "permissive", "max_line_length": 117, "num_lines": 163, "path": "/apps/payments/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from payments.models import AccessCard, AccessToken\nfrom config.utils import pin_generator\nfrom students.models import Student\nimport requests\nimport webbrowser\nimport os\ntry:\n import simplejson as json\nexcept:\n import json\n\ndef tokens(school_token, school,session,term):\n students = Student.objects.get_active_students(school=school)\n result = None\n access_token = AccessToken.objects.filter(token=school_token.token)\n #check if the token already exists\n if access_token.exists():\n access_token = access_token[0]\n if access_token.token_application.session != session:\n return False\n if 
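`get_assessment_type` above resolves a stored integer back to its label by turning the choices tuple into a dict. The same trick in isolation, with the values copied from `ASSESSMENT_TYPE`:

```python
# dict() over a Django choices tuple gives an int -> label mapping.
ASSESSMENT_TYPE = (
    (1, 'Grade Assessment'),
    (2, 'Demographic Assessment'),
    (3, 'Performance Assessment'),
)
assert dict(ASSESSMENT_TYPE)[2] == 'Demographic Assessment'
```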
access_token.token_application.term != int(term):\n return False\n try:\n for student in students:\n AccessCard.objects.create(\n student=student,\n access_code='PIN-'+pin_generator(length=12),\n term=access_token.token_application.term,\n year=access_token.token_application.year,\n school_id=school.registration_id,\n school_token=access_token,\n session=access_token.token_application.session\n )\n result = True\n except Exception as e:\n raise ValueError(e)\n return result\n\n\nclass PaystackTransaction:\n\n def __init__(self, api_url='https://api.paystack.co/', secret_key=None):\n self._base_url = api_url\n self._key = secret_key\n if secret_key:\n self._base_url = api_url\n else:\n try:\n self._key = os.environ.get('PAYSTACK_SECRET_KEY', 'sk_test_2757e92b4cd26e9b5d54f361b8b4dec3e6ec410d')\n except Exception as e:\n raise ValueError(e)\n self._CONTENT_TYPE = \"application/json\"\n self.authorization_url = None\n \n def _url(self, path):\n return self._base_url + path\n\n def _headers(self):\n return { \n \"Content-Type\": self._CONTENT_TYPE, \n \"Authorization\": \"Bearer \" + self._key\n }\n \n def _handle_request(self, method, url, data=None):\n try:\n response = requests.request(method, url, headers=self._headers(), data=data)\n if response.json().get('status'):\n return response\n except Exception as e:\n raise ValueError(e)\n \n def initialize(self, email, amount, reference=None, callback_url=None):\n \"\"\"\n Initialize a transaction and returns the response\n \n ARGS:\n email --> Customer's email address\n amount --> Amount to charge\n plan --> optional\n Reference --> optional\n \"\"\"\n\n if not email:\n raise InvalidDataError(\"Customer's Email is required for initialization\") \n\n if reference:\n reference = \"_\".join(reference.split('-'))\n url = self._url(\"/transaction/initialize\")\n payload = {\n \"email\":email,\n \"amount\": amount,\n \"callback_url\": \"{}\".format(callback_url),\n \"reference\": reference\n }\n response = self._handle_request(\"post\", url, json.dumps(payload)).json()\n self.authorization_url = response['data'].get('authorization_url')\n return response\n\n def charge(self, email, auth_code, amount, reference=None):\n \"\"\"\n Charges a customer and returns the response\n \n ARGS:\n auth_code --> Customer's auth code\n email --> Customer's email address\n amount --> Amount to charge\n reference --> optional\n \"\"\"\n\n if not email:\n raise InvalidDataError(\"Customer's Email is required to charge\")\n\n if not auth_code:\n raise InvalidDataError(\"Customer's Auth code is required to charge\") \n \n url = self._url(\"/transaction/charge_authorization\")\n payload = {\n \"authorization_code\":auth_code, \n \"email\":email, \n \"amount\": amount,\n \"reference\": reference\n }\n data = json.dumps(payload)\n return self._handle_request('POST', url, data)\n\n def verify(self, reference):\n \"\"\"\n Verifies a transaction using the provided reference number\n\n args:\n reference -- reference of the transaction to verify\n \"\"\"\n \n reference = str(reference)\n url = self._url(\"/transaction/verify/{}\".format(reference))\n return self._handle_request('GET', url)\n\n def authorize(self, auth_url=None): # pragma: no cover\n \"\"\"\n Open a browser window for client to enter card details.\n Args:\n auth_url (string, optional): Paystack verified authorization_url\n Raises:\n e: Browser Error :(\n error.ValidationError: Bad input.\n Returns:\n None\n \"\"\"\n if not auth_url and not self.authorization_url:\n raise error.ValidationError('This transaction 
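A plausible driving sequence for the `PaystackTransaction` wrapper (a sketch, not taken from the repo: the key, email, amount and reference are dummies, and the amount unit is assumed to be the currency subunit):

```python
# Assumed usage of the wrapper defined above.
txn = PaystackTransaction(secret_key="sk_test_xxx")          # dummy key
resp = txn.initialize(email="[email protected]",
                      amount=500000,                          # assumed subunit
                      reference="order-1234",
                      callback_url="https://example.com/cb")  # dummy URL
txn.authorize()                     # opens resp['data']['authorization_url']
status = txn.verify("order_1234")   # initialize() rewrote '-' to '_'
```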
object does not'\n ' have an authorization url.You must'\n ' provide an authorization url or'\n 'for call the `initialize` method'\n ' this transaction first.')\n\n authorization_url = (\n lambda auth_url: auth_url if auth_url else self\n .authorization_url)(auth_url)\n\n try:\n webbrowser.open(authorization_url, new=0, autoraise=True)\n except webbrowser.Error as e:\n raise e" }, { "alpha_fraction": 0.49659863114356995, "alphanum_fraction": 0.5296404361724854, "avg_line_length": 33.33333206176758, "blob_id": "a2fdee81622719d00a79d1a182ea1b0bb1a731cd", "content_id": "48f0618b6c837b3fa695cd67d2b8036d029ae188", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1029, "license_type": "permissive", "max_line_length": 147, "num_lines": 30, "path": "/sani_app/templates/password_reset_done.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'base_auth.html' %}\n{% load static %}\n{% block title %}Password Reset Confirmed{% endblock %}\n{% block inner %}\n<style>\ninput {\n border-radius: 4px;\n width: 70%;\n}\n</style>\n<div class=\"container\">\n <div style=\"text-align: center; margin-top: 55px;\">\n <a class=\"logo\" href=\"{% url 'home' %}\">\n <img src=\"{% static 'img/logo.png' %}\" alt=\"logo\" width=\"150\">\n </a>\n <h2 style=\"color:#065174; font-weight: 200;\">Password Reset Successful</h2>\n </div>\n <div class=\"row\">\n <div class=\"col-md-6\" style=\"position: absolute; right: 0; left: 0; top: 190px; padding: 15px; border: 1px solid #f5f5f5; margin: 0 auto;\">\n <div>\n <div class=\"panel panel-default\">\n <div class=\"panel-body\">\n <h4>An email has been sent to your email address. Login to your mail to complete the reset process</h4>\n </div>\n </div>\n </div>\n </div>\n </div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.4125053286552429, "alphanum_fraction": 0.42210841178894043, "avg_line_length": 42.39814758300781, "blob_id": "d32c97653bd0b60e62fccb114b1057237287e63e", "content_id": "d65c1a938d268b0f34e8f3121f2f77f25396de4d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4686, "license_type": "permissive", "max_line_length": 228, "num_lines": 108, "path": "/apps/reports/templates/reports/remarks.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'reports/report_batch.html' %}\n{% block extracss %}\n<style>\n input, select, textarea{\n height: 30px !important;\n font-size: 14px;\n }\n</style>\n{% endblock %}\n{% block inner-content %}\n<div id=\"class-page-content\">\n\n <div class=\"teachers table\">\n <div class=\"header\">\n <h1>Remarks</h1>\n <div class=\"separator\"></div>\n </div>\n </div>\n <div class=\"\">\n <form action=\"\" method=\"POST\">{% csrf_token %}\n <div class=\"panel sub-panel add-padding\" style=\"border-radius: 4px; border: 1px solid #e5e5e5; padding: 10px;\">\n <div class=\"row\">\n <div class=\"col-md-3\">\n <div class=\"form-group\">\n <label for=\"id_subject\">Resumption Date</label>\n <input class=\"form-control in-search input-sm\" type=\"date\" value=\"{{batch.school_resume_date|date:\"Y-m-d\"}}\" name=\"resume_date\" id=\"id_date\"> \n </div>\n </div>\n <div class=\"col-md-3\">\n <div class=\"form-group\">\n <label for=\"id_class\">Term</label>\n <input name class=\"form-control in-search input-sm\" type=\"text\" disabled=\"disabled\" value=\"{{batch.term}}\">\n </div>\n </div>\n <div class=\"col-md-3\">\n <div 
class=\"form-group\">\n <label for=\"id_term\">Session</label>\n <input class=\"form-control in-search input-sm\" type=\"text\" disabled=\"disabled\" value=\"{{batch.session}}\">\n </div>\n </div>\n </div>\n </div>\n <div class=\"sub-panel add-padding\">\n {{batch_remark_formset.management_form}}\n {% for batch_form in batch_remark_formset %}\n <div class=\"form-group batch_formset\" style=\"border-radius: 4px; border: 1px solid #e5e5e5; padding-bottom: 10px;\">\n <div class=\"row\" style=\"padding: 10px 15px;\">\n <div class=\"col-md-3\">\n <label for=\"\">Student</label>\n {{batch_form.student}}\n {{batch_form.id}}\n </div>\n <div class=\"col-md-3\">\n <label for=\"\">Form Teacher Remarks</label>\n {{batch_form.form_teacher_remark}}\n <br>\n </div>\n {% with request.user.teacher as teacher %}\n {% if teacher.is_admin %}\n <div class=\"col-md-3\">\n <label for=\"\">Head Remarks</label>\n {{batch_form.head_remark}}\n <br>\n </div>\n <div class=\"col-md-1\">\n <label for=\"\">Verify?</label>\n <span style=\"\"><label class=\"toggle\">\n <input hidden=\"hidden\" id=\"{{batch_form.verified.id_for_label}}\" {% if batch_form.verified.value %}checked{% endif %} type=\"checkbox\" name=\"{{batch_form.verified.html_name}}\" class=\"toggle_input\">\n <div class=\"toggle_control\"></div>\n </label></span>\n <br>\n </div>\n {% if batch.term == '3' %}\n <div class=\"col-md-2\">\n <label for=\"\">Promoted To...</label>\n {{batch_form.promoted_to}}\n <br>\n </div>\n {% endif %}\n {% endif %}\n {% endwith %}\n </div>\n </div>\n {% endfor %}\n </div>\n <div class=\"sub-panel add-padding\" style=\"border-radius: 4px; border: 1px solid #e5e5e5; padding: 10px; margin-top: 5px;\">\n\t\t\t\t<button class=\"btn btn-main\" type=\"submit\">Save Remarks</button>\n\t\t\t</div>\n </form>\n </div>\n</div>\n{% endblock %}\n{% block extrajs %}\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery.formset/1.2.0/jquery.formset.js\"></script>\n<script>\n $('#id_student').select2();\n</script>\n<script>\n\t$('.batch_formset').formset({\n\t\taddText: 'Add',\n\t\tdeleteText: 'Remove',\n });\n\n $('#id_class').change(function() {\n $('#btn_filter').click();\n })\n</script>\n{% endblock %}" }, { "alpha_fraction": 0.5582503080368042, "alphanum_fraction": 0.5627255439758301, "avg_line_length": 30.779815673828125, "blob_id": "2b9d2ab3c94cfe8d219c72671faada10db960fa6", "content_id": "63ab0db4485c18f0dcfdf9869c362c2c5c7f29e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6927, "license_type": "permissive", "max_line_length": 107, "num_lines": 218, "path": "/apps/config/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from decimal import Decimal\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail, EmailMultiAlternatives\nfrom smtplib import SMTPException\nimport random\n\n\ndef create_user(first_name, last_name):\n \"\"\"\n Creates a user with a username generated from the supplied\n\n ARGS:\n `first_name` and `last_name`.\n\n DESCRIPTION:\n Creates a user profile\n\n \"\"\"\n user = None\n user = User.objects.create(first_name=first_name, last_name=last_name)\n return user\n\n\ndef decimal_add(x, y):\n '''\n This is an operator for a decimal addition.\n '''\n return Decimal(x) + Decimal(y)\n\n\nclass ChartData(object):\n\n @classmethod\n def all_result(cls, student, clazz=None, term=None):\n data = {'subject': [], 'score': []}\n\n @classmethod\n def class_average(cls, results, 
student_class):\n        data = {'class': [], 'avg': [], 'best': []}\n        for clazz in student_class:\n            score = []\n            best = 0\n            for res in results:\n                if res.student_class == clazz:\n                    if res.get_score > best:\n                        best = res.get_score\n                    score.append(res.get_score)\n            data['class'].append(\"{}\".format(clazz))\n            try:\n                get_avg = \"%.2f\" % (sum(score) / len(score))\n                data['avg'].append(float(get_avg))\n                data['best'].append(float(best))\n            except ZeroDivisionError:\n                data['avg'].append(0.0)\n                data['best'].append(0.0)\n        return data\n\n    @classmethod\n    def class_demographic(cls, students, student_class):\n        data = {'class': [], 'male': [], 'female': []}\n        for clazz in student_class:\n            male = 0\n            female = 0\n            for student in students.filter(student_class=clazz):\n                if student.gender == 'M':\n                    male += 1\n                elif student.gender == 'F':\n                    female += 1\n            data['class'].append(\"{}\".format(clazz))\n            data['male'].append(convert_to_percent(male, students.filter(student_class=clazz).count()))\n            data['female'].append(convert_to_percent(female, students.filter(student_class=clazz).count()))\n        return data\n\n    @classmethod\n    def current_report(cls, student, results):\n        data = {'subjects': [], 'first term': [], 'second term': [], 'third term': []}\n        terms = ['first term', 'second term', 'third term']\n        for i in range(len(terms)):\n            for result in results.filter(term=(i + 1)):\n                data['subjects'].append(result.subject.short_code)\n                data[terms[i]].append(float(result.get_score))\n        return data\n\n\ndef convert_to_percent(value, total):\n    try:\n        percent = \"%.2f\" % ((value / total) * 100)\n        return float(percent)\n    except ZeroDivisionError:\n        return 0.0\n\n\ndef pin_generator(length=8):\n    \"\"\"Generates alphanumeric ids or keys\n\n    Keyword Arguments:\n        length {int} -- length of the pin (default: {8})\n\n    Returns:\n        {string} -- the pin generated\n    \"\"\"\n    # List of characters to generate token from\n    num = '0123456789'\n    upper_alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    lower_alpha = upper_alpha.lower()\n    gen_base = [num, upper_alpha, lower_alpha]\n    alphanum = ''.join(gen_base)\n\n    token = ''\n\n    for x in range(length):\n        # start at index 0 so the first character ('0') can also be drawn\n        token += alphanum[random.randint(0, len(alphanum) - 1)]\n    return token\n\n\nclass Limit:\n\n    def __init__(self, plan, school):\n        self._plan = plan\n        self._exceeded = False\n        self._school = school\n\n    def limit_reached(self, model):\n        limits = {\n            'student': self._plan.student_limit,\n            'teacher': self._plan.staff_limit,\n            'result': self._plan.result_limit\n        }\n        get_queryset = model.objects.filter(school=self._school)\n        if get_queryset.count() >= limits[model._meta.verbose_name.lower()]:\n            return True\n        return False\n\n\nclass MailHandler(object):\n\n    def __init__(self, context=None):\n        \"\"\"MailHandler constructor\n\n        Keyword Arguments:\n            context {dict} -- context dictionary for the mail (default: {None})\n        \"\"\"\n\n        if context is not None:\n            self._context_dict = context\n        self._txt_message = None\n        self._html_message = None\n        self._html_template = None\n        self._txt_template = None\n\n    def initialize_templates(self, txt_template, html_template, render_to_string):\n        \"\"\"Initialize and add all the necessary templates for the mail\n\n        Arguments:\n            txt_template {string} -- text file\n            html_template {string} -- html file\n            render_to_string {function} -- render_to_string function as a dependency injection\n\n        Returns:\n            boolean -- response status\n        \"\"\"\n        try:\n            # initialize all the templates and messages\n            self._txt_template = txt_template\n            self._html_template = html_template\n            self._txt_message = 
render_to_string(self._txt_template, self._context_dict)\n            self._html_message = render_to_string(self._html_template, self._context_dict)\n        except Exception:\n            return False\n        return True\n\n    def simple_mail(self, subject, message, sender, recipients, cc=None):\n        \"\"\"Send a simple mail using the django send_mail function\n\n        Arguments:\n            subject {string} -- the subject of the mail\n            message {string} -- the body of the mail\n            sender {string} -- sender email\n            recipients {string} -- recipient email\n        \"\"\"\n        all_recipients = [recipients]\n\n        if cc:\n            all_recipients.append(cc)\n        try:\n            send_mail(subject, message, sender, all_recipients, fail_silently=False)\n        except SMTPException:\n            return False\n        return True\n\n    def mime_mail(self, subject, sender, recipients, cc=None):\n        \"\"\"Send a mail with context\n\n        Arguments:\n            subject {string} -- the subject of the mail\n            sender {string} -- sender email address\n            recipients {string} -- recipient email address\n\n        Returns:\n            boolean -- status\n        \"\"\"\n        all_recipients = [recipients]\n\n        if cc:\n            all_recipients.append(cc)\n        try:\n            msg = EmailMultiAlternatives(subject, self._txt_message, sender, all_recipients)\n            msg.attach_alternative(self._html_message, \"text/html\")\n            msg.send()\n            return True\n        except SMTPException:\n            return False" }, { "alpha_fraction": 0.7266666889190674, "alphanum_fraction": 0.7266666889190674, "avg_line_length": 28.933332443237305, "blob_id": "4dbd73b789fcacf7b67f83e8ea8b39e118426183", "content_id": "b5b0b3c5d9798a188b2ee85c7cfdb58bafad3286", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "permissive", "max_line_length": 76, "num_lines": 15, "path": "/apps/api/errors.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "\nclass InsightAPIError(Exception):\n    \"\"\"This is a base error for the Insight API\"\"\"\n    pass\n\nclass InvalidDataError(InsightAPIError):\n    \"\"\"Raised when invalid data is passed to the API\"\"\"\n    pass\n\nclass MissingAuthorizationKeyError(InsightAPIError):\n    \"\"\"There is no authorization key found\"\"\"\n    pass\n\nclass NotImpementedError(InsightAPIError):\n    \"\"\"This method needs to be implemented in the child class\"\"\"\n    pass\n" }, { "alpha_fraction": 0.5685483813285828, "alphanum_fraction": 0.600806474685669, "avg_line_length": 25.571428298950195, "blob_id": "bc0d60337c4d5907fc687e656b0d19dc48eda568", "content_id": "8015556e6587b65802fb5d379a9ebe4fdfae64ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "permissive", "max_line_length": 115, "num_lines": 28, "path": "/apps/payments/migrations/0002_auto_20180523_0138.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n        ('students', '0001_initial'),\n        ('payments', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='accesscard',\n            name='student',\n            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, 
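How `MailHandler` is presumably meant to be driven: `render_to_string` is injected as a dependency, then either the templated or the plain path sends. A sketch with made-up template paths and addresses:

```python
# Assumed usage of MailHandler; template paths and addresses are invented.
from django.template.loader import render_to_string

handler = MailHandler(context={'school': 'Sample High'})
if handler.initialize_templates('mail/welcome.txt', 'mail/welcome.html',
                                render_to_string):
    handler.mime_mail('Welcome', '[email protected]', '[email protected]')
else:
    handler.simple_mail('Welcome', 'Plain-text fallback body',
                        '[email protected]', '[email protected]')
```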
to='students.Student'),\n ),\n migrations.AlterUniqueTogether(\n name='accesscard',\n unique_together=set([('student', 'term', 'year')]),\n ),\n ]\n" }, { "alpha_fraction": 0.5538461804389954, "alphanum_fraction": 0.5942307710647583, "avg_line_length": 25, "blob_id": "4dfb900327ed86f153e2d77604fa3d3a7c7f2c8e", "content_id": "7c12750fe25629b919d6256200867bb8acc5ebd8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "permissive", "max_line_length": 111, "num_lines": 20, "path": "/apps/students/migrations/0003_auto_20180923_1629.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-09-23 14:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('students', '0002_student_special_aids'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='student',\n name='special_aids',\n field=models.CharField(choices=[('Deaf', 'Deaf'), ('Blind', 'Blind')], default='No', max_length=5),\n ),\n ]\n" }, { "alpha_fraction": 0.5386715531349182, "alphanum_fraction": 0.5457208156585693, "avg_line_length": 36.84774398803711, "blob_id": "8775747700081c2a28df6e98b9c2375612df93a1", "content_id": "168674113928e086dfffcd5bcfcce735c441d2f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20144, "license_type": "permissive", "max_line_length": 134, "num_lines": 532, "path": "/apps/results/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from results.models import *\nfrom django.db import transaction\nfrom subjects.models import Subject\nfrom students.models import Student\nfrom institutions.models import StudentClass\nimport operator\nfrom collections import OrderedDict\nfrom config.utils import Limit\nfrom django.views.decorators.cache import cache_page\n\n\ndef pin_generator(length=8):\n \n '''\n This is to generate alphanumeric ids. \n the addition of non-alphameric chars increases the uniqueness of the pin \n e.g Qw21#d seem more unique than 1242\n \n It can also serve as a secrete key generator of any length.\n '''\n \n import random\n \n num = '0123456789'\n chars = '@#$&'\n upper_alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n lower_alpha = upper_alpha.lower()\n gen_base = [num,chars,upper_alpha,lower_alpha]\n alphanum = ''.join(gen_base)\n \n pin = ''\n \n # generate some random characters from the alphanum string\n for x in range(length):\n pin += alphanum[random.randint(1,len(alphanum)-1)]\n \n return pin \n\n\ndef update_results(school):\n for result in Result.objects.filter(school=school):\n result.save()\n\n\[email protected]\ndef import_test_from_csv(csv_file, teacher, is_admin=False):\n \"\"\"\n Import result CSV data.\n\n We'll process all rows first and create Result model objects from them\n and perform a bulk create. This way, no records will be inserted unless\n all records are good.\n \n \"\"\"\n school = teacher.school\n limit = Limit(school.config.plan, school)\n csv_data = []\n ifile = csv_file.read().decode(\"utf-8\")\n for row in ifile.split('\\n'):\n csv_data.append(row.split(','))\n \n result_objects = []\n # Check if headers exists. 
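Reconstructed from `header_check` and the row indexing in the importers that follow, the CSV the test importer expects looks like this (sample values invented):

```python
# Shape of the input for import_test_from_csv: header row, then
# reg_number, subject short code, score, class code, term, session.
csv_text = (
    "student,subject,test_score,class,term,session\n"
    "GHS/2018/0042,MTH,18.5,JSS1A,1,2018/2019"
)
rows = [line.split(',') for line in csv_text.split('\n')][1:]  # skip header
reg_number, subject_code, score, class_code, term, session = rows[0]
```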
Skip the first entry if true.\n    header_check = ['student', 'subject', 'test_score', 'class', 'term', 'session']\n    first_row = [i.lower().strip() for i in csv_data[0]]\n    # if all(i in first_row for i in header_check):\n    csv_data = csv_data[1:]\n\n    new_value = 0  # To get the number of records entered\n    update_value = 0\n\n    for row in csv_data:\n        # Let's do an extra check to make sure the row is not empty.\n        if row:\n            if limit.limit_reached(Result):\n                raise ValueError(\"You have exceeded the number of results you can upload. Upgrade your plan for more features\")\n            else:\n                student = Student.objects.get(reg_number=row[0])\n                subject = Subject.objects.get(short_code=str(row[1]).upper())\n                student_class = StudentClass.objects.get(class_code=row[3])\n                existing = Test.objects.filter(student=student, subject=subject, student_class=student_class, term=row[4])\n                if existing.count() > 0:\n                    # bind the row once; indexing the queryset twice would\n                    # fetch two separate instances and lose the update\n                    test = existing[0]\n                    if test.test_score == 0.0:\n                        test.test_score = row[2]\n                        test.save()\n                    update_value += 1\n                else:\n                    Test.objects.create(\n                        student=student,\n                        subject=subject,\n                        school=teacher.school,\n                        test_score=row[2],\n                        student_class=student_class,\n                        term=row[4],\n                        session=row[5],\n                    )\n                    new_value += 1\n    return new_value, update_value\n\n\[email protected]\ndef import_all_from_csv(csv_file, teacher, is_admin=False):\n    \"\"\"\n    Import result CSV data.\n\n    We'll process all rows first and create Result model objects from them\n    and perform a bulk create. This way, no records will be inserted unless\n    all records are good.\n\n    \"\"\"\n    school = teacher.school\n    limit = Limit(school.config.plan, school)\n    csv_data = []\n    ifile = csv_file.read().decode(\"utf-8\")\n    for row in ifile.split('\n'):\n        csv_data.append(row.split(','))\n\n    result_objects = []\n    # Check if headers exists. Skip the first entry if true.\n    header_check = ['student', 'subject', 'assignment_score', 'test_score', 'exam_score', 'class', 'term', 'session']\n    first_row = [i.lower().strip() for i in csv_data[0]]\n    if all(i in first_row for i in header_check):\n        csv_data = csv_data[1:]\n\n    new_value = 0  # To get the number of records entered\n    update_value = 0\n\n    for row in csv_data:\n        # Let's do an extra check to make sure the row is not empty.\n        if row:\n            if limit.limit_reached(Result):\n                raise ValueError(\"You have exceeded the number of results you can upload. 
Upgrade your plan for more features\")\n            else:\n                student = Student.objects.get(reg_number=row[0])\n                subject = Subject.objects.get(short_code=str(row[1]).upper())\n                student_class = StudentClass.objects.get(class_code=row[5])\n                if not is_admin:\n                    subject = Subject.objects.get(short_code=str(row[1]).upper(), teachers=teacher)\n                existing = Result.objects.filter(student=student, subject=subject,\n                                    student_class=student_class, term=row[6])\n                if existing.count() > 0:\n                    # bind the row once so all updates land on one instance\n                    result = existing[0]\n                    if result.test_score == 0.0:\n                        result.test_score = row[3]\n                    if result.exam_score == 0.0:\n                        result.exam_score = row[4]\n                    if result.assignment_score == 0.0:\n                        result.assignment_score = row[2]\n                    result.save()\n                    update_value += 1\n                else:\n                    Result.objects.create(\n                        student=student,\n                        subject=subject,\n                        school=teacher.school,\n                        assignment_score=row[2],\n                        test_score=row[3],\n                        exam_score=row[4],\n                        student_class=student_class,\n                        term=row[6],\n                        session=row[7],\n                    )\n                    new_value += 1\n    return new_value, update_value\n\n\[email protected]\ndef import_assignment_from_csv(csv_file, teacher, is_admin=False):\n    \"\"\"\n    Import result CSV data.\n\n    We'll process all rows first and create Result model objects from them\n    and perform a bulk create. This way, no records will be inserted unless\n    all records are good.\n\n    \"\"\"\n    school = teacher.school\n    limit = Limit(school.config.plan, school)\n    csv_data = []\n    ifile = csv_file.read().decode(\"utf-8\")\n    for row in ifile.split('\n'):\n        csv_data.append(row.split(','))\n\n    result_objects = []\n    # Check if headers exists. Skip the first entry if true.\n    header_check = ['student', 'subject', 'score', 'class', 'term', 'session']\n    first_row = [i.lower().strip() for i in csv_data[0]]\n    csv_data = csv_data[1:]\n\n    count_value = 0  # To get the number of records entered\n    found_value = 0\n\n    for row in csv_data:\n        # Let's do an extra check to make sure the row is not empty.\n        try:\n            if row:\n                if limit.limit_reached(Result):\n                    raise ValueError(\"You have exceeded the number of results you can upload. Upgrade your plan for more features\")\n                else:\n                    student = Student.objects.get(reg_number=row[0])\n                    subject = Subject.objects.get(short_code=str(row[1]).upper(), teachers=teacher)\n                    student_class = StudentClass.objects.get(class_code=row[3])\n                    assignment = Assignment.objects.filter(student=student, subject=subject, student_class=student_class, term=row[4])\n                    if assignment.exists():\n                        # bind the row once so the updated score is saved\n                        assignment_obj = assignment[0]\n                        assignment_obj.assignment_score = row[2]\n                        assignment_obj.save()\n                        found_value += 1\n                    else:\n                        Assignment.objects.create(\n                            student=student,\n                            subject=subject,\n                            school=teacher.school,\n                            assignment_score=row[2],\n                            student_class=student_class,\n                            term=row[4],\n                            session=row[5]\n                        )\n                        count_value += 1\n        except Exception as e:\n            raise ValueError(e)\n    return count_value, found_value\n\n\[email protected]\ndef import_exam_from_csv(csv_file, teacher, is_admin=False):\n    \"\"\"\n    Import result CSV data.\n\n    We'll process all rows first and create Result model objects from them\n    and perform a bulk create. This way, no records will be inserted unless\n    all records are good.\n\n    \"\"\"\n    school = teacher.school\n    limit = Limit(school.config.plan, school)\n    csv_data = []\n    ifile = csv_file.read().decode(\"utf-8\")\n    for row in ifile.split('\n'):\n        csv_data.append(row.split(','))\n\n    result_objects = []\n    # Check if headers exists. 
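The importers above hand-roll a per-row update-or-create. Django ships the same intent as a single call; a sketch of the equivalent (not how the repo does it, and `student`, `subject`, `student_class`, `term` and `exam_score` are assumed to be bound as in the loops above):

```python
# Equivalent built-in, shown for comparison only.
obj, created = Result.objects.update_or_create(
    student=student, subject=subject, student_class=student_class,
    term=term, defaults={'exam_score': exam_score},
)
```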
Skip the first entry if true.\n    header_check = ['student', 'subject', 'score', 'class', 'term', 'session']\n    first_row = [i.lower().strip() for i in csv_data[0]]\n\n    csv_data = csv_data[1:]\n\n    new_value = 0  # To get the number of records entered\n    updated_value = 0\n    for row in csv_data:\n        # Let's do an extra check to make sure the row is not empty.\n        if row:\n            if limit.limit_reached(Result):\n                raise ValueError(\"You have exceeded the number of results you can upload. Upgrade your plan for more features\")\n            else:\n                student = Student.objects.get(reg_number=row[0])\n                subject = Subject.objects.get(short_code=str(row[1]).upper())\n                student_class = StudentClass.objects.get(class_code=row[3])\n                existing = Examination.objects.filter(student=student, subject=subject, student_class=student_class, term=row[4])\n                result = Result.objects.filter(student=student, subject=subject, student_class=student_class, term=row[4])\n                if result.exists():\n                    rs = Result.objects.get(pk=result[0].id)\n                    rs.exam_score = row[2]\n                    rs.save()\n                else:\n                    Result.objects.create(\n                        student=student,\n                        subject=subject,\n                        school=teacher.school,\n                        student_class=student_class,\n                        term=row[4],\n                        session=row[5],\n                        exam_score=row[2],\n                        signed_by=teacher\n                    )\n                if existing.exists():\n                    # bind the row once so the updated score is saved\n                    exam_obj = existing[0]\n                    exam_obj.exam_score = row[2]\n                    exam_obj.save()\n                    updated_value += 1\n                else:\n                    exam = Examination(\n                        student=student,\n                        subject=subject,\n                        school=teacher.school,\n                        exam_score=row[2],\n                        student_class=student_class,\n                        term=row[4],\n                        session=row[5],\n                    )\n                    exam.save()\n                    new_value += 1\n    return new_value, updated_value\n\n\nclass StudentReport(object):\n\n    @classmethod\n    def student_result(cls, student, school, term=None, year=None):\n        data = {}\n        results = Result.objects.filter(school=school, student=student)\n        if term is not None:\n            results = results.filter(term=term, date_created__year=year)\n            data['student'] = student\n            data['results'] = results\n            data['total'] = sum([result.get_score for result in results])\n            try:\n                data['average'] = data['total'] / results.count()\n            except ZeroDivisionError:\n                data['average'] = 0.0\n        else:\n            for i in range(1, 4):\n                temp_data = {}\n                # filter from the full queryset each time; reassigning\n                # results would narrow every later term to the earlier one\n                term_results = results.filter(term=i)\n                temp_data['total'] = sum([result.get_score for result in term_results])\n                try:\n                    temp_data['average'] = temp_data['total'] / term_results.count()\n                except ZeroDivisionError:\n                    temp_data['average'] = 0.0\n                temp_data['results'] = term_results\n                data['student'] = student\n                data['term-' + str(i)] = temp_data\n        return data\n\n    @classmethod\n    def termly_report(cls, student, term=None, student_class=None, session=None):\n        temp_data = {}\n        try:\n            results = Result.objects.filter(student=student, term=term, student_class=student_class)\n            total_points = sum([result.get_score for result in results])\n            try:\n                average = float(total_points / results.count())\n            except ZeroDivisionError:\n                average = 0\n            temp_data['total'] = total_points\n            # keep the average numeric so callers can sort on it\n            temp_data['average'] = float(\"%.2f\" % average)\n            temp_data['results'] = results\n            # student may be a pk (see students_termly_reports); only\n            # Student instances carry a school attribute\n            if hasattr(student, 'school'):\n                temp_data['grade'] = grade(average, student.school)\n        except Exception:\n            pass\n        return temp_data\n\n    @classmethod\n    def students_termly_reports(cls, student_class, term=None, session=None):\n        \"\"\"Returns a sorted dictionary of all the student's result for this term\n\n        Arguments:\n            student_class {StudentClass} -- The class of the students\n\n        Keyword Arguments:\n            term {integer} -- report term (default: {None})\n            session {string} -- report session (default: {None})\n\n        Returns:\n            dict -- dictionary list of all students\n        \"\"\"\n\n        year = session.split('/')  # split the session into its two years\n\n        students = []\n        for student in 
Result.objects.filter(student_class=student_class,\n                            term=term, date_created__year__in=year).values('student').distinct():\n            students.append(student['student'])  # keep the pk only\n\n        # get the data from the database now\n        data = {}\n        for student_pk in students:\n            # a pk works for the FK filter inside termly_report\n            report = cls.termly_report(student_pk, term, student_class, session)\n\n            # we need to pass in something readable to the key of the data\n            student_id = \"id_{}\".format(student_pk)\n            data[student_id] = [report.get('total', 0.0), report.get('average', 0.0)]\n\n        # sort the dictionary by the average\n        try:\n            sorted_data = OrderedDict(sorted(data.items(), key=lambda x: x[1][1], reverse=True))\n        except Exception:\n            sorted_data = {}\n        return sorted_data\n\n    @classmethod\n    def termly_average(cls, term=None, session=None):\n        pass\n\n    @classmethod\n    def overall_grade(cls, student, term=None, year=None):\n        temp_data = {}\n        results = Result.objects.filter(student=student)\n        try:\n            if term is not None:\n                results = results.filter(term=term)\n            if year is not None:\n                results = results.filter(date_created__year=year)\n            total_points = sum([result.get_score for result in results])\n            try:\n                average = float(total_points / results.count())\n            except ZeroDivisionError:\n                average = 0\n            temp_data['total'] = total_points\n            temp_data['average'] = float(\"%.2f\" % average)\n            temp_data['results'] = results\n            temp_data['grade'] = grade(average, student.school)\n        except Exception:\n            pass\n        return temp_data\n\n    @classmethod\n    def get_student_position(cls, student, student_class, term=None, session=None):\n        \"\"\"Return the student position in a particular class\n\n        Arguments:\n            student {object} -- student object\n            student_class {object} -- class object\n\n        Keyword Arguments:\n            term {integer} -- term value (default: {None})\n            session {string} -- session value (default: {None})\n\n        Returns:\n            integer -- student's position\n        \"\"\"\n\n        try:\n            # coerce the right format for the student id in the sorted dictionary\n            student_id = \"id_{}\".format(student.id)\n\n            sorted_data = cls.students_termly_reports(student_class, term=term, session=session)\n            return list(sorted_data.keys()).index(student_id) + 1\n        except Exception:\n            return -1\n\n    @classmethod\n    def board(cls, school, term=None):\n        \"\"\"Progress award board data for a particular school\n\n        Arguments:\n            school {integer} -- school id\n\n        Keyword Arguments:\n            term {integer} -- term number (default: {None})\n\n        Returns:\n            dict -- sorted dictionary of students grade\n        \"\"\"\n\n        import datetime\n        current_year = datetime.date.today().year\n        students = Student.objects.filter(school=school)\n        # default missing averages to 0.0 so the sort never compares None\n        data = {student: cls.overall_grade(student, term=term, year=current_year).get('average', 0.0) for student in students}\n        sorted_data = OrderedDict(sorted(data.items(), key=operator.itemgetter(1), reverse=True))\n        return sorted_data\n\n    @classmethod\n    def get_data(cls, student_class, session=None, term=None):\n        \"\"\"Returns the students data in a particular class\n\n        Arguments:\n            student_class {object} -- student class object\n\n        Keyword Arguments:\n            session {string} -- session (default: {None})\n            term {integer} -- term value (default: {None})\n\n        Returns:\n            dict -- students data\n        \"\"\"\n\n        data = {'students': [], 'header': [], 'scores': [], 'averages': [], 'position': []}\n        sorted_data = cls.students_termly_reports(student_class, term=term, session=session)\n        results = Result.objects.filter(student_class=student_class, term=term).select_related('student', 'student_class', 'subject')\n        subjects = 
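The ranking step in `students_termly_reports` (and `board`) is just a value-sort frozen into an `OrderedDict`. With toy `[total, average]` pairs:

```python
# Same sort as students_termly_reports, on invented data.
from collections import OrderedDict

data = {'id_3': [412, 68.7], 'id_1': [455, 75.8], 'id_2': [455, 75.8]}
ranked = OrderedDict(sorted(data.items(), key=lambda x: x[1][1], reverse=True))
assert list(ranked) == ['id_1', 'id_2', 'id_3']   # ties keep insertion order
```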
@classmethod\n    def board(cls, school, term=None): \n        \"\"\"Progress award board data for a particular school\n        \n        Arguments:\n            school {integer} -- school id\n        \n        Keyword Arguments:\n            term {integer} -- term number (default: {None})\n        \n        Returns:\n            dict -- sorted dictionary of students' grades\n        \"\"\"\n\n        import datetime\n        current_year = datetime.date.today().year\n        students = Student.objects.filter(school=school)\n        data = {student:cls.overall_grade(student, term=term, year=current_year).get('average', 0.0) for student in students}\n        sorted_data = OrderedDict(sorted(data.items(), key=operator.itemgetter(1), reverse=True))\n        return sorted_data\n    \n    @classmethod\n    def get_data(cls, student_class, session=None, term=None):\n        \"\"\"Returns the students' data in a particular class\n        \n        Arguments:\n            student_class {object} -- student class object\n        \n        Keyword Arguments:\n            session {string} -- session (default: {None})\n            term {integer} -- term value (default: {None})\n        \n        Returns:\n            dict -- students data\n        \"\"\"\n\n        data = {'students': [], 'header': [], 'scores': [], 'averages': [], 'position': []}\n        sorted_data = cls.students_termly_reports(student_class, term=term, session=session)\n        results = Result.objects.filter(student_class=student_class, term=term).select_related('student', 'student_class', 'subject')\n        subjects = sorted(set([result.subject.name for result in results])) # stable order so score columns line up with the header\n        position = get_position(sorted_data)\n\n        # load the students' data\n        for student_index, [total, average] in sorted_data.items():\n            student = Student.objects.get(id=student_index.split('_')[1])\n            \n            # collect the scores in the same subject order as the header\n            scores = []\n            for result in results.filter(student=student).order_by('subject__name'):\n                scores.append(result.get_score)\n            \n            payload = {\n                'name': student.full_name,\n                'reg_number': student.reg_number,\n                'position': next(position),\n                'average': average,\n                'total': sum(scores),\n            }\n            rep = Repr(scores, **payload)\n            data['students'].append(rep)\n        data['obtainable'] = len(subjects) * 100\n        data['class_no'] = len(sorted_data)\n        for x in subjects:\n            data['header'].append(x)\n        return data\n\n\ndef grade(score, school):\n    score = int(score)\n    grades = Grading.objects.filter(institution=school)\n    matches = [g.caption.upper() for g in grades if score in range(g.start, g.end+1)]\n    \n    return matches[0] if matches else '' # avoid an IndexError when no grade band covers the score\n\ndef get_position(sorted_data):\n    '''\n    [:Generator] Return students' positions from sorted report data.\n    This is to factor in the possibility of ties\n    '''\n    lst = list(sorted_data.values())\n    new_list = []\n    current = 0\n    prev = 0\n    for i in range(len(sorted_data)):\n        if i > 0 and lst[i] == lst[i-1]: # guard i > 0 so the first entry never compares against lst[-1]\n            prev = current+1\n            new_list.append(prev)\n        else:\n            new_list.append(i+1)\n            current = i\n    for i in new_list:\n        yield i\n\n\nclass Repr:\n    def __init__(self, scores, **kwargs):\n        self.name = kwargs.pop('name')\n        self.reg_number = kwargs.pop('reg_number')\n        self.position = kwargs.pop('position')\n        self.average = kwargs.pop('average')\n        self.total = kwargs.pop('total')\n        self.scores = scores\n    \n    " }, { "alpha_fraction": 0.6653801798820496, "alphanum_fraction": 0.6676958799362183, "avg_line_length": 42.93220520019531, "blob_id": "039b6a0094b2cd9920386ffeeffe17be2fa41af0", "content_id": "c2e082850d1be0c4ba368da3959b49a0c500a8b8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2591, "license_type": "permissive", "max_line_length": 122, "num_lines": 59, "path": "/apps/config/context_processors.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from config.models import *\nfrom students.models import Student\nfrom results.models import Result\nfrom institutions.models import StudentClass\nfrom config.utils import ChartData\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom insights.models import ASSESSMENT_TYPE, Assessment\nfrom subjects.models import Subject\nfrom states.models import State\nfrom datetime import datetime\nfrom payments.models import AccessCard, AccessToken\n\ndef is_form_teacher(teacher):\n    class_queryset = StudentClass.objects.filter(form_teacher=teacher)\n    return class_queryset.exists()\n
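These context processors only run if they are registered; a hedged sketch of the settings wiring for the two processors in this file (setup and assessment_context, defined below). The module path comes from this dump, the rest is standard Django configuration, not read from the project's settings.py:

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'config.context_processors.setup',
            'config.context_processors.assessment_context',
        ],
    },
}]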
def setup(request):\n    context = {}\n    if hasattr(request.user, 'student'):\n        student = request.user.student\n        year = datetime.today().year\n        try:\n            current_pin = AccessCard.objects.filter(student=student, year=year)\n            context['current_pin'] = current_pin[0]\n        except IndexError:\n            context['current_pin'] = None\n    if hasattr(request.user, 'teacher'):\n        teacher = request.user.teacher\n        results = Result.objects.filter(school=teacher.school, subject__teachers=teacher)\n        student_class = StudentClass.objects.filter(school=request.user.teacher.school)\n        values = ChartData.class_average(results, student_class)\n        config = Config.objects.get(school=teacher.school)\n        cp = config.current_performance\n        pp = config.previous_performance\n        diff = abs(cp-pp)\n        try:\n            context['token_length'] = AccessToken.objects.filter(token_application__school=teacher.school).count()\n            context['cp'] = cp\n            context['pp'] = pp\n            context['config'] = config\n            context['diff_in_perf'] = diff\n            context['is_form_teacher'] = is_form_teacher(request.user.teacher)\n            context['class_avg'] = float(\"%.2f\" % (sum(values['avg'])/len(values['avg'])))\n        except ZeroDivisionError:\n            context['class_avg'] = \"%.2f\" % (0)\n    return context\n\ndef assessment_context(request):\n    data = {}\n    if hasattr(request.user, 'teacher'):\n        data = {\n            'assessments': Assessment.objects.filter(school=request.user.teacher.school),\n            'classes': StudentClass.objects.filter(school=request.user.teacher.school),\n            'subjects': Subject.objects.filter(school=request.user.teacher.school),\n            'states': State.objects.all(),\n            'assessment_types': [{'key': x, 'value': dict(ASSESSMENT_TYPE)[x]} for x in range(1, len(ASSESSMENT_TYPE)+1)],\n        }\n    return data" }, { "alpha_fraction": 0.5577726364135742, "alphanum_fraction": 0.5763341188430786, "avg_line_length": 45.84782791137695, "blob_id": "770b5c021c19a71cf1363adb7868f34e74a78547", "content_id": "98fd22c6e42cfcd595397e3f28d9b8da9707785d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2155, "license_type": "permissive", "max_line_length": 129, "num_lines": 46, "path": "/apps/institutions/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Institution',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('name', models.CharField(max_length=150)),\n                ('short_code', models.CharField(blank=True, max_length=10, null=True)),\n                ('box_address', models.CharField(blank=True, max_length=20, null=True)),\n                ('location', models.CharField(max_length=50)),\n                ('registration_id', models.CharField(blank=True, max_length=30, null=True)),\n                ('email', models.EmailField(max_length=254, null=True)),\n                ('logo', models.ImageField(blank=True, null=True, upload_to='uploads/%Y/%m/%d')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='StudentClass',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('caption', models.CharField(max_length=100, verbose_name='Class Form')),\n                ('nick_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Class Name')),\n                ('class_code', models.CharField(max_length=10, null=True)),\n                ('class_icon', models.ImageField(blank=True, null=True, upload_to='uploads/%Y/%m/%d/')),\n                ('max_student', models.PositiveIntegerField(default=0, verbose_name='Maximum Number of Students to Accomodate')),\n                ('no_subjects', models.PositiveIntegerField(default=0, verbose_name='Number of subjects offered')),\n                ('date_created', models.DateTimeField(auto_now_add=True)),\n                ('date_modified', models.DateTimeField(auto_now=True)),\n            ],\n            options={\n                'ordering': ('caption', 'nick_name'),\n            },\n        ),\n    ]\n" }, { "alpha_fraction": 0.5994516611099243, "alphanum_fraction": 0.6041874289512634, "avg_line_length": 35.47272872924805, "blob_id": "045030e983172b7c3182a5e1df500cfd4c65f1b4", "content_id": "1ac2c0de1d2f3fb9e5e8ac217669b6b0bb373e85", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4012, "license_type": "permissive", "max_line_length": 131, "num_lines": 110, "path": "/apps/results/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import *\nfrom .fields import RestrictedFileField\n\nclass BatchResultForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(BatchResultForm, self).__init__(*args, **kwargs)\n        # Style whatever fields this form actually renders; the fields the old\n        # code referenced by name are excluded below and would raise a KeyError.\n        for field in self.fields.values():\n            field.widget.attrs.setdefault('class', 'form-control')\n\n    class Meta:\n        model = Result\n        exclude = ('semester', 'course', 'date_created', 'session', 'level', 'course_load', 'credit_load')\n\nclass ResultForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(ResultForm, self).__init__(*args, **kwargs)\n        for field in self.fields.values():\n            field.widget.attrs.setdefault('class', 'form-control')\n\n    class Meta:\n        model = Result\n        exclude = ('date_created', 'course_load', 'credit_load', 'department')\n\n\n\nclass ImportForm(forms.Form):\n    # File size limited to 2MB\n    file = RestrictedFileField(\n        label='Upload File (Max Size 2MB)',\n        content_types=[\n            'application/binary',\n            'application/ms-excel',\n            'application/csv',\n            'application/octet-stream',\n            'application/vnd.ms-excel',\n            'text/csv',\n            'text/plain',\n        ],\n        max_upload_size=2097152,\n    )\n\n\nclass BatchGradingForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(BatchGradingForm, self).__init__(*args, **kwargs)\n        self.fields['caption'].widget.attrs = {'class': 'form-control input-sm', 'placeholder': 'Grade Letter'}\n        self.fields['grade_points'].widget.attrs = {'class': 'form-control input-sm', 'placeholder': 'Grade Point', 'step': 'any'}\n        self.fields['start'].widget.attrs = {'class': 'form-control input-sm', }\n        self.fields['end'].widget.attrs = {'class': 'form-control input-sm', }\n\n    class Meta:\n        model = Grading\n        exclude = ('institution',)\n\n\nclass GradingForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(GradingForm, self).__init__(*args, **kwargs)\n        self.fields['institution'].widget.attrs = {'class': 'form-control input-sm'}\n\n    class Meta:\n        model = Grading\n        fields = ('institution',)\n\n\nclass ExaminationForm(forms.ModelForm):\n\n    class Meta:\n        model = Examination\n        fields = ('subject', 'student_class', 'term', 'session', 'exam_score')\n\nclass BatchExaminationForm(forms.ModelForm):\n\n    class Meta:\n        model = Examination\n        fields = ('student', 'exam_score')\n\n\n\nclass AssignmentForm(forms.ModelForm):\n\n    class 
Meta:\n model = Assignment\n fields = ('subject', 'student_class', 'term', 'session', 'assignment_score')\n\nclass BatchAssignmentForm(forms.ModelForm):\n\n class Meta:\n model = Assignment\n fields = ('student', 'assignment_score')\n\n\nclass TestForm(forms.ModelForm):\n\n class Meta:\n model = Test\n fields = ('subject', 'student_class', 'term', 'session', 'test_score')\n\nclass BatchTestForm(forms.ModelForm):\n\n class Meta:\n model = Test\n fields = ('student', 'test_score')\n" }, { "alpha_fraction": 0.5152625441551208, "alphanum_fraction": 0.5860806107521057, "avg_line_length": 39.95000076293945, "blob_id": "744043a03922a084a646e6f2e77437a84faa3bac", "content_id": "a8b14931ae7be3850ebb6a72ab36f6b389ae74c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "permissive", "max_line_length": 394, "num_lines": 20, "path": "/apps/institutions/migrations/0004_auto_20180619_1859.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-19 16:59\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('institutions', '0003_studentclass_generic_class'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='studentclass',\n name='generic_class',\n field=models.PositiveIntegerField(choices=[(1, 'Creche/Pre-Nursery'), (2, 'Nursery 1'), (3, 'Nursery 2'), (4, 'Nursery 3'), (5, 'Primary 1'), (6, 'Primary 2'), (7, 'Primary 3'), (8, 'Primary 4'), (9, 'Primary 5'), (10, 'Primary 6'), (11, 'JSS 1'), (12, 'JSS 2'), (13, 'JSS 3'), (14, 'SS 1'), (15, 'SS 2'), (16, 'SS 3')], null=True, verbose_name='Generic/Standard Class Equivalent'),\n ),\n ]\n" }, { "alpha_fraction": 0.5524111390113831, "alphanum_fraction": 0.5718813538551331, "avg_line_length": 57.893333435058594, "blob_id": "571f4b21a56c38ac03963b652fd4951a729c2da6", "content_id": "cf392e69c768305da4987f7c2e7a98a8f2b3eea3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4417, "license_type": "permissive", "max_line_length": 182, "num_lines": 75, "path": "/apps/reports/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0002_auto_20180523_0138'),\n ('staff', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BroadSheet',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('date_modified', models.DateTimeField(auto_now=True, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Report',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('form_teacher_remark', models.TextField(blank=True, null=True)),\n ('head_remark', models.TextField(blank=True, null=True, verbose_name='Principal/Headmaster Remark')),\n ('attentiveness', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], 
null=True)),\n ('attendance', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('hardworking', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('neatness', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('reliability', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('games', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('craft', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('punctuality', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('relationship_with_others', models.PositiveIntegerField(blank=True, choices=[(1, 'Very Weak'), (2, 'Weak'), (3, 'Fair'), (4, 'Good'), (5, 'Excellent')], null=True)),\n ('verified', models.BooleanField(default=False)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'ordering': ('-date_created', '-date_modified'),\n },\n ),\n migrations.CreateModel(\n name='ReportBatch',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('session', models.CharField(blank=True, max_length=15, null=True)),\n ('school_resume_date', models.DateField(blank=True, null=True)),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ],\n ),\n migrations.AddField(\n model_name='report',\n name='batch',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='reports.ReportBatch'),\n ),\n migrations.AddField(\n model_name='report',\n name='modified_by',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='staff.Teacher'),\n ),\n migrations.AddField(\n model_name='report',\n name='promoted_to',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='promoted_class', to='institutions.StudentClass'),\n ),\n ]\n" }, { "alpha_fraction": 0.5351473689079285, "alphanum_fraction": 0.6077097654342651, "avg_line_length": 22.210525512695312, "blob_id": "133105b62fc0373df4b5629eb32b2c5749dcf4d0", "content_id": "78bdf2c484e726dcf38c53827e4f246f9a059fcf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "permissive", "max_line_length": 65, "num_lines": 19, "path": "/apps/institutions/migrations/0005_auto_20180622_1055.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-22 08:55\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('institutions', '0004_auto_20180619_1859'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='studentclass',\n 
options={'ordering': ('generic_class', 'nick_name')},\n ),\n ]\n" }, { "alpha_fraction": 0.5800256133079529, "alphanum_fraction": 0.5800256133079529, "avg_line_length": 31.58333396911621, "blob_id": "17b2e76f372f5401fba376cc8fd565cc78cfd62f", "content_id": "92d620cb38ece6375a0f5605cc2300089814b7b1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "permissive", "max_line_length": 106, "num_lines": 24, "path": "/apps/api/filters.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "import rest_framework_filters as filters\nfrom students.models import Student\nfrom insights.models import Assessment\n\nclass StudentFilter(filters.FilterSet):\n class Meta:\n model = Student\n fields = {\n 'student_class': ['exact', 'in'],\n 'gender': ['exact'],\n 'state_of_origin': ['exact', 'in'],\n 'birth_date': ['in', 'lt', 'gt'],\n 'special_aids': ['exact', 'startswith']\n }\n\nclass AssessmentFilter(filters.FilterSet):\n students = filters.RelatedFilter(StudentFilter, field_name='students', queryset=Student.objects.all())\n\n class Meta:\n model = Assessment\n fields = {\n 'title': ['exact', 'startswith'],\n 'assessment_type': ['exact', 'startswith', 'in']\n }" }, { "alpha_fraction": 0.5645161271095276, "alphanum_fraction": 0.5691244006156921, "avg_line_length": 28.965517044067383, "blob_id": "590a2da61c780c60345235f1f394a13043dc0301", "content_id": "fcecac08334fcc1b47444e293d33cea1459a914e", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 868, "license_type": "permissive", "max_line_length": 84, "num_lines": 29, "path": "/sani_app/static/js/ajax-preprocessor.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "function getCookie(name) {\n let cookieValue = null;\n if (document.cookie && document.cookie !== '') {\n let cookies = document.cookie.split(';');\n for (let i=0; i<cookies.length; i++) {\n let cookie = $.trim(cookies[i]);\n if (cookie.substring(0, name.length + 1) === (name + '=')) {\n cookieValue = decodeURIComponent(cookie.substring(name.length + 1));\n break;\n }\n }\n } \n return cookieValue;\n}\n\nvar csrftoken = getCookie('csrftoken');\n\nfunction csrfSafeMethod(method) {\n // These don't require CSRF protection\n return (/^(GET|HEAD|OPTIONS|TRACE)$/.test(method));\n}\n\n$.ajaxSetup({\n beforeSend: function(xhr, settings) {\n if (!csrfSafeMethod(settings.type) && !this.crossDomain) {\n xhr.setRequestHeader(\"X-CSRFToken\", csrftoken);\n }\n }\n});" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5099714994430542, "avg_line_length": 27.1200008392334, "blob_id": "09f6e039372e17e4975bee5828ea5c8af01918aa", "content_id": "351112a6973e0ea657cca63efa36abef242d1e43", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 702, "license_type": "permissive", "max_line_length": 80, "num_lines": 25, "path": "/sani_app/templates/password_change.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}Password Change{% endblock %}\n\n{% block content %}\n<div id=\"class-page-content\">\n\n <div class=\"teachers table\">\n <div class=\"header\">\n <h1>Change Password</h1>\n <div class=\"separator\"></div>\n </div>\n </div>\n <div class=\"panel panel-default\">\n <form method=\"POST\" action=\"\">{% csrf_token %}\n <div class=\"form-group\" style=\"padding: 
20px;\">\n {{form}}\n </div>\n <div class=\"form-group\" style=\"padding: 0px 20px;\">\n <button type=\"submit\" class=\"btn btn-main\">Save Changes</button>\n </div>\n </form>\n </div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.6416300535202026, "alphanum_fraction": 0.6516180634498596, "avg_line_length": 33.054420471191406, "blob_id": "326a3877a83af14e1a862fb57db35dacddf6b806", "content_id": "62afd9130fe2a5cd282ddf8a0dba6161de80a1f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5006, "license_type": "permissive", "max_line_length": 108, "num_lines": 147, "path": "/apps/payments/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import timedelta\nfrom django.conf import settings\nfrom config.utils import pin_generator\n# Create your models here.\n\nclass AccessCard(models.Model):\n student = models.ForeignKey('students.Student', null=True)\n access_code = models.CharField(max_length=20, null=True)\n term = models.PositiveIntegerField(choices=settings.TERM_CHOICES, null=True)\n year = models.CharField(max_length=4, null=True)\n session = models.CharField(max_length=10, null=True)\n school_id = models.CharField(max_length=20, null=True)\n school_token = models.ForeignKey('payments.AccessToken', null=True)\n validated = models.BooleanField(default=False)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"(%s, %s)\" % (self.student, self.access_code)\n\n class Meta:\n unique_together = ('student', 'term', 'session')\n ordering = ('-date_created',)\n\n def save(self, **kwargs):\n if not self.access_code:\n self.access_code = 'PIN-'+pin_generator(length=10)\n super(AccessCard, self).save(**kwargs)\n\n\n\nclass AccessToken(models.Model):\n token = models.CharField(max_length=30, null=True, blank=True)\n token_application = models.ForeignKey('payments.TokenApplication')\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '%s: (%s)' % (self.token, self.token_application.school)\n\n class Meta:\n ordering = ('-date_created',)\n\n def save(self, **kwargs):\n if not self.token:\n self.token = 'TOKEN-'+pin_generator(length=15)\n super(AccessToken, self).save(**kwargs)\n\n\nclass TokenApplication(models.Model):\n STATUS = (\n ('I', 'Initialized'),\n ('A', 'Approved'),\n ('P', 'Processing'),\n ('D', 'Declined'),\n ('C', 'Cancelled')\n )\n NAIRA = 'Naira'\n DOLLARS = 'Dollars'\n\n PAYMENT_CURRENCY = (\n (NAIRA, \"Naira\"),\n (DOLLARS, \"Dollars\"),\n )\n\n GTPAY = 'GTPay'\n PAYPAL = 'PayPal'\n PAYSTACK = 'Paystack'\n TELLER = 'Teller'\n\n PAYMENT_METHOD = (\n (GTPAY, \"GTPay\"),\n (PAYSTACK, 'Paystack'),\n (PAYPAL, 'Paypal'),\n (TELLER, 'Bank Transfer/Teller'),\n )\n application_id = models.CharField(max_length=30, null=True)\n school = models.ForeignKey('institutions.Institution', null=True)\n year = models.CharField(max_length=4, null=True)\n session = models.CharField(max_length=10, null=True)\n term = models.PositiveIntegerField(choices=settings.TERM_CHOICES, null=True)\n is_paid = models.BooleanField(default=False)\n payment_method = models.CharField(editable=False, max_length=50, choices=PAYMENT_METHOD, null=True)\n payment_id = models.CharField(max_length=21, null=True)\n status = models.CharField(max_length=1, choices=STATUS, default='I')\n date_created = models.DateTimeField(auto_now_add=True)\n # the fields below are optional. 
They vary based on the payment method\n teller_number = models.CharField(max_length=20, null=True,blank=True)\n teller_date = models.DateField(null=True,blank=True)\n\n\n def __str__(self):\n return '%s --> %s' % (self.school, self.application_id)\n\n class Meta:\n ordering = ('-date_created',)\n\n def save(self, **kwargs):\n if not self.payment_id:\n self.payment_id = 'SANI_PAY_'+pin_generator(length=10)\n self.application_id = 'SANI_APPLY_'+pin_generator(length=10)\n super(TokenApplication, self).save(**kwargs)\n\n @property\n def get_token(self):\n access_token = AccessToken.objects.get(token_application=self)\n return access_token.token\n\n @property\n def get_status(self):\n status = {'I': 'Initialized', 'A': 'Approved', 'P': 'Processing', 'D': 'Declined', 'C': 'Cancelled'}\n return status[self.status]\n\n @property\n def get_term(self):\n terms = {1:'First Term', 2:'Second Term', 3:'Third Term'}\n return terms[self.term].upper()\n\n\nclass Plan(models.Model):\n name = models.CharField(max_length=50)\n plan_code = models.CharField(max_length=25, blank=True)\n amount = models.DecimalField(decimal_places=2, max_digits=10, default=0.0)\n student_limit = models.PositiveIntegerField(default=0)\n staff_limit = models.PositiveIntegerField(default=0)\n result_limit = models.PositiveIntegerField(default=0)\n max_admin = models.PositiveIntegerField(default=1)\n\n def __str__(self):\n return \"%s - %s\" % (self.name, self.plan_code)\n\n @property\n def amount_in_kobo(self):\n return int(self.amount * 100)\n\n\nclass Subscription(models.Model):\n customer = models.ForeignKey(\"institutions.Institution\")\n active = models.BooleanField(default=True)\n subscription_code = models.CharField(max_length=25)\n plan = models.ForeignKey(Plan)\n subscribed_on = models.DateTimeField()\n\n def __str__(self):\n return self.subscription_code\n\n def disable_subscription(self):\n self.active = False\n" }, { "alpha_fraction": 0.5457284450531006, "alphanum_fraction": 0.5601463317871094, "avg_line_length": 47.91579055786133, "blob_id": "dccf9714b0da60311d423c3e65e4f71d90c5c843", "content_id": "ec0d6f8c35ddd7cb6d363294e4f0f7e14c832f68", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4647, "license_type": "permissive", "max_line_length": 183, "num_lines": 95, "path": "/apps/payments/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('institutions', '0002_auto_20180523_0138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AccessCard',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('access_code', models.CharField(max_length=20, null=True)),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('year', models.CharField(max_length=4, null=True)),\n ('school_id', models.CharField(max_length=20, null=True)),\n ('validated', models.BooleanField(default=False)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n migrations.CreateModel(\n name='AccessToken',\n fields=[\n ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),\n ('token', models.CharField(blank=True, max_length=30, null=True)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n migrations.CreateModel(\n name='Plan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('plan_code', models.CharField(blank=True, max_length=25)),\n ('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),\n ('student_limit', models.PositiveIntegerField(default=0)),\n ('staff_limit', models.PositiveIntegerField(default=0)),\n ('result_limit', models.PositiveIntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('active', models.BooleanField(default=True)),\n ('subscription_code', models.CharField(max_length=25)),\n ('subscribed_on', models.DateTimeField()),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Plan')),\n ],\n ),\n migrations.CreateModel(\n name='TokenApplication',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('application_id', models.CharField(max_length=30, null=True)),\n ('year', models.CharField(max_length=4, null=True)),\n ('term', models.PositiveIntegerField(choices=[(1, 'First Term'), (2, 'Second Term'), (3, 'Third Term')], null=True)),\n ('is_paid', models.BooleanField(default=False)),\n ('payer', models.CharField(max_length=30, null=True, verbose_name='Applicant')),\n ('payment_id', models.CharField(max_length=21, null=True)),\n ('status', models.CharField(choices=[('I', 'Initialized'), ('A', 'Approved'), ('P', 'Processing'), ('D', 'Declined'), ('C', 'Cancelled')], default='I', max_length=1)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ],\n options={\n 'ordering': ('-date_created',),\n },\n ),\n migrations.AddField(\n model_name='accesstoken',\n name='token_application',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.TokenApplication'),\n ),\n migrations.AddField(\n model_name='accesscard',\n name='school_token',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='payments.AccessToken'),\n ),\n ]\n" }, { "alpha_fraction": 0.7454426884651184, "alphanum_fraction": 0.7454426884651184, "avg_line_length": 36.778690338134766, "blob_id": "f0e883cf79b8040a2e17c31a756dd8886ee05979", "content_id": "08c09000a592fbcd3da9378ce239e29c2b27d7a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4608, "license_type": "permissive", "max_line_length": 87, "num_lines": 122, "path": "/apps/api/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse, JsonResponse\nfrom rest_framework.response import Response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom students.models import Student\nfrom .serializers import *\nfrom rest_framework.views import APIView\nfrom 
rest_framework.decorators import api_view\nfrom rest_framework import generics\nfrom results.models import Result\nfrom .permissions import *\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.shortcuts import render\n# from .filters import AssessmentFilter\nfrom institutions.models import *\nfrom insights.models import Assessment, GroupAssessment\n\n# Create your views here.\ndef insights(request):\n    return render(request, 'insights/dashboard.html', {}) \n\n\nclass StudentAPIView(generics.ListCreateAPIView):\n    serializer_class = StudentSerializer\n    permission_classes = (IsAuthenticated, StaffAdminAccessPermission)\n\n    def get_queryset(self):\n        teacher = self.request.user.teacher\n        queryset = Student.objects.filter(school=teacher.school)\n        return queryset\n\nclass GroupAPIView(generics.ListCreateAPIView):\n    serializer_class = GroupSerializer\n    permission_classes = (IsAuthenticated, StaffAdminAccessPermission)\n\n    def get_queryset(self):\n        teacher = self.request.user.teacher\n        queryset = GroupAssessment.objects.filter(school=teacher.school)\n        return queryset\n\n\nclass StudentRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):\n    permission_classes = (IsAuthenticated, StudentOwnAccountOrStaffPermission)\n    serializer_class = StudentSerializer\n    lookup_url_kwarg = 'slug'\n    \n    def get_queryset(self):\n        queryset = Student.objects.none()\n        if hasattr(self.request.user, 'teacher'):\n            teacher = self.request.user.teacher\n            queryset = Student.objects.filter(school=teacher.school)\n        if hasattr(self.request.user, 'student'):\n            student = self.request.user.student\n            queryset = Student.objects.filter(school=student.school)\n        return queryset\n\n\nclass AssessmentRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):\n    permission_classes = (IsAuthenticated, StaffAccessPermission)\n    serializer_class = AssessmentSerializer\n\n    def get_queryset(self):\n        return Assessment.objects.filter(school=self.request.user.teacher.school)\n\n\nclass GroupRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):\n    permission_classes = (IsAuthenticated, StaffAccessPermission)\n    serializer_class = GroupSerializer\n\n    def get_queryset(self):\n        return GroupAssessment.objects.filter(school=self.request.user.teacher.school)\n\n\nclass ResultAPIView(generics.ListAPIView):\n    serializer_class = ResultSerializer\n    filter_fields = ('term', 'student_class')\n    permission_classes = (IsAuthenticated, StaffAdminAccessPermission)\n\n    def get_queryset(self, *args, **kwargs):\n        queryset = Result.objects.filter(school=self.request.user.teacher.school)\n        return queryset\n\n\nclass AssessmentAPIView(generics.ListCreateAPIView):\n    serializer_class = AssessmentSerializer\n    permission_classes = (IsAuthenticated, StaffAccessPermission)\n    # filter_class = AssessmentFilter\n\n    def get(self, request, *args, **kwargs):\n        queryset = Assessment.objects.filter(school=request.user.teacher.school)\n        serializer = self.get_serializer(queryset, many=True)\n        return Response(serializer.data)\n    \n    def get_queryset(self):\n        return Assessment.objects.filter(school=self.request.user.teacher.school)\n
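apps/api/permissions.py is star-imported above but not present in this dump, so the class below is only a hedged guess at the shape of one such permission; the name is reused from the imports, the body is an assumption, not the project's code:

from rest_framework.permissions import BasePermission

class StaffAccessPermission(BasePermission):
    # Assumed behaviour: only users with a related Teacher profile get through.
    def has_permission(self, request, view):
        return hasattr(request.user, 'teacher')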
class AssessmentDetailAPIView(generics.RetrieveUpdateDestroyAPIView):\n    # Assessment detail view; serves a single assessment with its related data.\n    permission_classes = (IsAuthenticated, StudentOwnAccountOrStaffPermission)\n    serializer_class = AssessmentSerializer\n\n    def get_queryset(self):\n        queryset = Assessment.objects.none()\n        if hasattr(self.request.user, 'teacher'):\n            teacher = self.request.user.teacher\n            queryset = Assessment.objects.filter(school=teacher.school)\n        return queryset\n    \n    def get(self, request, *args, **kwargs):\n        instance = self.get_object()\n        serializer = self.get_serializer(instance)\n        # get_students, get_results and get_reports are assumed to come from the\n        # star imports above and to return JSON-serializable data.\n        students = get_students(instance)\n        results = get_results(instance)\n        reports = get_reports(instance)\n        return Response({\n            'assessment': serializer.data,\n            'students': students,\n            'results': results,\n            'reports': reports,\n        })\n    \n    \nclass StudentClassAPIView(generics.ListCreateAPIView):\n    serializer_class = StudentClassSerializer\n    permission_classes = (IsAuthenticated, StaffAccessPermission)\n\n    def get_queryset(self, *args, **kwargs):\n        queryset = StudentClass.objects.filter(school=self.request.user.teacher.school)\n        return queryset" }, { "alpha_fraction": 0.49721115827560425, "alphanum_fraction": 0.7043824791908264, "avg_line_length": 16.95945930480957, "blob_id": "d1f07c0c3774b6df8abc7c9384f62dfe280e9f2f", "content_id": "21c184cbfdc9543e05a57ecdebb6bb4e8847aae9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1255, "license_type": "permissive", "max_line_length": 35, "num_lines": 74, "path": "/requirements.txt", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "atomicwrites==1.1.5\nattrs==18.1.0\nautopep8==1.4.3\ncairocffi==0.6\nCairoSVG==2.1.3\ncertifi==2018.4.16\ncffi==1.11.5\nchardet==3.0.4\nClick==7.0\ncolorama==0.3.9\ncoverage==4.5.1\ncssselect==1.0.3\ncssselect2==0.2.1\ndefusedxml==0.5.0\ndiff-match-patch==20121119\nDjango==1.11\ndjango-crontab==0.7.1\ndjango-easy-pdf==0.1.1\ndjango-filter==2.0.0\ndjango-import-export==1.0.1\ndjango-silk==3.0.1\ndjango-tables2==1.21.2\ndjangorestframework==3.8.2\ndjangorestframework-filters==0.10.2\net-xmlfile==1.0.1\nFlask==1.0.2\ngevent==1.3.7\ngprof2dot==2016.10.13\ngreenlet==0.4.15\nhtml5lib==1.0b10\nhttplib2==0.11.3\nidna==2.6\nitsdangerous==1.1.0\njdcal==1.4\nJinja2==2.10\nlocustio==0.9.0\nlxml==4.2.1\nMarkupSafe==1.1.0\nmore-itertools==4.2.0\nmsgpack==0.6.0\nodfpy==1.3.6\nopenpyxl==2.5.4\npaystackapi==1.2.6\nPillow==5.0.0\npluggy==0.6.0\npy==1.5.4\npycodestyle==2.4.0\npycparser==2.18\nPygments==2.2.0\nPyPDF2==1.26.0\nPyphen==0.9.4\npytest==3.6.2\npytest-cov==2.5.1\npython-dateutil==2.7.5\npytz==2018.3\nPyYAML==3.13\npyzmq==17.1.2\nreportlab==3.4.0\nrequests==2.18.4\nsimplejson==3.14.0\nsix==1.11.0\nsorl-thumbnail==12.4.1\nsqlparse==0.2.4\ntablib==0.12.1\ntinycss==0.4\ntinycss2==0.6.1\nunicodecsv==0.14.1\nurllib3==1.22\nWeasyPrint==0.34\nwebencodings==0.5.1\nWerkzeug==0.14.1\nxhtml2pdf==0.2.1\nxlrd==1.1.0\nxlwt==1.3.0\n" }, { "alpha_fraction": 0.6623058319091797, "alphanum_fraction": 0.6623058319091797, "avg_line_length": 47.959999084472656, "blob_id": "ea46ff81f143ebc6a57539f356425b0a39d9373c", "content_id": "417f76d5c58857b57ecb9fa4a4336116afd5fa32", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "permissive", "max_line_length": 87, "num_lines": 25, "path": "/apps/results/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n    url(r'^grading-scheme$', grading_setting, name='grading-setting'),\n    url(r'^teacher/results$', StaffBasedResultView.as_view(), name='staff-result'),\n    url(r'^student/results$', StudentBasedResultView.as_view(), name='student-result'),\n    url(r'^result/import$', import_data, name='result-import'),\n    url(r'^edit/$', edit_result, name='edit-result'),\n    url(r'^test/import$', test_imports, name='test-import'),\n    url(r'^exam/import$', exam_imports, name='exam-import'),\n    url(r'^assignment/import$', assignment_imports, name='assignment-import'),\n    url(r'^new/exam$', ExamCreateView.as_view(), 
name='new-exam'),\n url(r'^new/test$', TestCreateView.as_view(), name='new-test'),\n url(r'^new/assignment$', AssignmentCreateView.as_view(), name='new-assignment'),\n url(r'^delete/(?P<id>\\d+)$', delete, name='delete'),\n \n #JSON URLs\n url(r'^student/json$', student_json, name='student-json'),\n url(r'^average/json$', get_average_json, name='average-json'),\n url(r'^demographics/json$', student_demographics_json, name='demographic-json'),\n\n #Batch Entries\n url(r'^batch$', batch_results, name='batch-result'),\n]" }, { "alpha_fraction": 0.6199377179145813, "alphanum_fraction": 0.6199377179145813, "avg_line_length": 34.66666793823242, "blob_id": "a27e610a7a57d45cf9a267ae47670c107b680a5d", "content_id": "464f4d65e5819d078f1ec6e880bd90c817e68eb7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "permissive", "max_line_length": 72, "num_lines": 9, "path": "/apps/subjects/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^new$', new_subject, name='new-subject'),\n url(r'^$', SubjectListView.as_view(), name='list'),\n url(r'^edit/(?P<subject_id>\\d+)$', edit_subject, name='edit'),\n url(r'^delete/(?P<subject_id>\\d+)$', delete_subject, name='delete'),\n]\n" }, { "alpha_fraction": 0.7067669034004211, "alphanum_fraction": 0.7067669034004211, "avg_line_length": 21.33333396911621, "blob_id": "21b929801947bd960dd2a5144b9a2ba6656f9d74", "content_id": "8daa8a4c0a4a75acc36888aa318ec03d37e48b6e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "permissive", "max_line_length": 43, "num_lines": 6, "path": "/apps/contacts/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom contacts.views import contact_us\n\nurlpatterns = [\n url(r'^$', contact_us, name='contact'),\n]" }, { "alpha_fraction": 0.5590822100639343, "alphanum_fraction": 0.584130048751831, "avg_line_length": 52.3775520324707, "blob_id": "4ce8710f419024f044919937be430b4a009144ef", "content_id": "f7b8cd6ccd27411e1101f82414e0bf1e5ed6b8ad", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5230, "license_type": "permissive", "max_line_length": 170, "num_lines": 98, "path": "/apps/insights/templates/insights/_filters.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% load static %}\n\n<ul>\n {% if genders %}\n <li onclick=\"toggleMe('id_gender')\">Gender</li>\n <div class=\"filter\" id=\"id_gender\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n <input type=\"checkbox\" name=\"gender\" id=\"\" value=\"M\"> Male <br>\n <input type=\"checkbox\" name=\"gender\" id=\"\" value=\"F\"> Female\n </div>\n {% endif %}\n {% if filter_classes %}\n <li onclick=\"toggleMe('id_class')\">Class</li>\n <div class=\"filter\" id=\"id_class\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n {% for class in filter_classes %}\n <input type=\"checkbox\" name=\"class\" value=\"{{class.id}}\"> {{class|truncatechars:15}}<br>\n {% endfor %}\n </div>\n {% endif %}\n {% if assessment %}\n <li onclick=\"toggleMe('id_average_range')\">Grade Range</li>\n <div class=\"filter\" id=\"id_average_range\" style=\"padding: 5px; 
text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        <div class=\"range-list\">\n            <h5>Overall Average</h5>\n            <p id=\"id_average_value\" style=\"text-align: center; font-size: 14px; font-weight: 500;\"></p>\n            <input type=\"range\" name=\"averages\" id=\"id_average_slider\" min=\"0\" max=\"100\" step=\"0.1\" value=\"0\" oninput=\"changeSlider('id_average_value', this.value, 100)\">\n            <img src=\"{% static 'img/icons/range.png' %}\" alt=\"Overall Range\" width=\"150\"><hr>\n        </div>\n        <div class=\"range-list\">\n            <h5>Ranking</h5>\n            <p id=\"id_ranking_value\" style=\"text-align: center; font-size: 14px; font-weight: 500;\"></p>\n            <input id=\"id_ranking\" name=\"rank\" type=\"range\" min=\"0\" max=\"100\" step=\"1\" value=\"0\" oninput=\"changeSlider('id_ranking_value', this.value, 100)\">\n            <img src=\"{% static 'img/icons/range.png' %}\" alt=\"Overall Range\" width=\"150\"><hr>\n        </div>\n        <div class=\"range-list\">\n            <h5>Class Performance</h5>\n            <p id=\"id_class_value\" style=\"text-align: center; font-size: 14px; font-weight: 500;\"></p>\n            <input type=\"range\" name=\"class_performance\" id=\"id_class_slider\" min=\"0\" max=\"100\" step=\"0.1\" value=\"50\" oninput=\"changeSlider('id_class_value', this.value, 100)\">\n            <img src=\"{% static 'img/icons/range.png' %}\" alt=\"Overall Range\" width=\"150\">\n        </div>\n    </div>\n    {% endif %}\n    {% if years %}\n    <li onclick=\"toggleMe('id_year')\">Year of Birth</li>\n    <div class=\"filter\" id=\"id_year\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for year in years %}\n        <input type=\"checkbox\" name=\"year\" value=\"{{year}}\"> {{year}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if states %}\n    <li onclick=\"toggleMe('id_state')\">Geo Zone</li>\n    <div class=\"filter\" id=\"id_state\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for state in states %}\n        <input type=\"checkbox\" name=\"geo_location\" id=\"{{state.id}}\"> {{state|truncatechars:20}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if courses %}\n    <li onclick=\"toggleMe('id_course')\">Course Proficiency</li>\n    <div class=\"filter\" id=\"id_course\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for course in courses %}\n        <input type=\"checkbox\" name=\"course\" value=\"{{course.id}}\"> {{course|truncatechars:18}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if aids %}\n    <li onclick=\"toggleMe('id_aid')\">Special Aids</li>\n    <div class=\"filter\" id=\"id_aid\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for aid in aids %}\n        <input type=\"checkbox\" name=\"aids\" value=\"{{aid.key}}\"> {{aid.value}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if religions %}\n    <li onclick=\"toggleMe('id_religion')\">Religion</li>\n    <div class=\"filter\" id=\"id_religion\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for religion in religions %}\n        <input type=\"checkbox\" name=\"religion\" value=\"{{religion}}\"> {{religion}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if subjects_compared %}\n    <li onclick=\"toggleMe('id_sub_compare')\">Subject Comparison</li>\n    <div class=\"filter\" id=\"id_sub_compare\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        {% for subject in subjects_compared %}\n        <input type=\"checkbox\" name=\"subjects_comparison\" id=\"id_subject_comparison_{{subject.id}}\" value=\"{{subject.id}}\"> {{subject|truncatechars:16}}<br>\n        {% endfor %}\n    </div>\n    {% endif %}\n    {% if term %}\n    <li onclick=\"toggleMe('id_term')\">Term</li>\n    <div class=\"filter\" id=\"id_term\" style=\"padding: 5px; text-align: left; background: #fafafa; font-size: 13px; font-weight: 200;\">\n        <input type=\"checkbox\" name=\"term\" id=\"id_term_1\" value=\"1\"> First Term<br>\n        <input type=\"checkbox\" name=\"term\" id=\"id_term_2\" value=\"2\"> Second Term<br>\n        <input type=\"checkbox\" name=\"term\" id=\"id_term_3\" value=\"3\"> Third Term<br>\n    </div>\n    {% endif %}\n</ul>" }, 
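One hedged way a Django view could consume the checkbox filters rendered above; the view function is illustrative, and only the input names (gender, class, term) come from the template:

def apply_filters(request, students):
    # students is assumed to be a Student queryset.
    genders = request.GET.getlist('gender')    # e.g. ['M', 'F']
    class_ids = request.GET.getlist('class')
    if genders:
        students = students.filter(gender__in=genders)
    if class_ids:
        students = students.filter(student_class_id__in=class_ids)
    return students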
{ "alpha_fraction": 0.6900853514671326, "alphanum_fraction": 0.7028890252113342, "avg_line_length": 45.86153793334961, "blob_id": "00251c7b1f8d1e513f972be00f44d90ebeea080b", "content_id": "721ae5fd7ec1dfe23a119d4f52b6740bdd813537", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3046, "license_type": "permissive", "max_line_length": 134, "num_lines": 65, "path": "/apps/config/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nPAYMENT_CHOICES = (\n    ('Bank', 'Pay through bank teller'.title()),\n    ('Card', 'Pay via card'.title()),\n)\nUPDATE_CHOICES = (\n    ('Quarterly', 'Quarterly'),\n    ('Anually', 'Annually')\n)\nclass Config(models.Model):\n    school = models.OneToOneField('institutions.Institution')\n    current_performance = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    previous_performance = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    current_class_average = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    previous_class_average = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    show_attendance = models.BooleanField(\"Use psychomotors in report\", default=False)\n    automatic_update = models.BooleanField('Update Class Automatically', default=False)\n    session_average = models.BooleanField(\"Generate a sessional average\", default=False)\n    use_position = models.BooleanField(\"Generate position in report\", default=True)\n    update = models.CharField('How often do you want the performance updated?', max_length=20, choices=UPDATE_CHOICES, default='Quarterly')\n    payment_method = models.CharField('How would you like to pay?', max_length=20, choices=PAYMENT_CHOICES, default='Card')\n    plan = models.ForeignKey('payments.Plan', null=True)\n\n    # Used to check whether the school has already changed from the default plan\n    plan_changed = models.BooleanField(default=False)\n    date_created = models.DateTimeField(auto_now_add=True)\n    date_modified = models.DateTimeField(auto_now=True)\n\n    def __str__(self):\n        return \"%s [CP:%s, PP:%s]\" % (self.school, self.current_performance, self.previous_performance)\n    \n    class Meta:\n        verbose_name = u'School Configuration'\n\n\nclass StudentConfig(models.Model):\n    student = models.OneToOneField('students.Student')\n    current_performance = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    previous_performance = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    current_class_average = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    previous_class_average = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n    date_created = models.DateTimeField(auto_now_add=True)\n    date_modified = models.DateTimeField(auto_now=True)\n\n    def __str__(self):\n        return \"%s [CP: %s, PP: %s]\" % (self.student, self.current_performance, 
self.previous_performance)\n\n @property\n def get_difference(self):\n if self.current_performance == self.previous_performance:\n return float(0.0)\n else:\n return float(\"%.2f\" % (self.current_performance - self.previous_performance))\n\n @property\n def is_high(self):\n if self.current_performance == self.previous_performance:\n return True\n else:\n return self.current_performance > self.previous_performance\n \n class Meta:\n verbose_name = u'Student Configuration'\n" }, { "alpha_fraction": 0.558178722858429, "alphanum_fraction": 0.61551433801651, "avg_line_length": 28.649999618530273, "blob_id": "1d097318795054321af7ff6916bfbb6c7faf5ffa", "content_id": "f81ea9a56df436cb8545a365cba69f7e535f1ed6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "permissive", "max_line_length": 187, "num_lines": 20, "path": "/apps/config/migrations/0003_auto_20180619_1837.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-19 16:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('config', '0002_auto_20180523_0138'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='config',\n name='payment_method',\n field=models.CharField(choices=[('Bank', 'Pay Through Bank Teller'), ('Card', 'Pay Via Card')], default='Quarterly', max_length=20, verbose_name='How would you like to pay?'),\n ),\n ]\n" }, { "alpha_fraction": 0.6364921927452087, "alphanum_fraction": 0.6435643434524536, "avg_line_length": 38.654205322265625, "blob_id": "2dbaff54a2ea5e8c94e55cd406db0e17f252894c", "content_id": "3ea18c1de17b45c7a920100f97e6344b25b56bf5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4242, "license_type": "permissive", "max_line_length": 171, "num_lines": 107, "path": "/apps/core/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom students.models import StudentClass, Student\nfrom staff.models import Teacher\nimport datetime\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom results.utils import StudentReport\nfrom results.models import Result\nfrom subjects.models import Subject\n# Create your views here.\n\n@login_required\ndef dashboard(request):\n context = {}\n template_name='dashboard.html'\n if hasattr(request.user, 'teacher'):\n teacher = request.user.teacher\n high, low = leadership_board(request)\n if teacher.is_admin:\n results = Result.objects.filter(school=teacher.school)\n male, female = (Student.objects.filter(gender='M', school=request.user.teacher.school), Student.objects.filter(gender='F', school=request.user.teacher.school))\n try:\n ratio=float(len(male)/len(female))\n except ZeroDivisionError:\n ratio = 0\n context = {\n 'years': [i for i in range(datetime.date.today().year, 1999, -1)],\n 'classes': StudentClass.objects.filter(school=request.user.teacher.school),\n 'students': Student.objects.get_active_students(request.user.teacher.school),\n 'staff': 
Teacher.objects.filter(school=request.user.teacher.school),\n 'ratio': \"%.2f\" % (ratio), \n 'low': low,\n 'high': high,\n 'result_upload': results.count() \n\n }\n else:\n results = Result.objects.filter(school=teacher.school, subject__teachers=teacher)\n context[\"low\"] = low\n context['high'] = high\n context['result_upload'] = results.count()\n if hasattr(request.user, 'student'):\n student = get_object_or_404(Student, user=request.user)\n try:\n rankings = StudentReport.board(student.school,term=1)\n my_rank = list(rankings.keys()).index(student)+1\n context = {\n 'student': student,\n 'rank': my_rank,\n 'classes': StudentClass.objects.filter(school=request.user.student.school),\n 'subjects': Subject.objects.filter(school=request.user.student.school),\n 'results': Result.objects.filter(student=request.user.student)\n }\n except:\n raise ValueError(\"Wrong Configuration\")\n return render(request, template_name, context)\n\n\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef leadership_board(request):\n data = None\n #students = [student for student in StudentReport.board(students,term=1)]\n school = request.user.teacher.school\n students_list = Student.objects.get_active_students(school)\n data = StudentReport.board(school=request.user.teacher.school, term=1)\n \n students = [student for student in data.keys()]\n count = 5\n high = students[:count]\n low = students[(-count):]\n return high, low\n\n\ndef index(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse('dashboard'))\n return render(request, 'index.html', {})\n\n\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user) # Important!\n messages.success(request, 'Your password was successfully updated!')\n return HttpResponseRedirect(reverse('dashboard'))\n else:\n messages.error(request, 'Please correct the error below.')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'password_change.html', {\n 'form': form\n })\n\n\ndef error_404(request):\n data = {}\n return render(request, 'core/error_404.html', data)\n\ndef error_500(request):\n data = {}\n return render(request, 'core/error_500.html', data)" }, { "alpha_fraction": 0.48120301961898804, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 38.900001525878906, "blob_id": "caa3ab03ac6be30bf944dd29bc9475c7120559c6", "content_id": "83f393b06928dc737617effe9aa5698e9bb2bbe5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "permissive", "max_line_length": 383, "num_lines": 20, "path": "/apps/institutions/migrations/0003_studentclass_generic_class.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-19 16:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('institutions', '0002_auto_20180523_0138'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='studentclass',\n name='generic_class',\n field=models.CharField(choices=[(1, 'Creche/Pre-Nursery'), (2, 'Nursery 1'), (3, 'Nursery 2'), (4, 'Nursery 3'), (5, 'Primary 1'), (6, 'Primary 2'), (7, 'Primary 3'), (8, 'Primary 4'), (9, 'Primary 5'), (10, 'Primary 6'), (11, 'JSS 1'), (12, 'JSS 2'), (13, 'JSS 3'), 
(14, 'SS 1'), (15, 'SS 2'), (16, 'SS 3')], max_length=30, null=True, verbose_name='Generic Class Name'),\n ),\n ]\n" }, { "alpha_fraction": 0.7396520972251892, "alphanum_fraction": 0.7402519583702087, "avg_line_length": 38.69047546386719, "blob_id": "a1733921b85f1b79952860396d1079b60b4bf88b", "content_id": "76840669f11ccc904c1c2dda8f8ce89b23f3a4a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/apps/config/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom .forms import SchoolSetupForm\nfrom config.models import Config\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n@login_required\ndef setup(request):\n\tform = []\n\tstudent_setup = None\n\tschool_config = None\n\tif hasattr(request.user, 'student'):\n\t\tstudent_setup = StudentSetup.objects.get(user=request.user)\n\tif hasattr(request.user, 'teacher'):\n\t\tschool_config = Config.objects.filter(school=request.user.teacher.school)[0]\n\tif request.method == 'POST':\n\t\tif hasattr(request.user, 'teacher'):\n\t\t\tform = SchoolSetupForm(request.POST, instance=school_config)\n\t\t\tconfig = form.save(commit=False)\n\n\t\t\t#check to make sure the user doesn't go back and forth on the free plan \n\t\t\tif config.plan.name == 'Free' and config.plan_changed:\n\t\t\t\tmessages.error(request, \"Sorry the Free/Trial Plan is one-off \\\n\t\t\t\t and cannot be reverted to. For more information, \\\n\t\t\t\t please contact the SANIFY Developer Team for help\")\n\t\t\t\treturn redirect('config:setup')\n\t\t\telse:\n\t\t\t\tconfig.plan_changed = True\n\t\t\tconfig.save()\n\t\t\tmessages.success(request, \"Your configuration has been saved\")\n\t\t\treturn redirect('dashboard')\n\t\telif hasattr(request.user, 'student'):\n\t\t\tform = StudentSetupForm(request.POST, instance=student_setup)\n\t\t\tform.save()\n\t\t\tmessages.success(request, \"Your configuration has been saved\")\n\t\t\treturn redirect('dashboard')\n\telse:\n\t\tif hasattr(request.user, 'teacher'):\n\t\t\tform = SchoolSetupForm(instance=school_config)\n\t\telif hasattr(request.user, 'student'):\n\t\t\tform = StudentSetupForm(instance=student_setup)\n\treturn render(request, 'config/setup.html', {'form': list(form)})\n" }, { "alpha_fraction": 0.7757009267807007, "alphanum_fraction": 0.7757009267807007, "avg_line_length": 31.200000762939453, "blob_id": "0e49b11d316a324a728d9093b0de1508cce03434", "content_id": "4ebba2ca136cb4413406bf6ae17b657e08a80b58", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "permissive", "max_line_length": 77, "num_lines": 10, "path": "/apps/config/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Config, StudentConfig\n\[email protected](Config)\nclass ConfigAdmin(admin.ModelAdmin):\n list_display = ('school',)\n\[email protected](StudentConfig)\nclass StudentConfigAdmin(admin.ModelAdmin):\n list_display = ('student', 'previous_performance', 'current_performance')" }, { "alpha_fraction": 0.41770902276039124, "alphanum_fraction": 0.42626726627349854, "avg_line_length": 36.51852035522461, "blob_id": "a79df0ad638c1cb9240af9e69092d8c964afa7e0", "content_id": "d0fd36fcaab8a5040bfa30e905c05bbaffb8d46c", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3038, "license_type": "permissive", "max_line_length": 139, "num_lines": 81, "path": "/apps/results/templates/results/grading_scheme.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'results/all_results.html' %}\n{% load staticfiles %}\n{% block title %}Grading Setup{% endblock %}\n{% block top_menu %}{% endblock %}\n{% block extracss %}\n<style>\n input[type=text], input[type=number] {\n text-transform: uppercase;\n font-size: 13px;\n }\n label {\n font-weight: 200;\n }\n</style>\n{% endblock %}\n{% block inner-content %}\n<div id=\"class-page-content\">\n <div class=\"teachers table\">\n <div class=\"header\">\n <h1>Grade Configuration</h1>\n \n <div class=\"separator\"></div>\n </div>\n </div>\n\n <div class=\"col-md-10 center-block\">\n <div class=\"panel panel-default\">\n <form method=\"post\">{% csrf_token %}\n <div class=\"panel-body\">\n <div class=\"alert alert-info\">\n <p>All letters for the grading scheme must be in uppercase.</p>\n </div>\n <div class=\"form-group\">\n <input type=\"hidden\" value=\"{{user.teacher.school.id}}\" name=\"institution\" class=\"form-control\">\n </div>\n <div class=\"sub-panel add-padding\">\n {{batch_grading_formset.management_form}}\n {% for batch_form in batch_grading_formset %}\n <div class=\"form-group batch_formset\" style=\"border-radius: 4px; border: 1px solid #e5e5e5; padding-bottom: 10px;\">\n <div class=\"col-md-3\">\n <label for=\"\">Caption</label>\n {{batch_form.caption}}\n </div>\n <div class=\"col-md-3\">\n <label for=\"\">Grade Points</label>\n {{batch_form.grade_points}}\n </div>\n <div class=\"col-md-3\">\n <label for=\"\">Start</label>\n {{batch_form.start}}\n </div>\n <div class=\"col-md-3\">\n <label for=\"\">End</label>\n {{batch_form.end}}\n <br>\n {{batch_form.id}}\n </div>\t\n\n </div>\n\n {% endfor %}\n </div>\n <hr>\n <div class=\"sub-panel add-padding\" style=\"float: left;\">\n <button class=\"status pay\" type=\"submit\" style=\"padding: 7px 30px; border: none;\">Save Grading Scheme</button>\n </div>\n </div>\n </form>\n </div>\n </div>\n</div>\n{% endblock %}\n{% block extrajs %}\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery.formset/1.2.0/jquery.formset.js\"></script>\n<script>\n\t$('.batch_formset').formset({\n\t\taddText: 'Add',\n\t\tdeleteText: 'Remove',\n\t});\n</script>\n{% endblock %}" }, { "alpha_fraction": 0.6501446962356567, "alphanum_fraction": 0.6559334397315979, "avg_line_length": 35.13071823120117, "blob_id": "a8bd56fe9cb63dc4bb1bb5672d67ab1098c4f4c9", "content_id": "47e07a18c5f07ca6db8c46b90fcf999ae121787b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5528, "license_type": "permissive", "max_line_length": 117, "num_lines": 153, "path": "/apps/students/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\nfrom institutions.models import *\nimport uuid\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.db.models import signals\nfrom django.utils.timezone import now\n\n\n(SINGLE, MARRIED, WIDOWED, DIVORCED) = range(1, 5)\nMARITAL_STATUS_CHOICES = (\n (SINGLE, _(u'Single')),\n (MARRIED, _(u'Married')),\n (WIDOWED, _(u'Widowed')),\n (DIVORCED, 
_(u'Divorced')),\n)\n\nSEX_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n)\n\nRELIGION_CHOICES = (\n (1, 'Christianity'),\n (2, 'Islam'),\n (3, 'Others'),\n)\n\nclass StudentManager(models.Manager):\n def get_active_students(self, school):\n return super(StudentManager, self).get_queryset().filter(user_status='A', school=school)\n \n def get_graduated(self):\n return super(StudentManager, self).get_queryset().filter(user_status='G')\n \n def get_suspended(self):\n return super(StudentManager, self).get_queryset().filter(user_status='S')\n \n def get_expelled(self):\n return super(StudentManager, self).get_queryset().filter(user_status='E')\n \n def get_others(self):\n return super(StudentManager, self).get_queryset().filter(user_status='L')\n\n\nclass Student(models.Model):\n user = models.OneToOneField(User)\n photo = models.ImageField(upload_to='uploads/%Y/%m/%d', blank=True)\n last_name = models.CharField(verbose_name=_(u'Surname'), max_length=50, null=True)\n first_name = models.CharField(verbose_name=_(u'First name'), max_length=50, null=True)\n middle_name = models.CharField(verbose_name=_(u'Middle name'), max_length=50, blank=True)\n user_status = models.CharField(choices=settings.STATUS_CHOICES, max_length=1, default='A', blank=True, null=True)\n reg_number = models.CharField(max_length=30)\n student_class = models.ForeignKey('institutions.StudentClass', null=True)\n gender = models.CharField(max_length=1, null=True, choices=SEX_CHOICES)\n school = models.ForeignKey(Institution, null=True)\n birth_date = models.DateField(blank=True, null=True, db_index=True)\n address = models.CharField(max_length=100, null=True, blank=True)\n phone_number = models.CharField(max_length=15, null=True, blank=True)\n parent_phone_number = models.CharField(max_length=15, null=True, blank=True)\n year_of_admission = models.DateField(null=True, blank=True)\n religion = models.PositiveIntegerField(choices=RELIGION_CHOICES, blank=True, null=True)\n permanent_address = models.TextField(blank=True)\n state_of_residence = models.ForeignKey('states.State', null=True,blank=True, related_name='students_residence')\n country = models.ForeignKey('states.Country', related_name='country_of_residence', null=True, blank=True)\n state_of_origin = models.ForeignKey('states.State', related_name='students_origin', blank=True, null=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n slug = models.SlugField(max_length=255, blank=True, unique=True)\n special_aids = models.CharField(max_length=5, choices=settings.SPECIAL_NEEDS, default=\"No\")\n objects = StudentManager()\n\n\n class Meta:\n verbose_name = _(u'Student')\n verbose_name_plural = _(u'Students')\n ordering = ('last_name',)\n\n\n\n def __str__(self):\n names = [self.last_name]\n if self.middle_name:\n names.append(self.middle_name)\n names.append(self.first_name)\n return u' '.join(names)\n\n @property\n def full_name(self):\n \"\"\"Returns this student's full name.\"\"\"\n names = [self.last_name]\n if self.middle_name:\n names.append(self.middle_name)\n names.append(self.first_name)\n return u' '.join(names)\n\n @property\n def get_status(self):\n \"\"\"The current readable status of the student\n \n Returns:\n string -- student's status\n \"\"\"\n\n status = dict(settings.STATUS_CHOICES)\n if self.user_status:\n return status[self.user_status]\n \n @property\n def get_gender(self):\n \"\"\"The student's gender\n \n Returns:\n string -- student's gender\n \"\"\"\n\n gender = 
dict(SEX_CHOICES)\n if self.gender:\n return gender[self.gender]\n \n def get_religion(self):\n religion = dict(RELIGION_CHOICES)\n return religion[self.religion]\n\n def get_absolute_url(self):\n return reverse('students:profile', kwargs={'student_slug':self.slug})\n\n\nclass UniqueMapper(models.Model):\n reg_number = models.CharField(max_length=30)\n short_institution_name = models.CharField(max_length=5, null=True)\n unique_map = models.CharField(max_length=50, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name = _(u'Unique Mapper')\n verbose_name_plural = _(u'Unique Mappers')\n ordering = ('short_institution_name',)\n\n def __str__(self):\n return \"%s-%s\" % (self.short_institution_name.upper(), self.unique_map)\n\n\n def save(self, **kwargs):\n uuid_found = True\n while uuid_found:\n get_uuid = uuid.uuid4()\n self.unique_map = \"%s-%s\" % (self.short_institution_name, get_uuid)\n if not UniqueMapper.objects.filter(unique_map=self.unique_map).exists():\n uuid_found = False\n super(UniqueMapper, self).save(**kwargs)\n" }, { "alpha_fraction": 0.6203956604003906, "alphanum_fraction": 0.6277285218238831, "avg_line_length": 33.69822311401367, "blob_id": "92adb9cc1f45625491ce2906a96ab2e9695fa192", "content_id": "4879e613b01c6b960fad34982153664abfc3df47", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5864, "license_type": "permissive", "max_line_length": 131, "num_lines": 169, "path": "/apps/results/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom config.utils import decimal_add\nfrom django.utils.translation import ugettext_lazy as _\nfrom students.models import Student\nfrom django.conf import settings\nfrom functools import reduce\nfrom decimal import Decimal\n\n\nclass Grading(models.Model):\n institution = models.ForeignKey('institutions.Institution', null=True)\n caption = models.CharField(max_length=15, null=True, unique=True)\n grade_points = models.DecimalField(max_digits=2, decimal_places=1, null=True)\n start = models.IntegerField(null=True, default=0)\n end = models.PositiveIntegerField(null=True, default=100)\n\n class Meta:\n verbose_name = _(u'Grading')\n verbose_name_plural = _(u'Gradings')\n ordering = ('caption',)\n\n def __str__(self):\n return self.caption\n\n\nclass ScoreMixin(models.Model):\n student = models.ForeignKey(Student, null=True)\n school = models.ForeignKey('institutions.Institution', null=True)\n subject = models.ForeignKey('subjects.Subject', null=True)\n student_class = models.ForeignKey('institutions.StudentClass', null=True)\n term = models.PositiveIntegerField(choices=settings.TERM_CHOICES, null=True)\n session = models.CharField(max_length=10, blank=True, null=True)\n date_created = models.DateField(auto_now_add=True, null=True)\n date_modified = models.DateTimeField(auto_now=True, null=True)\n modified_by = models.ForeignKey('staff.Teacher', null=True, blank=True)\n\n class Meta:\n abstract = True\n\nclass Assignment(ScoreMixin):\n assignment_score = models.DecimalField(null=True, default=0.0, decimal_places=2, max_digits=6)\n\n class Meta:\n ordering = ('-date_created',)\n\n def __str__(self):\n return '%s' % (self.assignment_score)\n\n def save(self, **kwargs):\n super(Assignment, self).save(**kwargs)\n result = Result.objects.filter(student=self.student, subject=self.subject, student_class=self.student_class,term=self.term)\n if result.exists():\n rs = 
Result.objects.get(pk=result[0].id)\n rs.assignment_score = self.assignment_score\n rs.save()\n else:\n Result.objects.create(\n student=self.student,\n subject=self.subject,\n school=self.school,\n student_class=self.student_class,\n term=self.term,\n session=self.session,\n assignment_score=self.assignment_score\n )\n\n\nclass Test(ScoreMixin):\n test_score = models.DecimalField(default=0.0, decimal_places=2, max_digits=6)\n\n class Meta:\n ordering = ('-date_created',)\n\n def __str__(self):\n return '%s' % (self.test_score)\n\n def save(self, **kwargs):\n super(Test, self).save(**kwargs)\n result = Result.objects.filter(student=self.student, subject=self.subject, student_class=self.student_class,term=self.term)\n if result.exists():\n rs = Result.objects.get(pk=result[0].id)\n rs.test_score = self.test_score\n rs.save()\n else:\n Result.objects.create(\n student=self.student,\n subject=self.subject,\n school=self.school,\n student_class=self.student_class,\n term=self.term,\n session=self.session,\n test_score=self.test_score\n )\n\n\n\nclass Examination(ScoreMixin):\n exam_score = models.DecimalField(null=True, default=0.0, decimal_places=2, max_digits=6)\n\n class Meta:\n ordering = ('-date_created',)\n\n def __str__(self):\n return '%s' % (self.exam_score)\n\n def save(self, **kwargs):\n super(Examination, self).save(**kwargs)\n result = Result.objects.filter(student=self.student, subject=self.subject, student_class=self.student_class,term=self.term)\n if result.exists():\n rs = Result.objects.get(pk=result[0].id)\n rs.exam_score = self.exam_score\n rs.save()\n else:\n Result.objects.create(\n student=self.student,\n subject=self.subject,\n school=self.school,\n student_class=self.student_class,\n term=self.term,\n session=self.session,\n exam_score=self.exam_score,\n )\n\n\nclass ResultManager(models.Manager):\n\n def get_staff_results(self, request):\n return self.filter(subject__teachers=request.user.teacher)\n\n\nclass Result(ScoreMixin):\n exam_score = models.DecimalField(decimal_places=2, max_digits=6, default=0.0)\n test_score = models.DecimalField(decimal_places=2, max_digits=6, default=0.0)\n assignment_score = models.DecimalField(decimal_places=2, max_digits=6, default=0.0)\n signed_by = models.ForeignKey('staff.Teacher', related_name=\"signed_teacher\", null=True, blank=True)\n teacher_comment = models.TextField(null=True, blank=True)\n objects = ResultManager()\n\n def __str__(self):\n return \"%s %s\" % (self.student, self.get_score)\n\n class Meta:\n ordering = ('date_created',)\n\n def get_total_score(self):\n total = reduce(decimal_add, [self.exam_score,self.assignment_score, self.test_score, 0.0])\n return total\n\n @property\n def get_score(self):\n return self.get_total_score()\n\n @property\n def get_ca_score(self):\n return reduce(decimal_add, [self.assignment_score, self.test_score, 0.0])\n\n @property\n def grade(self):\n grade = None\n try:\n score = self.get_total_score()\n grades = Grading.objects.filter(institution=self.student.school)\n grade = [grade.caption.upper() for grade in grades if score in range(grade.start, grade.end+1)]\n except:\n grade = ['X', 'XX']\n return grade[0]\n\nclass ResultBatch(models.Model):\n pass\n" }, { "alpha_fraction": 0.5110024213790894, "alphanum_fraction": 0.5892420411109924, "avg_line_length": 20.526315689086914, "blob_id": "1560f5d3e224abaf5198fa58db33fd3edd0182b0", "content_id": "fce4263328227c22a4f571b9709d8202217971f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 409, "license_type": "permissive", "max_line_length": 46, "num_lines": 19, "path": "/apps/awards/migrations/0003_auto_20180423_1838.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-04-23 16:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('awards', '0002_auto_20180421_2154'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='award',\n options={'ordering': ('rating',)},\n ),\n ]\n" }, { "alpha_fraction": 0.650741457939148, "alphanum_fraction": 0.6534565687179565, "avg_line_length": 38.00611114501953, "blob_id": "2aa0f296dc7bcd12d06019693dda4111aae7a362", "content_id": "9963b2a6c9bebaac277359ed29b1a02e527be75d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19152, "license_type": "permissive", "max_line_length": 155, "num_lines": 491, "path": "/apps/results/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404, render_to_response\nfrom django.forms import formset_factory\nfrom django.forms.models import modelformset_factory\nfrom django.db.models import Count, Sum, Aggregate\nfrom django.db import transaction, IntegrityError\nfrom django.views.generic import ListView, CreateView\nfrom django.core import serializers\nfrom .forms import *\nfrom .models import *\nfrom institutions.models import StudentClass\nfrom .utils import update_results, StudentReport\nfrom django.template import RequestContext\nfrom institutions.models import Institution, StudentClass\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.utils.decorators import method_decorator\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom subjects.models import Subject\nfrom students.models import Student\nfrom config.utils import ChartData\nfrom config.models import StudentConfig\nimport logging\nimport datetime\nimport os\nfrom .utils import (import_test_from_csv,\n import_exam_from_csv,\n\t\t\t\t\timport_all_from_csv,\n\t\t\t\t\timport_assignment_from_csv)\nimport csv\nfrom django.conf import settings\nfrom config.utils import Limit\ntry:\n import json\nexcept:\n import simplejson as json\n\n\n\n@login_required\[email protected]\n#@user_passes_test(lambda u: u.teacher.is_admin, login_url=\"/auth/login/\")\ndef grading_setting(request):\n initial_data = Grading.objects.filter(institution=request.user.teacher.school)\n\n extra=1\n if len(initial_data) > 0:\n extra = 0\n GradingFormset = modelformset_factory(Grading, form=BatchGradingForm, extra=extra)\n if request.method == \"POST\":\n params = request.POST\n institution_id = params.get('institution', '')\n\n if institution_id:\n institution = get_object_or_404(Institution, pk=institution_id)\n batch_grading_formset = GradingFormset(request.POST, queryset=initial_data)\n\n if batch_grading_formset.is_valid():\n try:\n for grade in batch_grading_formset:\n grade = grade.save(commit=False)\n grade.institution = institution\n grade.save()\n update_results(institution)\n messages.success(request, \"Your grading scheme has been saved successfully and \\\n all existing results in the database has been modified 
accordingly.\")\n return HttpResponseRedirect(reverse('dashboard'))\n except IntegrityError:\n messages.error(request, \"There was an error saving your grading scheme. Please try and again or contact the Grade-X Team\")\n return HttpResponseRedirect(reverse('dashboard'))\n else:\n messages.info(request, \"Sorry, You need to register an institution to set a grading scheme\")\n return HttpResponseRedirect(reverse('dashboard'))\n else:\n gradingform = GradingForm()\n batch_grading_formset = GradingFormset(queryset=initial_data)\n\n context = {\n 'grading_form': gradingform,\n 'batch_grading_formset': batch_grading_formset\n }\n\n return render(request, 'results/grading_scheme.html', context)\n\n\nclass StaffBasedResultView(ListView):\n model = Result\n template_name = 'results/teacher_result_list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self, **kwargs):\n queryset = None\n if self.request.user.teacher.is_admin:\n queryset = Result.objects.filter(school=self.request.user.teacher.school)\n else:\n queryset = Result.objects.get_staff_results(self.request)\n params = self.request.GET\n\n #filter the results pages on the query parameters\n reg_number = params.get('reg_number','')\n class_id = params.get('class','all')\n subject_id = params.get('subject','all')\n term = params.get('term','all')\n\n if reg_number !='':\n queryset = queryset.filter(student__reg_number__icontains=reg_number)\n if class_id !='all':\n student_class = get_object_or_404(StudentClass, pk=class_id)\n queryset = queryset.filter(student_class=student_class)\n if term != 'all':\n queryset = queryset.filter(term=term)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(StaffBasedResultView, self).get_context_data(**kwargs)\n results = self.get_queryset()\n paginator = Paginator(results, self.paginated_by)\n\n page = self.request.GET.get('page')\n\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = paginator.page(1)\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n context['results'] = results\n context['count'] = len(results)\n context['classes'] = StudentClass.objects.filter(school=self.request.user.teacher.school)\n context['subjects'] = Subject.objects.filter(teachers=self.request.user.teacher)\n return context\n\n @method_decorator(login_required, user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/'))\n def dispatch(self, request, *args, **kwargs):\n return super(StaffBasedResultView, self).dispatch(request, *args, **kwargs)\n\nclass StudentBasedResultView(ListView):\n model = Result\n template_name = 'results/student_result_list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self, **kwargs):\n queryset = Result.objects.filter(student=self.request.user.student)\n params = self.request.GET\n\n #filter the results pages on the query parameters\n class_id = params.get('class','all')\n subject_id = params.get('subject','all')\n term = params.get('term','all')\n\n if class_id !='all':\n student_class = get_object_or_404(StudentClass, pk=class_id)\n queryset = queryset.filter(student_class=student_class)\n if term != 'all':\n queryset = queryset.filter(term=term)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(StudentBasedResultView, self).get_context_data(**kwargs)\n results = self.get_queryset()\n paginator = Paginator(results, self.paginated_by)\n\n page = self.request.GET.get('page')\n\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = 
paginator.page(1)\n        except EmptyPage:\n            results = paginator.page(paginator.num_pages)\n        context['results'] = results\n        context['count'] = len(results)\n        context['classes'] = StudentClass.objects.filter(school=self.request.user.student.school)\n        return context\n\n\n@login_required\n@user_passes_test(lambda u: u.teacher, login_url=\"/auth/login/\")\ndef import_data(request):\n    form = ImportForm()\n    return render(request, 'results/result_import.html', {'form': form})\n\n\n@user_passes_test(lambda u: u.teacher, login_url='/auth/login/')\n@login_required\ndef assignment_imports(request):\n    data = {}\n    if request.method == 'POST':\n        import_file = request.FILES['file']\n        get_upload_function(request, import_file, request.user.teacher, csv_func=import_assignment_from_csv)\n\n    return HttpResponseRedirect(reverse(\"results:staff-result\"))\n\n\n@user_passes_test(lambda u: u.teacher, login_url='/auth/login/')\n@login_required\ndef test_imports(request):\n    if request.method == 'POST':\n        import_file = request.FILES['file']\n        get_upload_function(request, import_file, request.user.teacher, csv_func=import_test_from_csv)\n\n    return HttpResponseRedirect(reverse(\"results:staff-result\"))\n\n\n@user_passes_test(lambda u: u.teacher, login_url='/auth/login/')\n@login_required\ndef exam_imports(request):\n    if request.method == 'POST':\n        import_file = request.FILES['file']\n        get_upload_function(request, import_file, request.user.teacher, csv_func=import_exam_from_csv)\n\n    return HttpResponseRedirect(reverse(\"results:staff-result\"))\n\n\n\ndef get_upload_function(request, csv_file, teacher, csv_func=None):\n    \"\"\"\n    Validate an uploaded CSV file and hand it over to the given import\n    function, reporting the outcome back to the user via messages.\n    \"\"\"\n    try:\n        if not csv_file.name.endswith('.csv'):\n            messages.error(request,'File is not CSV type')\n            return HttpResponseRedirect(reverse(\"results:result-import\"))\n        # If the file is too large\n        if csv_file.multiple_chunks():\n            messages.error(request,\"Uploaded file is too big (%.2f MB).\" % (csv_file.size/(1000*1000),))\n            return HttpResponseRedirect(reverse(\"results:result-import\"))\n        # else continue\n        try:\n            if csv_func:\n                count, found = csv_func(csv_file, teacher)\n                messages.success(request, \"Your records were successfully imported.\\r\\n Total Records: %s. \\r\\n Existing Records: %s\" % (count, found))\n        except ValueError as e:\n            messages.error(request, e)\n    except Exception as e:\n        messages.error(request, e)\n\n\nclass ExamCreateView(CreateView):\n    form_class = ExaminationForm\n    template_name = 'results/new_exam.html'\n    success_url = 'exam'\n\n    def form_valid(self, form):\n        self.object = form.save(commit=False)\n\n        # Make sure the logged_in staff is authorised to add records for this subject\n        if not self.object.subject.teachers == self.request.user.teacher:\n            raise ValueError(\"Sorry! 
You do not have access rights to create this records\")\n\n #retrieve the student's reg number from the request\n student_reg_number = self.request.POST.get('reg_number')\n student = get_object_or_404(Student, reg_number=student_reg_number)\n self.object.student = student\n self.object.school = student.school\n\n #save the results\n self.object.save()\n messages.success(self.request, \"%s's %s score saved successfully\" % (self.object.student, self.object.subject))\n return super(ExamCreateView, self).form_valid(form)\n \n @method_decorator(login_required, user_passes_test(lambda u: hasattr(u, 'teacher')))\n def dispatch(self, request, *args, **kwargs):\n school = request.user.teacher.school\n limit = Limit(school.config.plan, school)\n if limit.limit_reached(Result):\n raise ValueError(\"Limit reached. Upgrade your plan for more features\")\n return super(ExamCreateView, self).dispatch(request, *args, **kwargs)\n\n\nclass TestCreateView(CreateView):\n form_class = TestForm\n template_name = 'results/new_test.html'\n success_url = 'test'\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n\n # Make sure the logged_in staff is authorised to add records for this subject\n if not self.object.subject.teachers == self.request.user.teacher:\n raise ValueError(\"Sorry! You do not have access rights to create this records\")\n\n # retrieve the student's reg number from the request\n student_reg_number = self.request.POST.get('reg_number')\n student = get_object_or_404(Student, reg_number=student_reg_number)\n self.object.student = student\n self.object.school = student.school\n\n # save the results\n self.object.save()\n messages.success(self.request, \"%s's %s score saved successfully\" % (self.object.student, self.object.subject))\n return super(TestCreateView, self).form_valid(form)\n\n @method_decorator(login_required, user_passes_test(lambda u: hasattr(u, 'teacher')))\n def dispatch(self, request, *args, **kwargs):\n school = request.user.teacher.school\n limit = Limit(school.config.plan, school)\n if limit.limit_reached(Result):\n raise ValueError(\"Limit reached. Upgrade your plan for more features\")\n return super(TestCreateView, self).dispatch(request, *args, **kwargs)\n\n\nclass AssignmentCreateView(CreateView):\n form_class = AssignmentForm\n template_name = 'results/new_assignment.html'\n success_url = 'new-assignment'\n \n @method_decorator(login_required, user_passes_test(lambda u: hasattr(u, 'teacher')))\n def dispatch(self, request, *args, **kwargs):\n school = request.user.teacher.school\n limit = Limit(school.config.plan, school)\n if limit.limit_reached(Result):\n raise ValueError(\"Limit reached. 
Upgrade your plan for more features\")\n        return super(AssignmentCreateView, self).dispatch(request, *args, **kwargs)\n\n\ndef batch(request, model=None, form_type=None, extra=0, data=None):\n    # Build a bound or unbound formset for the requested batch form type,\n    # falling back to the examination form when none is given\n    batch_form = form_type if form_type else BatchExaminationForm\n    ScoreFormset = formset_factory(batch_form, extra=extra)\n    if request.POST:\n        batch_score_formset = ScoreFormset(request.POST)\n    else:\n        batch_score_formset = ScoreFormset()\n\n    context = {\n        'batch_score_formset': batch_score_formset\n    }\n    return context\n\n\[email protected]\ndef batch_results(request):\n    # initialize all forms to be used in the formset\n    forms = {\n        'examination': BatchExaminationForm,\n        'test': BatchTestForm,\n        'assignment': BatchAssignmentForm\n    }\n\n    # default to the test form until a batch type is supplied\n    batch_form = forms['test']\n\n    # create the formset\n    ScoreFormset = formset_factory(batch_form, extra=1, can_delete=True, can_order=True)\n\n    if request.method == 'POST':\n        params = request.POST\n        added_payload = {}\n        added_payload['subject'] = params.get('subject')\n        added_payload['class'] = params.get('class')\n        added_payload['term'] = params.get('term')\n        added_payload['batch_type'] = params.get('type')\n        batch_score_formset = ScoreFormset(request.POST)\n\n        # to be sure the added data from the POST request are valid\n        if added_payload['term'] and added_payload['subject'] and added_payload['class']:\n            subject = get_object_or_404(Subject, pk=added_payload['subject'])\n            student_class = get_object_or_404(StudentClass, pk=added_payload['class'])\n\n            if batch_score_formset.is_valid():\n                try:\n                    for score_form in batch_score_formset:\n                        score = score_form.save(commit=False)\n                        score.subject = subject\n                        score.student_class = student_class\n                        score.term = added_payload['term']\n                        score.save()\n                    messages.success(request, \"Your batch scores have been saved successfully\")\n                except IntegrityError:\n                    messages.error(request, \"There was an error saving your batch scores. Please try again\")\n            else:\n                messages.info(request, \"Please correct the errors in the forms below\")\n        else:\n            messages.info(request, \"Please select a subject, class and term before saving\")\n    else:\n        batch_score_formset = ScoreFormset()\n\n    return render(request, 'results/batch.html', {'batch_score_formset': batch_score_formset})\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef get_average_json(request):\n    data = {}\n    params = request.GET\n    student_class = StudentClass.objects.filter(school=request.user.teacher.school)\n    year = params.get('year', '')\n    term = params.get('term', '')\n    name = params.get('name', '')\n    current_year = datetime.date.today().year\n\n    if name == 'class_average':\n        results = Result.objects.filter(school=request.user.teacher.school)\n        if term != 'all':\n            results = results.filter(term=term)\n        if year != 'all':\n            results = results.filter(date_created__year=year)\n        if year == 'all':\n            results = results.filter(date_created__year=current_year)\n        data = ChartData.class_average(results, student_class)\n    return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required\ndef student_demographics_json(request):\n    data = None\n    params = request.GET\n    student_class = StudentClass.objects.filter(school=request.user.teacher.school)\n    d_class = params.get('class', '')\n    name = params.get('name', '')\n\n    if name == 'class_demographic':\n        students = Student.objects.filter(school=request.user.teacher.school)\n        if d_class != 'all':\n            data = ChartData.class_demographic(students, student_class.filter(id=d_class))\n        else:\n            data = ChartData.class_demographic(students, student_class)\n\n    return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required\n@user_passes_test(lambda u: u.teacher, login_url='/auth/login')\ndef edit_result(request):\n    if request.method == \"POST\":\n        params = request.POST\n        exam_score = params.get('exam_score')\n        
test_score = params.get('quiz_score')\n assignment_score = params.get('assignment_score')\n result_id = params.get('result_id')\n class_id = params.get('class_id')\n teacher_comment = params.get('comment')\n result = get_object_or_404(Result, pk=result_id)\n\n #update result details \n result.exam_score = exam_score\n result.test_score = test_score\n result.assignment_score = assignment_score\n result.modified_by = request.user.teacher\n result.signed_by = request.user.teacher\n result.student_class = get_object_or_404(StudentClass, pk=class_id)\n result.teacher_comment = teacher_comment\n result.save()\n messages.success(request, \"The result details for %s (%s) was successfully updated by %s\" % (result.student, result.subject, request.user.teacher))\n return HttpResponseRedirect(reverse('results:staff-result'))\n\n\n@user_passes_test(lambda u: u.teacher, login_url='/auth/login')\n@login_required\ndef delete(request, id):\n if request.user.teacher.is_admin:\n result = get_object_or_404(Result, pk=id)\n result.delete()\n messages.success(request, \"%s's %s result was successfully deleted\" % (result.student, result.subject))\n return HttpResponseRedirect(reverse('results:staff-result'))\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'student'), login_url='/auth/login/')\ndef student_json(request):\n params = request.GET\n term = params.get('term', '')\n name = params.get('name', '')\n class_id = params.get('class', '')\n subject_id = params.get('subject', '')\n student = get_object_or_404(Student, user=request.user)\n results = Result.objects.filter(student=student)\n data = {}\n if name == 'current_report':\n if term != 'all':\n results = results.filter(term=term)\n if subject_id != 'all':\n subject = get_object_or_404(Subject, pk=subject_id)\n results = results.filter(subject=subject)\n if class_id != 'all' and class_id != 'current':\n student_class = get_object_or_404(StudentClass, pk=class_id)\n results = results.filter(student_class=student_class)\n if class_id == 'current':\n results = results.filter(student_class=student.student_class)\n data = ChartData.current_report(request.user.student, results)\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\ndef staff_json(request):\n data = {}\n params = request.GET\n clas_id=params.get('class')\n pass\n" }, { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 27.83333396911621, "blob_id": "ac02223b9f09a596550f284e4f76e23e0f6552f4", "content_id": "a25a9a681529d74b02dd7be99250b23fad4b9233", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "permissive", "max_line_length": 53, "num_lines": 6, "path": "/apps/awards/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Award\n\[email protected](Award)\nclass AwardAdmin(admin.ModelAdmin):\n list_display = ('title', 'description', 'rating')" }, { "alpha_fraction": 0.6317907571792603, "alphanum_fraction": 0.6317907571792603, "avg_line_length": 40.5, "blob_id": "ee0861bf0392c4b99393d9c474cc2f8eaeb4ad11", "content_id": "e95538aa5235e6eebf2f69e0f9ed5b00bdaec3b5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "permissive", "max_line_length": 74, "num_lines": 12, "path": "/apps/institutions/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": 
"UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^classes/$', StudentClassListView.as_view(), name='class-list'),\n url(r'^class/new$', create_class, name='new-class'),\n url(r'^new$', create_school, name='new-school'),\n url(r'^class/json$', get_class_json, name='class-json'),\n url(r'^edit/(?P<class_id>\\d+)$', edit_class, name='edit'),\n url(r'^edit/school$', edit_institution, name='school-edit'),\n url(r'^reg-json$', reg_json, name='reg-json'),\n]" }, { "alpha_fraction": 0.7239583134651184, "alphanum_fraction": 0.7239583134651184, "avg_line_length": 11.866666793823242, "blob_id": "9918e639e7a0c9f2dd73fb43148c14cae1120b3e", "content_id": "156d28f61cea9c5b297585a1d2ef5461741a48c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "permissive", "max_line_length": 54, "num_lines": 15, "path": "/apps/payments/error.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from requests import RequestException, ConnectionError\n\nclass Error(RequestException):\n \"\"\"Summanry\"\"\"\n\n pass\n\n\nclass APIError(Error):\n\n pass\n\nclass InvalidDataError(Error):\n\n pass" }, { "alpha_fraction": 0.5818330645561218, "alphanum_fraction": 0.585106372833252, "avg_line_length": 35.49253845214844, "blob_id": "1d4916688e614d1b3cfe54c789ae306892b3c006", "content_id": "d6d36166e02c6f53e2c9b6d3dbe66b79659dc759", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2444, "license_type": "permissive", "max_line_length": 166, "num_lines": 67, "path": "/sani_app/static/js/student_dashboard.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "/*student_dashboard.js*/\nconst studentJsonUrl = \"/results/student/json\";\nconst studentChartOptions = {\n credits: {\n enabled: false\n },\n chart: {\n renderTo: 'session_container',\n type: 'column',\n height: 350,\n zoomType: 'xy'\n },\n legend: {enabled: true},\n title: {text: \"Sessional Comparative Aggregate\"},\n xAxis: {labels: {rotation: -40}},\n yAxis: {title: {text: \"Percentages\"}},\n exporting: {\n buttons: {\n contextButton: {\n enabled: true,\n menuItems: null,\n symbol: \"menu\",\n }\n }\n },\n series: [],\n};\n\nfunction loadStudentJSON(path){\n $.getJSON(path,\n function(data) {\n studentChartOptions.subtitle = {text: \"Performance Details\"};\n studentChartOptions.xAxis.categories = data['subjects'];\n studentChartOptions.series[0] = ({\"name\": 'First Term', \"data\": data['first term']});\n studentChartOptions.series[1] = ({\"name\": 'Second Term', \"data\": data['second term']});\n studentChartOptions.series[2] = ({\"name\": 'Third Term', \"data\": data['third term']});\n var charts = new Highcharts.Chart(studentChartOptions);\n });\n}\n\n//For Student Current Performance\n$('#s_subject_select').change(function(e){\n e.preventDefault();\n let klass = $('#s_class_select').val();\n let subject = $('#s_subject_select').val();\n let term = $('#s_term_select').val();\n let path = studentJsonUrl +'?name=current_report&subject='+subject+'&term='+term+'&class='+klass;\n loadStudentJSON(path);\n});\n$('#s_term_select').change(function(e){\n e.preventDefault();\n let klass = $('#s_class_select').val();\n let subject = $('#s_subject_select').val();\n let term = $('#s_term_select').val();\n let path = studentJsonUrl +'?name=current_report&subject='+subject+'&term='+term+'&class='+klass;\n 
loadStudentJSON(path);\n});\n$('#s_class_select').change(function(e){\n e.preventDefault();\n let klass = $('#s_class_select').val();\n var subject = $('#s_subject_select').val();\n var term = $('#s_term_select').val();\n var path = studentJsonUrl +'?name=current_report&subject='+subject+'&term='+term+'&class='+klass;\n loadStudentJSON(path);\n});\n\nloadStudentJSON(studentJsonUrl +'?name=current_report&subject='+$('#s_subject_select').val()+'&term='+$('#s_term_select').val()+'&class='+$('#s_class_select').val());" }, { "alpha_fraction": 0.7222440242767334, "alphanum_fraction": 0.7248594164848328, "avg_line_length": 32.39301300048828, "blob_id": "dc28b360c178fe024e06db1715080c6539695855", "content_id": "939f65a8ac6b347590f4215445727a9d0954419c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7647, "license_type": "permissive", "max_line_length": 163, "num_lines": 229, "path": "/apps/staff/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nimport datetime\nfrom django.db.models import *\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.views.generic import TemplateView, ListView\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.forms.models import model_to_dict\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom .forms import *\nfrom staff.models import Teacher, Position\nfrom staff.utils import generate_staff_id\nfrom django.core import serializers\ntry:\n\timport json\nexcept:\n\timport simplejson as json\n\n\n@login_required\ndef accounts(request):\n\treturn render(request, 'staff/accounts.html', {})\n\n\nclass StaffListView(ListView):\n\tmodel = Teacher\n\ttemplate_name = 'staff/list.html'\n\tpaginated_by = settings.PAGE_SIZE\n\n\tdef get_queryset(self):\n\t\tlogged_user = self.request.user\n\t\tqueryset = Teacher.objects.filter(school=logged_user.teacher.school)\n\t\treturn queryset\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(StaffListView, self).get_context_data(**kwargs)\n\t\tqueryset = self.get_queryset()\n\n\t\tpaginator = Paginator(queryset, self.paginated_by)\n\t\tpage = self.request.GET.get('page')\n\n\t\ttry:\n\t\t\tqueryset = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\tqueryset = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t\tqueryset = paginator.page(paginator.num_pages)\n\n\t\tcontext['teachers'] = queryset\n\t\tcontext['count'] = self.get_queryset().count()\n\t\tcontext['num_unverified'] = len([{teacher: 'unverified'} for teacher in Teacher.objects.filter(school=self.request.user.teacher.school) if not teacher.verified])\n\t\treturn context\n\n\tmethod_decorator(user_passes_test(lambda u: u.teacher.is_admin))\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(StaffListView, self).dispatch(request, *args, **kwargs)\n\nclass StaffAnalyticsView(TemplateView):\n\ttemplate_name = 'staff/analysis.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(StaffAnalyticsView, self).get_context_data(**kwargs)\n\t\tcontext['courses'] = Course.objects.filter(lecturers=self.request.user.lecturer)\n\t\tcontext['departments'] = 
get_lecturer_data(self.request)['dept']\n\t\tcontext['staff_metrics'] = staff_analytics_metrics(self.request.user)\n\t\tcontext['years'] = [i for i in range(datetime.date.today().year, 1998, -1 )]\n\t\tcontext['projects'] = Project.objects.filter(supervisor=self.request.user.lecturer).order_by('-last_modified').count()\n\n\t\treturn context\n\n\t@method_decorator(login_required)\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(StaffAnalyticsView, self).dispatch(request, *args, **kwargs)\n\n\n@login_required\ndef chart_data_json(request):\n\tdata = None\n\tparams = request.GET\n\n\tlevel = params.get('level', '')\n\tcourse_id = params.get('course', '')\n\tsemester = params.get('semester', '')\n\tdept_id = params.get('dept', 'all')\n\tyear = params.get('year', '')\n\tname = params.get('name', '')\n\tcourse = Course.objects.get(pk=course_id)\n\tif course:\n\t\tif name == 'course_data':\n\t\t\tdept=None\n\t\t\tresults = None\n\t\t\tif dept_id != 'all':\n\t\t\t\tdept = Department.objects.get(pk=dept_id)\n\t\t\t\tresults = Result.objects.filter(course__lecturers=request.user.lecturer,\n\t\t\t\t\t\t\t\t\t\t\t\tcourse=course,\n\t\t\t\t\t\t\t\t\t\t\t\tdepartment=dept,\n\t\t\t\t\t\t\t\t\t\t\t\tdate_created__year=year)\n\t\t\t\tdata = ResultData.get_result_by_lecturer(results)\n\t\t\telse:\n\t\t\t\tresults = Result.objects.filter(course__lecturers=request.user.lecturer,\n\t\t\t\t\t\t\t\t\t\t\t\tcourse=course, date_created__year=year)\n\t\t\t\tdata = ResultData.get_result_by_lecturer(results)\n\t\telif name == 'course_average_by_dept':\n\t\t\tdata = ResultData.dept_avg_score(request.user.lecturer, course, year=year)\n\treturn HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required\ndef register(request):\n\tif request.method == \"POST\":\n\t\tschool = request.user.teacher.school\n\t\tform = StaffCreationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\ttry:\n\t\t\t\tstaff = form.save(commit=False)\n\t\t\t\tstaff.school = school; staff.save()\n\t\t\t\tmessages.success(request, \"%s's record has been successfully created.\" % (staff))\n\t\t\t\treturn HttpResponseRedirect(reverse('staff:new-staff'))\n\t\t\texcept Exception as e:\n\t\t\t\tmessages.error(request, e)\n\telse:\n\t\tform = StaffCreationForm()\n\treturn render(request, 'staff/register.html', {'form': form})\n\n\n@login_required\ndef staff_profile(request, staff_slug):\n template_name = 'staff/staff_profile.html'\n staff = get_object_or_404(Teacher, slug=staff_slug)\n context = {'staff': staff}\n return render(request, template_name, context)\n\n\n@login_required\ndef edit_profile(request):\n\tcontext ={}\n\tteacher = get_object_or_404(Teacher, user=request.user)\n\ttemplate_name = 'staff/edit.html'\n\tif request.method == 'POST':\n\t\tform = StaffForm(request.POST, request.FILES, instance=teacher)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tmessages.success(request, 'Your profile has been successfully updated.')\n\t\t\treturn HttpResponseRedirect(reverse('staff:profile', kwargs={'staff_slug': teacher.slug}))\n\telse:\n\t\tform = StaffForm(instance=teacher)\n\tcontext['form'] = list(form)\n\tcontext['staff'] = teacher\n\treturn render(request, template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef offices(request):\n\tpositions = Position.objects.filter(school=request.user.teacher.school)\n\tteachers = Teacher.objects.filter(school=request.user.teacher.school)\n\n\tofficers = [staff for staff in teachers if 
staff.position in positions]\n\ttemplate_name = 'staff/offices.html'\n\tif request.method == 'POST':\n\t\tform = PositionForm(request.POST)\n\t\tif form.is_valid():\n\t\t\toffice = form.save(commit=False)\n\t\t\toffice.school = request.user.teacher.school\n\t\t\toffice.save()\n\t\t\tmessages.success(request, \"The Office '%s' has been created\" % (office))\n\t\t\treturn HttpResponseRedirect(reverse('staff:office'))\n\telse:\n\t\tform = PositionForm()\n\tcontext = {\n\t\t'officers': officers,\n\t\t'form': form\n\t}\n\treturn render(request, template_name, context)\n\n\[email protected]\ndef staff_id_json(request):\n    teacher = request.user.teacher\n    if request.method == 'GET':\n        staff_id = generate_staff_id(teacher.school)\n        data = {\n            \"staff_id\": staff_id,\n        }\n        return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\[email protected]\ndef staff_admin_json(request):\n\tstaff_id = request.GET.get('staff_id')\n\tschool = request.user.teacher.school\n\tupdate_message = []\n\tlevel = 1\n\tteacher = get_object_or_404(Teacher, pk=staff_id)\n\n\t#Just checking to make sure the teacher is admin, then we toggle the status\n\tif teacher.is_admin:\n\t\tteacher.is_admin = False\n\t\tteacher.save()\n\t\tmessages.success(request, \"%s's staff admin status has been revoked\" % (teacher))\n\telse:\n\t\tif max_admin_reached(request, school):\n\t\t\tmessages.error(request, \"You have reached the maximum admins for your school\")\n\t\t\tlevel = 0\n\t\telse:\n\t\t\tteacher.is_admin = True\n\t\t\tteacher.save()\n\t\t\tmessages.success(request, \"%s has successfully been granted staff admin access\" % (teacher))\n\tfor message in messages.get_messages(request):\n\t\tupdate_message.append(\n\t\t\t{\n\t\t\t\t\"level\": level,\n\t\t\t\t\"message\": message.message,\n\t\t\t\t\"tags\": message.tags\n\t\t\t}\n\t\t)\n\tdata = {\n\t\t\"message\": update_message\n\t}\n\treturn HttpResponse(json.dumps(data), content_type=\"application/json\")\n\ndef max_admin_reached(request, school):\n\tcurrent_admins = len([teacher for teacher in Teacher.objects.filter(school=school) if teacher.is_admin])\n\treturn current_admins >= school.config.plan.max_admin\n" }, { "alpha_fraction": 0.5484460592269897, "alphanum_fraction": 0.6142596006393433, "avg_line_length": 25.047618865966797, "blob_id": "a874fc0c92e35fb78702dcd148edc8ace4694c5d", "content_id": "c4cdd6c44ea47c6fa58f7175f3391f52d5cdec0c", "detected_licenses": [ "Apache-2.0" 
], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "permissive", "max_line_length": 92, "num_lines": 15, "path": "/apps/subjects/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Subject\nfrom staff.models import Teacher\n\nclass SubjectCreationForm(forms.ModelForm):\n def __init__(self, school, *args, **kwargs):\n super(SubjectCreationForm, self).__init__(*args, **kwargs)\n self.fields['teachers'].queryset = Teacher.objects.filter(school=school)\n self.fields['name'].widget.attrs = {'placeholder': 'Subject Title e.g. Mathematics'}\n self.fields['short_code'].widget.attrs = {'placeholder': 'Subject Code e.g. MATH'}\n self.fields['head_teacher'].queryset = Teacher.objects.filter(school=school)\n\n class Meta:\n model = Subject\n fields = ('name', 'short_code', 'teachers', 'head_teacher')" }, { "alpha_fraction": 0.5984721183776855, "alphanum_fraction": 0.6094652414321899, "avg_line_length": 30.20930290222168, "blob_id": "d288ce9fb3e53efcf16a4e691a5c729ae2af3a2d", "content_id": "2f244fa5dd646d490b8acbe81f7e224eafb07e65", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5367, "license_type": "permissive", "max_line_length": 106, "num_lines": 172, "path": "/apps/staff/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import slugify\nfrom institutions.models import StudentClass\nfrom django.utils.text import slugify\nimport uuid\n\nMARITAL_STATUS_CHOICES = (\n ('S', _(u'Single')),\n ('M', _(u'Married')),\n ('W', _(u'Widowed')),\n ('D', _(u'Divorced')),\n)\n\nSEX_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n)\n\n\nclass Position(models.Model):\n \"\"\"\n The position the lecturer occupies within the institution.\n \"\"\"\n school = models.ForeignKey('institutions.Institution', null=True)\n name = models.CharField('Office Title', max_length=100)\n reports_to = models.ForeignKey('self', null=True, blank=True, related_name='reports')\n description = models.TextField(blank=True)\n\n class Meta:\n verbose_name = _(u'Position')\n verbose_name_plural = _(u'Positions')\n ordering = ('name',)\n\n def __str__(self):\n return self.name\n\n\nclass Teacher(models.Model):\n TEACHER_TITLE = (\n ('Mr.', 'Mr'),\n ('Mrs', 'Mrs'),\n ('Miss', 'Miss'),\n ('Dr.', 'Dr'),\n ('Prof', 'Prof'),\n ('Mallam', 'Mallam'),\n )\n user = models.OneToOneField(User)\n school = models.ForeignKey('institutions.Institution', null=True)\n title = models.CharField(max_length=20, blank=True, null=True, choices=TEACHER_TITLE)\n staff_id = models.CharField(max_length=50, blank=True, null=True)\n first_name = models.CharField(max_length=50, blank=True, null=True)\n last_name = models.CharField(max_length=50, blank=True, null=True)\n qualification = models.ManyToManyField('staff.Qualification', blank=True)\n photo = models.ImageField(upload_to=\"uploads/%Y/%m/%d\", null=True, blank=True)\n gender = models.CharField(max_length=2, null=True, blank=True, choices=SEX_CHOICES)\n marital_status = models.CharField(max_length=2, null=True, blank=True, choices=MARITAL_STATUS_CHOICES)\n email = models.EmailField(blank=True, null=True)\n grade_level = models.PositiveIntegerField(null=True, blank=True)\n phone_number = models.CharField(max_length=20, null=True, 
blank=True)\n position = models.ForeignKey(Position, null=True, blank=True)\n is_admin = models.BooleanField(default=False)\n slug = models.SlugField(max_length=250, unique=True, null=True, blank=True)\n\n class Meta:\n ordering = ('first_name',)\n\n def __str__(self):\n return \"%s %s\" % (self.first_name, self.last_name)\n\n def save(self, *args, **kwargs):\n orig = slugify(self.last_name)\n if self.slug:\n self.slug = \"%s-%s\" % (orig, uuid.uuid4())\n else:\n self.slug = \"%s-%s\" % (orig, uuid.uuid4())\n super(Teacher, self).save(*args, **kwargs)\n\n @property\n def get_marital_status(self):\n marital_status = {'S': 'Single', 'M': 'Married', 'W': 'Widowed', 'D': 'Divorces'}\n if self.marital_status:\n return marital_status[self.marital_status]\n \n @property\n def get_gender(self):\n gender = {'F': 'Female', 'M': 'Male'}\n if self.gender:\n return gender[self.gender]\n\n @property\n def verified(self):\n return get_profile_complete(self) > 80\n\n @property\n def is_form_teacher(self):\n return self in get_form_teachers(self.school)\n\n\nclass Qualification(models.Model):\n QUALIFICATION_CHOICES = (\n ('PhD', 'PhD'),\n ('MSc', 'MSc'),\n ('BSc', 'BSc'),\n ('HND', 'HND'),\n ('OND', 'OND'),\n ('FSLC', 'FSLC'),\n )\n caption = models.CharField(max_length=10, choices=QUALIFICATION_CHOICES)\n institution = models.ForeignKey(\"institutions.Institution\")\n course = models.CharField(max_length=200)\n specify = models.CharField(\"Please specify if others\", max_length=200, null=True, blank=True)\n year_of_degree = models.DateField()\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.caption\n \n @property \n def percentage_complete(self):\n percent = get_profile_complete(self)\n return percent\n\n\n\ndef get_profile_complete(self):\n '''\n Get the percentage completeness of a staff profile\n '''\n percent = { \n 'title': 5, \n 'last_name': 10, \n 'email': 10, \n 'first_name': 10, \n 'photo': 10,\n 'qualification': 20, \n 'phone_number': 5,\n 'gender': 10,\n 'marital_status': 10,\n 'staff_id': 10,\n\n }\n\n total = 0\n if self.title:\n total += percent.get('title', 0)\n if self.last_name:\n total += percent.get('last_name', 0)\n if self.email:\n total += percent.get('email', 0)\n if self.first_name:\n total += percent.get('first_name', 0)\n if self.photo:\n total += percent.get('photo', 0)\n if self.qualification:\n total += percent.get('qualification', 0)\n if self.phone_number:\n total += percent.get('phone_number', 0)\n if self.gender:\n total += percent.get('gender', 0)\n if self.marital_status:\n total += percent.get('marital_status', 0)\n if self.staff_id:\n total += percent.get('staff_id', 0)\n\n #and so on\n return int(total)\n\n\ndef get_form_teachers(school):\n return [klass.form_teacher for klass in StudentClass.objects.filter(school=school)]" }, { "alpha_fraction": 0.6700336933135986, "alphanum_fraction": 0.6700336933135986, "avg_line_length": 54.75, "blob_id": "30e61ed6651353008d30beb08027568559e07c87", "content_id": "9c4df0e49a3c91e21689440566d11511fff650b5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 891, "license_type": "permissive", "max_line_length": 112, "num_lines": 16, "path": "/apps/insights/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', insights, name='home'),\n # url(r'^new-assessment$', create_assessment, name='create-assessment'),\n 
url(r'^assessment/(?P<uuid>\\w+)$', load_assessment, name='load-assessment'),\n    url(r'^assessments$', get_assessments, name='assessments'),\n    url(r'^student-json$', load_student_json, name='load-student-json'),\n    url(r'^compare-students$', compare_students, name='compare-students'),\n    url(r'^compare-api$', compare_api, name='compare-api'),\n    url(r'^assessment-api$', assessment_api, name='assessment-api'),\n    url(r'^update-group-api$', update_group_api, name='update-group-api'),\n    url(r'^delete-group-api/(?P<group_id>\\d+)$', delete_group_api, name='delete-group-api'),\n    url(r'^delete-assessment-api/(?P<assessment_id>\\d+)$', delete_assessment_api, name='delete-assessment-api'),\n]" }, { "alpha_fraction": 0.4508196711540222, "alphanum_fraction": 0.464232474565506, "avg_line_length": 57.36231994628906, "blob_id": "287e11669c55932b7d717584e3ef83fae62e2e08", "content_id": "f0565100f1ae155ece647cbe9bab652b1e75fde4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4026, "license_type": "permissive", "max_line_length": 168, "num_lines": 69, "path": "/sani_app/templates/_help.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% load static %}\n\n<div id=\"id_help\" class=\"modal fade\" role=\"dialog\">\n    <div class=\"modal-dialog\">\n        <!-- Modal content-->\n        <div class=\"modal-content\">\n            <div class=\"modal-header\" style=\"text-align: center; background: rgb(17, 153, 177); \">\n                <h4 class=\"modal-title\" style=\"font-weight: 400; color: #fff;\">Sanify FAQs</h4>\n            </div>\n            <div class=\"modal-body\">\n                <div class=\"row\" style=\"width: 100%;\">\n                    <div class=\"form-group\" style=\"margin-left: 5px;\">\n                        \n                    </div>\n                    <div class=\"cat-1\">\n                        <ul>\n                            <li>Menu</li>\n                            <li class=\"tablinks\" onclick=\"openTab(event, 'reg-tab')\">Getting Started...</li>\n                            <li class=\"tablinks\" onclick=\"openTab(event, 'clas-tab')\">How do I upload multiple results?</li>\n                            <li class=\"tablinks\" onclick=\"openTab(event, 'clus-tab')\">\n                                How can I get the registration number of students easily\n                            </li>\n                        </ul>\n                    </div>\n                    <div class=\"cat-2 help scroll\" id=\"reg-tab\">\n                        Visit the <a href=\"https://sani.com.ng\">sanify site</a> and register your school to create a portal for your school.\n                        <img src=\"{% static 'img/reg.jpg' %}\" alt=\"registration\" width=\"100%\">\n                        Next is to create a grading configuration for your school by clicking the grading configuration\n                        menu.\n                        <hr>\n                        <img src=\"{% static 'img/grade.jpg' %}\" alt=\"\" width=\"100%\">\n                        Add subjects and classes to the account\n                        <img src=\"{% static 'img/class.jpg' %}\" alt=\"\" width=\"100%\">\n                        <hr>\n                        <img src=\"{% static 'img/sub.jpg' %}\" alt=\"\" width=\"100%\">\n                        <hr>\n                        And that's it. 
You're all set to get going with Sanify.\n                    <img src=\"{% static 'img/set.jpg' %}\" alt=\"\" width=\"100%\">\n                    <hr>\n                    You can start entering student and staff records into the system now\n                </div>\n                <div class=\"cat-2 help scroll\" id=\"clas-tab\">\n                    To upload multiple results, Sanify can accept a CSV (Comma Separated Values) file.\n                    A CSV file is like a table of records, but instead of border lines, it uses commas and\n                    a new line for each record.\n                    <hr>\n                    <img src=\"{% static 'img/csv_file.jpg' %}\" alt=\"\" width=\"100%\">\n                    <hr>\n                    The subject and class are entered using the short codes provided upon registration/creation of each.\n                </div>\n                <div class=\"cat-2 help scroll\" id=\"clus-tab\">\n                    Sanify provides an <a href=\"/reports/export\">export module</a> where all students, staff, subjects and classes can be easily\n                    printed out in PDF format for easy referencing. These records can be further filtered as the case\n                    may be.\n                    <hr>\n                    <img src=\"{% static 'img/export.jpg' %}\" alt=\"\" width=\"100%\"> \n                </div>\n            </div>\n        </div>\n        <div class=\"modal-footer\">\n            <button type=\"button\" class=\"status cancel\" data-dismiss=\"modal\" style=\"padding: 4px 30px; border: none; background: rgb(172, 172, 172);\">Close</button>\n        </div>\n    </div>\n    \n    </div>\n</div>" }, { "alpha_fraction": 0.6174242496490479, "alphanum_fraction": 0.625, "avg_line_length": 26.375, "blob_id": "246007f91516132ee4c66b70a7402b52246e95e9", "content_id": "92649ab6f725a54fe2d8f1c3c13113e84c98fdec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "permissive", "max_line_length": 69, "num_lines": 40, "path": "/apps/states/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Country(models.Model):\n    code = models.CharField(max_length=3)\n    name = models.CharField(max_length=50)\n\n    class Meta:\n        ordering = ('name',)\n        verbose_name = _(u'Country')\n        verbose_name_plural = _(u'Countries')\n\n    def __str__(self):\n        return self.name\n\n\nclass State(models.Model):\n    code = models.CharField(max_length=3)\n    name = models.CharField(max_length=30)\n\n    class Meta:\n        ordering = ('name',)\n        verbose_name = _(u'Nigerian State')\n        verbose_name_plural = _(u'Nigerian States')\n\n    def __str__(self):\n        return self.name\n\n\nclass LGA(models.Model):\n    state = models.ForeignKey(State, related_name='local_govt_areas')\n    name = models.CharField(max_length=30)\n\n    class Meta:\n        ordering = ('state', 'name',)\n        verbose_name = _(u'Nigerian Local Government Area')\n        verbose_name_plural = _(u'Nigerian Local Government Areas')\n\n    def __str__(self):\n        return self.name\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 14.5, "blob_id": "1fb05440305031ab9510f87888d0729bcee818b3", "content_id": "0ac47a3185352593bd56df770f61d22d4affbf91", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "permissive", "max_line_length": 28, "num_lines": 8, "path": "/apps/appraisals/utils.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# utils.py\nclass AppraisalFlow(object):\n\n    def __init__(self):\n        pass\n\n    def __repr__(self):\n        return \"AppraisalFlow()\"\n" }, { "alpha_fraction": 0.6023013591766357, "alphanum_fraction": 0.6209996342658997, "avg_line_length": 59.456520080566406, "blob_id": 
"a3b4b9f8db4961ab8b1d509766515d6326fdbd7e", "content_id": "35b2012b02ca91bd0d781f5ff0029997b8da4c18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2781, "license_type": "permissive", "max_line_length": 234, "num_lines": 46, "path": "/apps/config/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Config',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('current_performance', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('previous_performance', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('current_class_average', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('previous_class_average', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('show_attendance', models.BooleanField(default=False, verbose_name='Use psychomotors in report')),\n ('automatic_update', models.BooleanField(default=False, verbose_name='Update Class Automatically')),\n ('session_average', models.BooleanField(default=False, verbose_name='Generate a sessional average')),\n ('use_position', models.BooleanField(default=True, verbose_name='Generate position in report')),\n ('update', models.CharField(choices=[('Quarterly', 'Quarterly'), ('Anually', 'Annually')], default='Card', max_length=20, verbose_name='How often do you want the performance updated?')),\n ('payment_method', models.CharField(choices=[('Bank', 'Pay Through Bank Teller'), ('Card', 'Pay Via Card'), ('Cash', 'On Site Payment')], default='Quarterly', max_length=20, verbose_name='How would you like to pay?')),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='StudentConfig',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('current_performance', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('previous_performance', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('current_class_average', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('previous_class_average', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7945945858955383, "alphanum_fraction": 0.7945945858955383, "avg_line_length": 22.125, "blob_id": "9ccae1b7cc6ef08f27f7794171bd01257afcec22", "content_id": "39d85760717e3546586e4993c624bb4e3730bcd6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "permissive", "max_line_length": 39, "num_lines": 8, "path": "/apps/states/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Country, State, LGA\n\nadmin.site.register(Country)\nadmin.site.register(State)\nadmin.site.register(LGA)\n\n# Register your models 
here.\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.650905430316925, "avg_line_length": 40.45833206176758, "blob_id": "690c8b50c804a31dc5659140d7ac539d4a279640", "content_id": "4732d29b9f165e18ff6c519f12c9ee402b54a36d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "permissive", "max_line_length": 150, "num_lines": 24, "path": "/apps/payments/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nimport datetime\nfrom .models import TokenApplication\n\n\nclass ApplicationForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(ApplicationForm, self).__init__(*args, **kwargs)\n        self.fields['session'].widget.attrs = {'class': 'form-control input-sm in-search', 'placeholder': 'Enter current session here e.g. 2012/2013'}\n        self.fields['term'].widget.attrs = {'class': 'form-control input-sm in-search'}\n\n    class Meta:\n        model = TokenApplication\n        fields = ('session', 'term')\n\nclass TellerPaymentForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(TellerPaymentForm, self).__init__(*args, **kwargs)\n        self.fields['teller_number'].widget.attrs = {'class': 'form-control input-sm in-search'}\n        self.fields['teller_date'].widget.attrs = {'class': 'form-control input-sm in-search'}\n\n    class Meta:\n        model = TokenApplication\n        fields = ('teller_number', 'teller_date')" }, { "alpha_fraction": 0.6578640341758728, "alphanum_fraction": 0.659468948841095, "avg_line_length": 37.290504455566406, "blob_id": "77598ce0191bf3f361050a9ee2b72a0c66d13883", "content_id": "bbe30215a165bab2d911413365278e757b8c3424", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6854, "license_type": "permissive", "max_line_length": 145, "num_lines": 179, "path": "/apps/institutions/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import TemplateView, ListView\nfrom .models import StudentClass\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.core import serializers\nfrom students.models import Student\nfrom itertools import chain\nfrom django.db import transaction\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate\nfrom config.models import Config\nfrom staff.models import Teacher\nfrom payments.models import Plan\nfrom config.utils import pin_generator\nfrom staff.utils import generate_staff_id\nfrom datetime import datetime\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n\nclass StudentClassListView(ListView):\n    model = StudentClass\n    template_name = 'institutions/classes.html'\n    context_object_name = 'classes'\n\n    def get_queryset(self):\n        queryset = super(StudentClassListView, self).get_queryset()\n        queryset = queryset.filter(school=self.request.user.teacher.school)\n        return queryset\n\n    @method_decorator(login_required)\n    @method_decorator(user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/'))\n    def dispatch(self, request, *args, **kwargs):\n        return super(StudentClassListView, self).dispatch(request, *args, 
**kwargs)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef create_class(request):\n    template_name = 'institutions/new_class.html'\n    teacher = request.user.teacher\n\n    if request.method == 'POST':\n        form = ClassCreationForm(teacher.school, request.POST, request.FILES)\n        if form.is_valid():\n            my_class = form.save(commit=False)\n            my_class.school = request.user.teacher.school\n            my_class.save()\n            messages.success(request, \"Class '%s' was created successfully\" % (my_class))\n            return HttpResponseRedirect(reverse('institution:class-list'))\n    else:\n        form = ClassCreationForm(teacher.school)\n    return render(request, template_name, {'form': form})\n\n\[email protected]\ndef create_school(request):\n    template_name = 'institutions/signup.html'\n    if request.method == 'POST':\n        params = request.POST\n        f_name = params.get('f_name')\n        l_name = params.get('l_name')\n        form = SchoolCreationForm(request.POST)\n        if form.is_valid():\n            #Run this block to ensure that everything goes well before\n            #committing the create user transaction into the database, else\n            #everything should be rolled back\n            try:\n                #Create the School Profile\n                school = form.save(commit=False)\n                school.save()\n\n                #Create a configuration setup for the school\n                plan = Plan.objects.filter(amount=0.0)[0]\n                Config.objects.create(school=school, plan=plan)\n            except Exception as e:\n                messages.error(request, e)\n                return HttpResponseRedirect(reverse('institution:new-school'))\n\n            #Create user login details\n            user = User.objects.create_user(form.cleaned_data['email'], password=form.cleaned_data['email'], first_name=f_name, last_name=l_name)\n            user.save()\n\n            #Create the admin staff\n            teacher = Teacher(\n                user=user,\n                first_name=f_name,\n                email=form.cleaned_data['email'],\n                last_name=l_name,\n                gender='M',\n                school=school,\n                is_admin=True\n            )\n            teacher.staff_id = generate_staff_id(school)\n            teacher.save()\n\n            user.backend = 'django.contrib.auth.backends.ModelBackend'\n            login(request, user) #login the user\n            messages.success(request, \"Congratulations! 
Your school, '%s', was created successfully\" % (school))\n            return HttpResponseRedirect(reverse('dashboard'))\n    else:\n        form = SchoolCreationForm()\n    return render(request, template_name, {'form': form})\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef get_class_json(request):\n    data = None\n    params = request.GET\n    class_id = params.get('class_id')\n\n    #Get the class with the id\n    student_class = StudentClass.objects.get(id=class_id)\n    #filter students in the class\n    students = Student.objects.filter(student_class=student_class)\n\n    data = {\n        'current_class': serializers.serialize('json', [student_class, ]),\n        'students': serializers.serialize('json', students)\n    }\n\n    return HttpResponse(json.dumps(data), content_type='application/json')\n\n\[email protected]\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef edit_class(request, class_id):\n    teacher = request.user.teacher\n    template_name = 'institutions/class_edit.html'\n    klazz = StudentClass.objects.get(pk=class_id)\n    if request.method == \"POST\":\n        form = ClassCreationForm(teacher.school, request.POST, request.FILES, instance=klazz)\n        if form.is_valid():\n            form.save()\n            messages.success(request, \"The class '%s' was successfully updated\" % (klazz))\n            return HttpResponseRedirect(reverse('institution:class-list'))\n    else:\n        form = ClassCreationForm(teacher.school, instance=klazz)\n    return render(request, template_name, {'form': form, 'class': klazz})\n\n\[email protected]\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef edit_institution(request):\n    teacher = request.user.teacher\n    template_name = 'institutions/institution_edit.html'\n    institution = Institution.objects.get(pk=teacher.school.id)\n    if request.method == \"POST\":\n        form = SchoolForm(request.POST, request.FILES, instance=institution)\n        if form.is_valid():\n            form.save()\n            messages.success(request, \"School records successfully updated\")\n            return HttpResponseRedirect(reverse('dashboard'))\n    else:\n        form = SchoolForm(instance=institution)\n    return render(request, template_name, {'form': form, 'institution': institution})\n\n\[email protected]\ndef reg_json(request):\n    if request.method == 'GET':\n        reg_id = 'SANIFY_'+pin_generator(length=15)\n        data = {\n            \"reg_id\": reg_id,\n            \"success\": \"Successful!\",\n            \"status\": 200,\n            \"is_unique\": True,\n            \"date_created\":\"%s\" % (datetime.today())\n        }\n        return HttpResponse(json.dumps(data), content_type=\"application/json\")\n" }, { "alpha_fraction": 0.7401130199432373, "alphanum_fraction": 0.7401130199432373, "avg_line_length": 28.5, "blob_id": "73b531c3189014a876629bee6abcb967197e69d5", "content_id": "0fc7a5f6767ddd46fefd94f7c94a7598f02c1a65", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "permissive", "max_line_length": 57, "num_lines": 6, "path": "/apps/subjects/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n\[email protected](Subject)\nclass SubjectAdmin(admin.ModelAdmin):\n    list_display = ('name', 'short_code', 'date_entered')\n" }, { "alpha_fraction": 0.6753246784210205, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 54, "blob_id": "79923e1aff645f740efa2870a172a9a687c2dc73", "content_id": "2f75b0bac85c1a085c46b1235634df31c2c70760", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "permissive", "max_line_length": 132, "num_lines": 7, "path": "/apps/states/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\nfrom states.models import Country, State, LGA\n\nurlpatterns = patterns('states.views',\n url(r'^state/find-by-country/$', 'filter', {'model_class': State, 'field_name': 'country__pk'}, name='states_state_by_country'),\n url(r'^lga/find-by-state/$', 'filter', {'model_class': LGA, 'field_name': 'state__pk'}, name='states_lga_by_state'),\n)\n" }, { "alpha_fraction": 0.68034827709198, "alphanum_fraction": 0.6865671873092651, "avg_line_length": 35.45454406738281, "blob_id": "727e612c0758aef46ec8f984a0fbe3dbb76b347e", "content_id": "704bff514dd2852ba834b9f4957a3ce9d56c22dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "permissive", "max_line_length": 105, "num_lines": 22, "path": "/apps/subjects/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass SubjectManager(models.Manager):\n pass\n\nclass Subject(models.Model):\n school = models.ForeignKey('institutions.Institution', null=True)\n name = models.CharField(max_length=150)\n short_code = models.CharField(max_length=10, null=True, blank=True)\n teachers = models.ManyToManyField('staff.Teacher')\n head_teacher = models.ForeignKey('staff.Teacher', related_name=\"head_teacher\", null=True, blank=True)\n date_entered = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n objects = SubjectManager()\n\n def __str__(self):\n return \"(%s) %s\" % (self.short_code, self.name)\n\n class Meta:\n unique_together = ('name', 'short_code')\n ordering = ('name',)\n\n\n" }, { "alpha_fraction": 0.5533784627914429, "alphanum_fraction": 0.5592854022979736, "avg_line_length": 30.41176414489746, "blob_id": "c19ca01ffe717474ac44af586e62ad47e8436cf9", "content_id": "e72d31e87ac4a0ab98674dbe2d3ae00388c6a1e1", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6941, "license_type": "permissive", "max_line_length": 139, "num_lines": 221, "path": "/sani_app/static/js/assessment.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "// Retrieve the parameters in the the URL\n// for executing the GET request \nconst DJANGO_STATIC_URL = '{{ STATIC_URL }}';\nlet JSONURL = \"/insights/assessment-api\";\nlet currentURL = document.location.href;\nlet pathParams = currentURL.split('/')[4].split('?')[1]\nvar assessment = document.location.pathname.split('/')[3];\n\nfunction assessmentHandler(data) {\n //get the data\n var content = '';\n let students = data.students;\n let assessment = data.assessment;\n $(\"#heading_container\").html(`\n <h4 style=\"margin-bottom: 0; font-weight: 300;\">Assessment: <span style=\"font-weight: bold;\">${assessment.name}</span></h4>\n <h4 style=\"margin-top: 8px; font-weight: 300;\">Assessment Type: <span style=\"font-weight: bold;\">${assessment.type}</span></h4>\n `);\n let innerContent;\n\n if (students.length == 0) {\n innerContent = `<tr>\n <td style=\"text-align: center; font-size: 16px;\" colspan=\"14\">No Available Records To Show.</td>\n </tr>`\n content += innerContent;\n $(\"#assessment_table\").html(content);\n return;\n }\n let snID 
= 1;\n    for (student of students) {\n        innerContent = `\n        <tr>\n            <td class=\"zui-sticky-col-1\">${snID}.</td>\n            <td class=\"zui-sticky-col-f-name\">${student.first_name}</td>\n            <td class=\"zui-sticky-col-l-name\">${student.last_name}</td>\n            <td class=\"zui-sticky-col-reg\">${student.reg_number}</td>\n            <td>${student.gender}</td>\n            <td>${student.dob}</td>\n            <td>${student.state}</td>\n            <td>${student.nationality}</td>\n            <td>${student.class}</td>\n            <td>${student.aids}</td>\n            <td>No</td>\n            <td>Yes</td>\n            <td>No</td>\n            <td>${student.ranking}</td>\n            <td>${student.performance.average}</td>\n            <td>${student.performance.total}</td>\n            <td>${student.performance.grade}</td>\n        </tr>\n        `\n        content += innerContent;\n        snID += 1;\n    }\n    $(\"#assessment_table\").html(content);\n}\n\n$(document).ready(function() {\n    let path = `${JSONURL}?assessment=${assessment}`;\n    loadAssessment(path);\n});\n\nvar studentChartOptions = {\n    credits: {\n        enabled: false\n    },\n    chart: {\n        type: 'column',\n        height: 280,\n        zoomType: 'xy'\n    },\n    legend: {enabled: true},\n    xAxis: {labels: {rotation: -40}},\n    yAxis: {title: {text: \"Percentages\"}},\n    exporting: {\n        buttons: {\n            contextButton: {\n                enabled: true,\n                menuItems: null,\n                symbol: \"menu\",\n            }\n        }\n    },\n    series: [],\n};\n\nfunction loadStudentJSON(path){\n    $.getJSON(path, loadData);\n}\n\nif (pathParams != undefined) {\n    $(document).ready(loadStudentJSON(JSONURL+\"?\"+pathParams));\n}\n\n$(\"input[name=gender]\").on('click', function(e) {\n    filterSet(e);\n});\n\n$(\"input[name=term]\").on('click', function(e) {\n    filterSet(e);\n});\n\n$(\"input[name=class]\").on('click', function(e) {\n    filterSet(e);\n});\n\n$(\"input[name=aids]\").on('click', function(e) {\n    filterSet(e);\n});\n\n$(\"input[name=rank]\").on('change', function(e) {\n    filterSet(e);\n});\n// filter for average\n$(\"input[name=averages]\").on('change', function(e) {\n    filterSet(e);\n});\n\n$(\"select[name=sorter]\").on('change', function(e) {\n    filterSet(e);\n});\n\n\nfunction filterSet(event) {\n    /* \n    SUMMARY: \n    Filter function for the students assessment data \n    \n    ARGS: {Event event}\n\n    RETURN: {undefined}\n    */\n\n    //declare lists to hold the various filter values\n    let class_list = [];\n    let term_list = [];\n    let gender_list = [];\n    let state_list = [];\n    let aids_list = [];\n\n    //respond with an AJAX call when state changes for the various\n    //filters\n    $('input[name=term]:checked').each(function() {\n        let term_id = parseInt(this.value);\n        term_list.push(term_id);\n    });\n    $('input[name=class]:checked').each(function() {\n        let class_id = parseInt(this.value);\n        class_list.push(class_id);\n    });\n    \n    $('input[name=gender]:checked').each(function() {\n        let genderId = this.value;\n        gender_list.push(genderId);\n    });\n    \n    $('input[name=aids]:checked').each(function() {\n        let id = this.value;\n        aids_list.push(id);\n    });\n\n    $('input[name=geo_location]:checked').each(function() {\n        let stateId = this.value;\n        state_list.push(stateId);\n    });\n\n    loadAssessment(JSONURL+\"?assessment=\"+assessment\n                +\"&aids=\"+aids_list+\"&gender=\"+gender_list\n                +\"&class=\"+class_list+\"&geo_location=\"+state_list\n                +\"&rank=\"+$('input[name=rank]').val()\n                +\"&averages=\"+$('input[name=averages]').val()\n                +\"&sorter=\"+$('select[name=sorter]').val());\n    event.stopImmediatePropagation();\n}\n\nfunction loadData(data) {\n    /*Load the comparative students data into highchartJS*/\n    let studentsRecords = data.students_records;\n    var assessment = 
data.assessment;\n    let myCharts = document.getElementById(\"container\");\n    myCharts.innerHTML = '';\n    // Only render charts when there are between 2 and 5 students to compare\n    if (studentsRecords.length > 1 && studentsRecords.length <= 5) {\n        studentsRecords.forEach(record => {\n            let data = record['result'];\n            let newDiv = document.createElement(\"div\");\n            newDiv.setAttribute(\"class\", \"compare-chart\");\n            newDiv.setAttribute(\"style\", \"\");\n            newDiv.setAttribute(\"id\", JSON.stringify(record.name));\n            myCharts.appendChild(newDiv);\n            \n            // Rendering the chart\n            studentChartOptions.chart['renderTo'] = JSON.stringify(record.name);\n            studentChartOptions.title = {text: `Performance Details for ${record.name}-(${record.reg_number})`};\n            studentChartOptions.subtitle = {text: `Assessment: ${assessment}`};\n            studentChartOptions.xAxis.categories = data['subjects'];\n            studentChartOptions.series[0] = ({\"name\": 'First Term', \"data\": data['first term']});\n            studentChartOptions.series[1] = ({\"name\": 'Second Term', \"data\": data['second term']});\n            studentChartOptions.series[2] = ({\"name\": 'Third Term', \"data\": data['third term']});\n            let charts = new Highcharts.Chart(studentChartOptions);\n        });\n    }\n    updateMessages(data['message']);\n}\n\nfunction loadAssessment(path) {\n    let placeholder;\n    placeholder = `\n    <tr>\n        <td style=\"text-align: left; padding-left: 40px; font-size: 15px;\" colspan=\"14\">Retrieving data. Please wait... \n        <img src=\"/static/img/ripple-load.gif\" alt=\"\" width=\"45\"></td>\n    </tr>`\n    $(\"#assessment_table\").html(placeholder);\n    $.getJSON(path, assessmentHandler);\n}" }, { "alpha_fraction": 0.604774534702301, "alphanum_fraction": 0.604774534702301, "avg_line_length": 26.418182373046875, "blob_id": "0d032cf910fec56a652e299f4cbc55fadf47fe6a", "content_id": "16ec2c905f81fbf7bff9e84921ae5256f11b8e24", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1508, "license_type": "permissive", "max_line_length": 124, "num_lines": 55, "path": "/apps/reports/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Report\nfrom institutions.models import StudentClass\n\nclass SimpleRemarkForm(forms.ModelForm):\n\n    class Meta:\n        model = Report\n        exclude = ('school',)\n\n\nclass CompleteRemarkForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(CompleteRemarkForm, self).__init__(*args, **kwargs)\n        #self.fields['promoted_to'].queryset = [klass.generic_class for klass in StudentClass.objects.filter(school=school)]\n\n    class Meta:\n        model = Report\n        fields = ('student','form_teacher_remark', 'head_remark', 'promoted_to', 'verified')\n\n\nclass FormReportForm(forms.ModelForm):\n\n    class Meta:\n        model = Report\n        fields = ('student','form_teacher_remark')\n\n\nclass HeadTeacherForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(HeadTeacherForm, self).__init__(*args, **kwargs)\n        #self.fields['promoted_to'].queryset = [klass.generic_class for klass in StudentClass.objects.filter(school=school)]\n\n    class Meta:\n        model = Report\n        fields = ('student', 'head_remark', 'promoted_to', 'verified')\n\n\n\nclass PsychomotorForm(forms.ModelForm):\n\n    class Meta:\n        model = Report\n        fields = (\n            'student',\n            'attentiveness',\n            'attendance',\n            'hardworking',\n            'neatness',\n            'reliability',\n            'games',\n            'craft',\n            'punctuality',\n            'relationship_with_others',\n        )\n" }, { "alpha_fraction": 0.7044625282287598, "alphanum_fraction": 0.706988513469696, "avg_line_length": 43.97468185424805, 
"blob_id": "565b55873c7ebbd070732d64afafe1b6e1e0d7d3", "content_id": "22b904f299d7dd6e32e47a21b64f0291a2f723be", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3563, "license_type": "permissive", "max_line_length": 118, "num_lines": 79, "path": "/apps/reports/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\nVERY_WEAK, WEAK, FAIR, GOOD, EXCELLENT = range(1,6)\nPYSCHOMOTOR_CHOICES = (\n (VERY_WEAK, 'Very Weak'),\n (WEAK, 'Weak'),\n (FAIR, 'Fair'),\n (GOOD, 'Good'),\n (EXCELLENT, 'Excellent')\n)\n\nclass ReportBatch(models.Model):\n school = models.ForeignKey('institutions.Institution', null=True)\n term = models.PositiveIntegerField(choices=settings.TERM_CHOICES, null=True)\n session = models.CharField(max_length=15, null=True, blank=True)\n school_resume_date = models.DateField(null=True, blank=True)\n\n def __str__(self):\n return 'Batch/%s/%s' % (self.session, self.term)\n\n def get_term(self):\n terms = {1:'First Term', 2:'Second Term', 3:'Third Term'}\n return terms[self.term].upper()\n \n class Meta:\n verbose_name = u'Report Batch'\n verbose_name_plural = u'Report Batches'\n\n\nclass Report(models.Model):\n batch = models.ForeignKey(ReportBatch, null=True)\n student = models.ForeignKey('students.Student', null=True)\n student_class = models.ForeignKey('institutions.StudentClass', null=True)\n form_teacher_remark = models.TextField(null=True, blank=True)\n head_remark = models.TextField('Principal/Headmaster Remark', null=True, blank=True)\n attentiveness = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n attendance = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n hardworking = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n neatness = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n reliability = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n games = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n craft = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n punctuality = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n relationship_with_others = models.PositiveIntegerField(choices=PYSCHOMOTOR_CHOICES, null=True, blank=True)\n verified = models.BooleanField(default=False)\n promoted_to = models.ForeignKey('institutions.StudentClass', related_name='promoted_class', null=True, blank=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n modified_by = models.ForeignKey('staff.Teacher', null=True)\n\n def __str__(self):\n return \"Report of %s in batch: %s\" % (self.student,self.batch)\n\n def verification(self):\n verification = {1: 'Verified', 0: 'Unverified'}\n return verification[self.verified]\n\n class Meta:\n ordering = ('-date_created', '-date_modified')\n \n def save(self, **kwargs):\n if self.promoted_to:\n self.student.student_class = self.promoted_to\n self.student.save()\n super(Report, self).save(**kwargs)\n\n\nclass BroadSheet(models.Model):\n school = models.ForeignKey('institutions.Institution', null=True)\n batch = models.ForeignKey(ReportBatch, null=True)\n student_class = models.ForeignKey('institutions.StudentClass')\n date_created = 
models.DateTimeField(auto_now_add=True, null=True)\n date_modified = models.DateTimeField(auto_now=True, null=True)\n modified_by = models.ForeignKey('staff.Teacher', null=True)\n\n def __str__(self):\n return \"Broadsheet for %s in %s\" % (self.student_class, self.batch)\n \n \n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 30, "blob_id": "ef5be9821e697f577349d2bc1f08b153eec5add5", "content_id": "597769979161132fcddbdd20b7b316b061a1a5dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 92, "license_type": "permissive", "max_line_length": 47, "num_lines": 3, "path": "/pytest.ini", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "[pytest]\nDJANGO_SETTINGS_MODULE = sani_app.test_settings\naddopts = --cov=. --cov-report=html" }, { "alpha_fraction": 0.6475155353546143, "alphanum_fraction": 0.6475155353546143, "avg_line_length": 39.25, "blob_id": "84ce28f279b17cff2bd0b62a938aa3968aecca80", "content_id": "851d19606fb0f7b876becc0d3758b2acf0855429", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "permissive", "max_line_length": 81, "num_lines": 16, "path": "/apps/contacts/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom contacts.forms import ContactForm\nfrom django.contrib import messages\n\ndef contact_us(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Your message has been sent, thank you.')\n return redirect('contacts:contact')\n else:\n messages.error(request, 'Kindly correct the errors below and resend')\n return render(request, 'contact.html', {'form':form})\n else:\n return render(request, 'contact.html', {'form':ContactForm()})\n" }, { "alpha_fraction": 0.6018009185791016, "alphanum_fraction": 0.6198099255561829, "avg_line_length": 37.44230651855469, "blob_id": "b2f4c4cd5c6d06a11bd8ad33f61ac2288fff1d01", "content_id": "8ec441b552f82c8088166839f22241813b795058", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "permissive", "max_line_length": 123, "num_lines": 52, "path": "/apps/insights/migrations/0003_auto_20180902_1625.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-09-02 14:25\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('institutions', '0005_auto_20180622_1055'),\n ('insights', '0002_assessment_uuid'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='assigngroupassessment',\n name='school',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='datasheet',\n name='school',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='groupassessment',\n name='school',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n migrations.AddField(\n model_name='stashassessment',\n name='school',\n 
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n        ),\n        migrations.AlterField(\n            model_name='assessment',\n            name='school',\n            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n        ),\n        migrations.AlterField(\n            model_name='evaluation',\n            name='school',\n            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n        ),\n        migrations.AlterField(\n            model_name='measure',\n            name='school',\n            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.597122311592102, "alphanum_fraction": 0.597122311592102, "avg_line_length": 52, "blob_id": "6831d060f7b57704b040ca2abf9795309c9b511f", "content_id": "123fa3934a3c5971d731d493ab553478c259a733", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "permissive", "max_line_length": 99, "num_lines": 21, "path": "/apps/payments/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n    url(r'^application$', apply, name='apply'),\n    url(r'^list$', PaymentListView.as_view(), name='list'),\n    url(r'^tokens/$', generate, name='generate'),\n    url(r'^pin/print/(?P<token>[\\w-]+)$', generate_or_print_tokens, name='print'),\n    url(r'^cancel/(?P<payment_id>[\\w-]+)$', cancel_payment, name='cancel'),\n    url(r'^search/$', search, name='search'),\n    url(r'^api$', search_pin_api, name='search-json'),\n    url(r'^new$', new_pin, name='new'),\n    url(r'^json-pin$', json_pin, name='json-pin'),\n    url(r'^validate$', validation, name='validate'),\n    url(r'^validate/pin$', get_validated_pin, name='json-validate'),\n    url(r'^pay$', make_payment, name='pay'),\n    url(r'^checkout/(?P<payment_id>[\\w-]+)$', initiate_payment, name='initialize'),\n    url(r'^payment_confirmed$', payment_confirmed, name='confirmed'),\n    url(r'^token_without_pay/(?P<payment_id>[\\w-]+)$', activate_without_pay, name='without-pay'),\n    url(r'^teller-pay/(?P<payment_id>[\\w-]+)$', teller_pay, name='teller-pay'),\n]" }, { "alpha_fraction": 0.8222222328186035, "alphanum_fraction": 0.8222222328186035, "avg_line_length": 22, "blob_id": "51237977c3c0cfa392206ac41967cd98b3a6f7c9", "content_id": "23cd9d955ae815a11729b973176c71058dbbab61", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "permissive", "max_line_length": 23, "num_lines": 2, "path": "/apps/appraisals/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# appraisals/forms.py\nfrom django import forms" }, { "alpha_fraction": 0.7628865838050842, "alphanum_fraction": 0.7628865838050842, "avg_line_length": 26.85714340209961, "blob_id": "9dde769b57808f6ef26bcb0c437bdc4cff5ed55f", "content_id": "d73c61d7fb7e69488516618f1fb9040f6a32d15f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "permissive", "max_line_length": 40, "num_lines": 7, "path": "/apps/staff/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Teacher\n\n# Register your models here.\[email protected](Teacher)\nclass TeacherAdmin(admin.ModelAdmin):\n    list_display = ('email', 
'staff_id')" }, { "alpha_fraction": 0.5661975145339966, "alphanum_fraction": 0.5714125633239746, "avg_line_length": 30.789661407470703, "blob_id": "25632dc7d12e6101276f94a51e88d1fba2a1ebd1", "content_id": "a705da4348aedbf2df050eb14a61f72123524924", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 17833, "license_type": "permissive", "max_line_length": 129, "num_lines": 561, "path": "/sani_app/static/js/insight.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n loadAssessment();\n $('.filter').hide();\n loadGroup();\n});\n \n$('#nav a').on('mouseover', function() {\n $('#nav a').removeClass('clicked');\n $(this).addClass('clicked');\n});\n\n$('#nav a').on('mouseleave', function() {\n $('#nav a').removeClass('clicked');\n})\n\nfunction toggleMe(myId) {\n $(`#${myId}`).slideToggle(300, 'swing');\n}\n\n$('.cat-2').hide();\nfunction openTab(evt, tabName) {\n var i, tabcontent, tablinks;\n tabcontent = document.getElementsByClassName(\"cat-2\");\n for (i = 0; i < tabcontent.length; i++) {\n tabcontent[i].style.display = \"none\";\n }\n tablinks = document.getElementsByClassName(\"tablinks\");\n for (i = 0; i < tablinks.length; i++) {\n tablinks[i].className = tablinks[i].className.replace(\" active-x\", \"\");\n }\n document.getElementById(tabName).style.display = \"block\";\n evt.currentTarget.className += \" active-x\";\n}\n\n// Check all data sheets for the data sheet module\n$('input[name=delete_list]').on('click', function() {\n if ($(this).is(':checked')) {\n $('.lbl-title span').css('display', 'inline');\n $('input[name=sheet]').prop('checked', true);\n } else {\n $('.lbl-title span').css('display', 'none');\n $('input[name=sheet]').prop('checked', false);\n }\n});\n\n// Check all groups for the assign group module\n$('input[name=all_group]').on('click', function() {\n if ($(this).is(':checked')) {\n $('input[name=group]').prop('checked', true);\n } else {\n $('input[name=group]').prop('checked', false);\n }\n});\n\n// Check all subjects in the Student Comparative Module\n$('#id_check_all').on('click', function() {\n if ($(this).is(':checked')) {\n $('input[name=comparative_subjects]').prop('checked', true);\n } else {\n $('input[name=comparative_subjects]').prop('checked', false);\n }\n});\n\n\nvar tdIndex = 1;\nfunction createTR() {\n //Add new data sheet\n let newTR = document.createElement('TR');\n let newTDSave = document.createElement('TD');\n let newTDName = document.createElement('TD');\n let newTDDate = document.createElement('TD');\n let newTDType = document.createElement('TD');\n let newTDFile = document.createElement('TD');\n\n //Create ids for the TDs\n const idName = 'id_name_'+tdIndex;\n const idSave = 'id_save_'+tdIndex;\n const idDate = 'id_date_'+tdIndex;\n const idType = 'id_type_'+tdIndex;\n const idFile = 'id_file_'+tdIndex;\n\n //Assign to each TD an id\n newTDSave.setAttribute('id', idSave);\n newTDDate.setAttribute('id', idDate);\n newTDType.setAttribute('id', idType);\n newTDFile.setAttribute('id', idFile);\n newTDName.setAttribute('id', idName);\n\n //Create input tags\n let inputName = document.createElement('INPUT');\n let inputDate = document.createElement('INPUT');\n let inputType = document.createElement('INPUT');\n let inputFile = document.createElement('INPUT');\n let iconSave = document.createElement('I');\n\n //Assign Types and append to the TDs\n //Name\n inputName.setAttribute('type', 'text');\n 
inputName.setAttribute('placeholder', 'Enter Name Here...');\n    newTDName.appendChild(inputName);\n    \n    //Save\n    iconSave.setAttribute('class', 'icons icons-sm icons-save');\n    iconSave.setAttribute('id', idSave);\n    newTDSave.appendChild(iconSave);\n\n    //AssessmentType\n    inputType.setAttribute('type', 'text');\n    inputType.setAttribute('placeholder', 'Assessment Type...');\n    newTDType.appendChild(inputType);\n    \n    //Date\n    inputDate.setAttribute('type', 'date');\n    newTDDate.appendChild(inputDate);\n    \n    //File\n    inputFile.setAttribute('type', 'file');\n    newTDFile.appendChild(inputFile);\n    \n    newTR.appendChild(newTDSave);\n    newTR.appendChild(newTDName);\n    newTR.appendChild(newTDDate);\n    newTR.appendChild(newTDType);\n    newTR.appendChild(newTDFile);\n\n    let tBody = document.getElementById('id_dsheet');\n    tBody.append(newTR);\n\n    tdIndex += 1;\n    // `>=` (not `=>`, which builds an always-truthy arrow function)\n    if (tdIndex >= 2) {\n        $(\"#id_create\").hide();\n    }\n}\n\nfunction loadData() {\n    let state = document.readyState;\n    if (state == 'interactive') {\n        document.getElementById('main-wrapper').style.visibility=\"hidden\";\n        document.getElementById('load').style.visibility=\"visible\";\n    } else if (state == 'complete') {\n        startLoader();\n    }\n}\n\nfunction startLoader() {\n    setTimeout(function(){\n        document.getElementById('load').style.visibility=\"hidden\";\n        document.getElementById('main-wrapper').style.visibility=\"visible\";\n    }, 1000);\n}\n\n$(\"#id_new_assessment\").submit(function(e){\n    e.preventDefault();\n    
e.stopImmediatePropagation();\n    let matched_list = [];\n    $('.selection-class:checked').each(function() {\n        let classId = parseInt(this.value);\n        matched_list.push(classId);\n    });\n    \n    let myForm = document.forms.namedItem(\"new-assessment\");\n    let myData = new FormData(myForm);\n    if (matched_list.length) {\n        myData.append('student_class', matched_list);\n        //myData.append('csrfmiddlewaretoken', document.getElementsByName('csrfmiddlewaretoken')[0].value);\n    }\n\n    let jsonFormData = {};\n    for (const [key, value] of myData.entries()) {\n        if (key == \"student_class\") {\n            jsonFormData[key] = matched_list;\n        } else {\n            jsonFormData[key] = value;\n        }\n    }\n    $.ajax({\n        url: \"/api/v1.0/assessments\",\n        method: \"POST\",\n        data: JSON.stringify(jsonFormData),\n        headers: {\n            \"Accept\": \"application/json\",\n            \"Content-Type\": \"application/json\"\n        },\n        success: function(data) {\n            document.location.replace(`/insights/assessment/${data.uuid}`)\n        },\n        error: function(error) {\n            console.log(error);\n        }\n\n    });\n});\n\n//using getJSON(path, callback)\nfunction loadStudentData(path) {\n    $.getJSON(path, studentsHandler);\n}\n\nfunction studentsHandler(data) {\n    var students = data['students'];\n    let student_node = document.getElementById('id_students');\n    student_node.innerHTML = \"\";\n    \n    //Work on the info of the classes received\n    if (students.length === 0){\n        student_node.innerHTML = `\n            <li>Choose Filter Student(s)</li>\n            <li>No Available Students</li>\n        `;\n        $(\"#id_student_count\").html(\"(0)\");\n    } else {\n        student_node.innerHTML = `<li>Choose Filter Student(s)</li>`;\n        for (student of students) {\n            student_node.appendChild(create_li_tag(student))\n        }\n        $(\"#id_student_count\").html(\"(\"+students.length+\")\");\n    }\n}\n\n$(\".comparative_class\").on('click', function(e) {\n    var matched_list = [];\n    $('.comparative_class:checked').each(function() {\n        let classId = parseInt(this.value);\n        matched_list.push(classId);\n    });\n    loadStudentData('/insights/student-json?class='+matched_list);\n    e.stopImmediatePropagation();\n});\n\n\nfunction create_li_tag(student) {\n    /*\n    Dynamically load students records into list tags for the comparative module\n    */\n    let checkbox_input = document.createElement('INPUT');\n    let li_tag = document.createElement('LI');\n    checkbox_input.setAttribute(\"name\", \"comparative_students\");\n    checkbox_input.setAttribute(\"value\", student.pk);\n    checkbox_input.setAttribute(\"type\", \"checkbox\");\n    checkbox_input.setAttribute(\"style\", \"text-align: left; width: 25px; float: left;\");\n    li_tag.append(student.name);\n    li_tag.appendChild(checkbox_input);\n    return li_tag;\n}\n\n\nfunction checkAll(name) {\n    /*\n    select all subjects for the data sheet module.\n    NOTE: assumes the master checkbox is bound as `this`,\n    e.g. onclick=\"checkAll.call(this, 'subjects')\"\n    */\n    $(`input[name=${name}]`).prop('checked', $(this).is(':checked'));\n}\n\n\nfunction groupSubmitHandler(e){\n    let myForm = document.forms.namedItem(\"new_group\");\n    let myData = new FormData(myForm);\n\n    let jsonFormData = {};\n    for (const [key, value] of myData.entries()) {\n        jsonFormData[key] = value;\n    }\n\n    if (jsonFormData['school'] && jsonFormData['name'] && jsonFormData['description']) {\n        $.ajax({\n            url: \"/api/v1.0/groups\",\n            method: \"POST\",\n            data: JSON.stringify(jsonFormData),\n            headers: {\n                \"Accept\": \"application/json\",\n                \"Content-Type\": \"application/json\"\n            },\n            success: function(data) {\n                // Create a notification here!\n                let message = createMessage(`Group created successfully!`, 1);\n                let messages = [message];\n                updateMessages(messages);\n                setTimeout(function(){\n                    loadGroup();\n                }, 2000);\n            },\n            error: function(error) {\n                let message = createMessage(`Group creation failed!`, 0);\n                let messages = [message];\n                updateMessages(messages);\n                setTimeout(function(){\n                    loadGroup();\n                }, 2000);\n            }\n\n        });\n    } else {\n        let message = createMessage(`Failure. 
Name or Description required`, 0);\n let messages = [message];\n updateMessages(messages);\n }\n}\n\nfunction createNewGroupRow(schoolID) {\n let newTR = document.createElement('TR');\n let newTDSerialNumber = document.createElement('TD');\n let newTDName = document.createElement('TD');\n let newTDDescription = document.createElement('TD');\n let newTDSave = document.createElement('TD');\n \n\n //Create input tags\n let inputName = document.createElement('INPUT');\n let inputDescription = document.createElement('INPUT');\n let iconSave = document.createElement('A');\n let school = document.createElement('INPUT');\n\n school.setAttribute('type', 'hidden');\n school.setAttribute('name', 'school');\n school.setAttribute('value', schoolID);\n newTDSerialNumber.appendChild(school);\n\n //Assign Types and append to the TDs\n //Name\n inputName.setAttribute('type', 'text');\n inputName.setAttribute('placeholder', 'Enter Group Name Here...');\n inputName.setAttribute('name', \"name\");\n newTDName.appendChild(inputName);\n \n //Save\n iconSave.setAttribute('class', 'status pay');\n iconSave.setAttribute('id', \"id_new_group\");\n iconSave.setAttribute('onclick', 'groupSubmitHandler()')\n iconSave.setAttribute('style', 'font-size: 11px; padding: 3px 10px; cursor: pointer; background: #2a7901;')\n iconSave.innerHTML = \"Save Group...\";\n newTDSave.appendChild(iconSave);\n\n //GroupDescription\n inputDescription.setAttribute('type', 'text');\n inputDescription.setAttribute('placeholder', 'Describe This Group...');\n inputDescription.setAttribute('name', 'description');\n newTDDescription.appendChild(inputDescription);\n newTDDescription.setAttribute('colspan', '2');\n\n newTR.appendChild(newTDSerialNumber);\n newTR.appendChild(newTDName);\n newTR.appendChild(newTDDescription);\n newTR.appendChild(newTDSave);\n \n let tBody = document.getElementById('id_t_group');\n tBody.append(newTR);\n $(\"#id_create_group\").hide();\n}\n\nfunction loadGroup() {\n //Clear the body of the table first\n $(\"#id_t_group\").html('');\n \n //get the groups from the group API\n $.getJSON(\"/api/v1.0/groups\", function(data) {\n $(\"#id_reg_number\").val(\"\");\n //get the data\n var content = '';\n for (group of data) {\n let innerContent = `\n <tr>\n <td><input type=\"checkbox\" name=\"group\" value=\"${group.id}\" id=\"${group.id}\"></td> \n <td>${group.name}</td> \n <td>${group.description}</td> \n <td>${group.counter}</td> \n <td>\n <a href=\"javascript:void(0)\" \n onclick=\"deleteGroup(${group.id})\" \n class=\"status cancel\" style=\"font-size: 11px; padding: 2px 10px;\" \n >Delete</a>\n </td> \n </tr>\n `\n content += innerContent;\n }\n $(\"#id_t_group\").html(content);\n });\n document.forms.namedItem(\"new_group\").reset();\n $(\"#id_create_group\").show();\n}\n\n\n// Assign students to groups\n$(\"#load_group\").submit(function(e){\n e.preventDefault();\n e.stopImmediatePropagation();\n let matched_list = [];\n $('input[name=group]:checked').each(function() {\n let groupId = parseInt(this.value);\n matched_list.push(groupId);\n });\n \n let myForm = document.forms.namedItem(\"new_group\"); \n let myData = new FormData();\n if (matched_list.length) {\n myData.append('groups', matched_list);\n //myData.append('csrfmiddlewaretoken', document.getElementsByName('csrfmiddlewaretoken')[0].value);\n }\n if ($(\"#id_reg_number\") != \"\") {\n myData.append(\"reg_number\", $(\"#id_reg_number\").val());\n }\n\n let jsonFormData = {}\n for (const [key, value] of myData.entries()) {\n if (key == \"groups\") {\n 
jsonFormData[key] = matched_list;\n        } else {\n            jsonFormData[key] = value;\n        }\n    }\n    $.ajax({\n        url: \"/insights/update-group-api\",\n        method: \"POST\",\n        data: JSON.stringify(jsonFormData),\n        headers: {\n            \"Accept\": \"application/json\",\n            \"Content-Type\": \"application/json\"\n        },\n        success: function(data) {\n            let messages = [];\n            let message = createMessage(\"Student(s) assigned successfully!\", data.level);\n            messages.push(message);\n            updateMessages(messages);\n            setTimeout(() => {\n                loadGroup();\n            }, 2000);\n        },\n        error: function(error) {\n            let messages = [];\n            let message = createMessage(`<strong>Data Exception-></strong> Data duplication error.`, 0);\n            messages.push(message);\n            updateMessages(messages);\n            setTimeout(() => {\n                loadGroup();\n            }, 2000);\n        }\n\n    });\n});\n\n// Delete group from list\nfunction deleteGroup(id) {\n    $.ajax({\n        url: `/insights/delete-group-api/${id}`,\n        method: \"GET\",\n        headers: {\n            \"Accept\": \"application/json\",\n            \"Content-Type\": \"application/json\"\n        },\n        success: function(data) {\n            // Create a notification here!\n            updateMessages(data.message);\n            setTimeout(function(){\n                loadGroup();\n            }, 2000);\n        },\n        error: function(error) {\n            let message = createMessage(`Group deletion failed!`, 0);\n            let messages = [message];\n            updateMessages(messages);\n            setTimeout(function(){\n                loadGroup();\n            }, 2000);\n        }\n\n    });\n}\n\n\nfunction loadAssessment() {\n    $.getJSON(\"/insights/assessments\", function(data) {\n        $(\"#id_stashed_assessment\").html('');\n        //get the data\n        var content = '';\n        for (assessment of JSON.parse(data['assessments'])) {\n            let innerContent = `\n            <tr>\n                <td><input type=\"radio\" name=\"stashed_assessment\" value=\"${assessment.pk}\" id=\"${assessment.pk}\"></td> \n                <td>${assessment.fields.title}</td> \n                <td>${assessment.fields.assessment_year}</td>\n                <td>${assessment.fields.assessment_type}</td> \n                <td>\n                    <a href=\"javascript:void(0)\" \n                        onclick=\"deleteAssessment(${assessment.pk})\" \n                        class=\"status cancel\" style=\"font-size: 11px; padding: 2px 10px;\" \n                    >Remove</a>\n                </td> \n            </tr>\n            `\n            content += innerContent;\n        }\n        $(\"#id_stashed_assessment\").html(content);\n    });\n}\n\nfunction deleteAssessment(id) {\n    $.ajax({\n        url: `/insights/delete-assessment-api/${id}`,\n        method: \"GET\",\n        headers: {\n            \"Accept\": \"application/json\",\n            \"Content-Type\": \"application/json\"\n        },\n        success: function(data) {\n            // Create a notification here!\n            updateMessages(data.message);\n            document.location.reload();\n        },\n        error: function(error) {\n            let message = createMessage(\"Failed to delete assessment\", 0);\n            let messages = [message];\n            updateMessages(messages);\n        }\n\n    });\n}\n\nlet loadStashedAssessment = document.getElementById('load_stashed');\nloadStashedAssessment.addEventListener('click', loadStashedHandler);\n\nfunction loadStashedHandler() {\n    // the radio buttons rendered by loadAssessment() are named \"stashed_assessment\"\n    document.location.replace(\"/insights/assessment/\"+$(\"input[name=stashed_assessment]:checked\").val());\n}\n\nfunction createMessage(message, level) {\n    return {\n        \"message\": `${message}`,\n        \"level\": level,\n    };\n}\n\nfunction updateMessages(messages) {\n    $(\"#mod-alert\").html(\"\");\n    $.each(messages, function(i, m) {\n        $(\"#mod-alert\").html(\"<p><strong>ALERT: </strong>\"+m.message+\"</p>\");\n        $(\"#mod-alert\").removeClass('in-hide');\n        if (m.level === 0) {\n            $(\"#mod-alert\").addClass('side-error');\n        }\n        setTimeout(function() {\n            $(\"#mod-alert\").removeClass('side-notify', 500);\n            $(\"#mod-alert\").addClass('in-hide');\n        }, 5000);\n    })\n}\n\nfunction changeSlider(id, value, cost) {\n    var newValue = 
document.getElementById(id);\n    newValue.nextElementSibling.innerHTML = '&#x20A6;'+(value*parseFloat(cost)).toString().replace(/\\B(?=(\\d{3})+(?!\\d))/g, \",\");\n    if (value > 0) {\n        newValue.innerHTML = `0 - ${value}`;\n    } else {\n        newValue.innerHTML = value;\n    }\n}" }, { "alpha_fraction": 0.6453067064285278, "alphanum_fraction": 0.6497412919998169, "avg_line_length": 36.17307662963867, "blob_id": "e089119559741490900d6ed824e6c0fdb87a62f3", "content_id": "2b7d4cef520272273f1419184cbdd86db9bf7304", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13530, "license_type": "permissive", "max_line_length": 203, "num_lines": 364, "path": "/apps/insights/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom institutions.models import StudentClass\nfrom states.models import State\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom .models import ASSESSMENT_TYPE\nfrom django.http import HttpResponse\nfrom students.models import Student\nfrom .models import Assessment\nfrom subjects.models import Subject\nfrom api.serializers import (SubjectSerializer, StudentSerializer, ResultSerializer, AssessmentSerializer)\nfrom django.contrib import messages\nfrom results.models import Result\nfrom results.utils import StudentReport\nfrom config.utils import ChartData\nfrom insights.models import GroupAssessment\nfrom django.core import serializers\nfrom django.views.decorators.cache import cache_page\nimport math\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n# Create your views here.\ndef get_aids():\n    return [{'value': aid[1], 'key': aid[0]} for aid in list(settings.SPECIAL_NEEDS)]\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef insights(request):\n    teacher = request.user.teacher\n    # Placeholder context; admin and non-admin views are not yet differentiated\n    data = {}\n    return render(request, 'insights/analytics.html', data)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef load_assessment(request, uuid):\n    # get the assessment details\n    assessment = get_object_or_404(Assessment, uuid=uuid)\n    context_data = {\n        \"assessment\": assessment,\n        \"genders\": True,\n        \"aids\": get_aids(),\n        \"grades\": True,\n        \"gradings\": [1, 2],\n        \"filter_classes\": StudentClass.objects.filter(school=request.user.teacher.school),\n        \"sort_fields\": [\n            'first_name', \n            'last_name', \n            'gender',\n            'age',\n            'state_of_origin',\n            'student_class'\n        ]\n    }\n    return render(request, 'insights/assessment_details.html', context_data)\n\n\n@cache_page(60 * 60)\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef load_student_json(request):\n    \"\"\"\n    A json response for loading the students for a selected\n    class in the comparison module\n    \"\"\"\n    data = {}\n    if request.method == 'GET':\n        params = request.GET\n        class_ids = params.get('class')\n        class_id_list = class_ids.split(',')\n\n        # Double check that the class_id is not empty\n        if class_id_list:\n            # student_classes = StudentClass.objects.filter(id__in=class_id_list).values('id')\n            data['students'] = [{'pk': student.id, 'name': student.full_name, 'reg_number': student.reg_number} for student in Student.objects.filter(student_class_id__in=class_id_list, user_status='A')]\n        else:\n            data['students'] = []\n    return 
HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef compare_api(request):\n    \"\"\"\n    Returns a JSON for the records of students that matches the given GET parameters\n    \"\"\"\n    params = request.GET\n    students_ids = params.getlist('comparative_students')\n    assessment_id = params.get('comparative_assessment')\n    subjects_ids = params.getlist('comparative_subjects')\n    termlist = params.get('term')\n    subjects_comparison = params.get('subjects_comparison')\n    gender_list = params.get('gender')\n    geo_list = params.get('geo_location')\n    update_message = []\n    level = 1\n\n    # Get data from the database\n    students = Student.objects.filter(id__in=students_ids)\n    subject_serializer = SubjectSerializer(Subject.objects.filter(id__in=subjects_ids), many=True)\n    student_serializer = StudentSerializer(Student.objects.filter(id__in=students_ids), many=True)\n    results = Result.objects.filter(subject__id__in=subjects_ids)\n    students_results = []\n    assessment = None\n\n    if termlist is not None and termlist != '':\n        termlist = termlist.split(',')\n        results = results.filter(term__in=termlist)\n\n    if subjects_comparison is not None and subjects_comparison != '':\n        subjects_comparison = subjects_comparison.split(',')\n        results = results.filter(subject__id__in=subjects_comparison)\n\n    if gender_list is not None and gender_list != '':\n        gender_list = gender_list.split(',')\n        students = students.filter(gender__in=gender_list)\n\n    if geo_list is not None and geo_list != '':\n        geo_list = geo_list.split(',')\n        students = students.filter(state_of_origin__id__in=geo_list)\n\n    for student in students:\n        data = {}\n        data[\"name\"] = student.full_name\n        data[\"reg_number\"] = student.reg_number\n        data[\"current_class\"] = \"{}-{}\".format(student.student_class.caption, student.student_class.nick_name)\n        data[\"result\"] = ChartData.current_report(student, results.filter(student=student))\n        students_results.append(data)\n    if assessment_id != 'all':\n        assessment = get_object_or_404(Assessment, pk=assessment_id)\n    else:\n        assessment = assessment_id\n    # Check that there are not more than 5 students to compare\n    if len(students_ids) > 5:\n        messages.error(request, \"Sorry you are only allowed to compare a maximum of five (5) students.\")\n        level = 0\n\n    # Also check to make sure the user doesn't select only one student\n    if len(students_ids) <= 1:\n        messages.error(request, \"You need more than one student for the analysis\")\n        level = 0\n\n    if len(students_ids) > 1 and len(students_ids) <= 5:\n        messages.success(request, \"Student Comparatives Loaded Successfully\")\n        level = 1\n\n    for message in messages.get_messages(request):\n        update_message.append(\n            {\n                \"level\": level,\n                \"message\": message.message,\n                \"tags\": message.tags\n            }\n        )\n    data = {\n        'subjects_compared': subject_serializer.data,\n        'students_records': students_results,\n        \"message\": update_message\n        # 'assessment': assessment.title,\n    }\n    return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef update_group_api(request):\n    update_messages = []\n    level = 0\n    request_body = request.body.decode(\"utf-8\")  # We are decoding because ajax returned a byte payload\n    params = json.loads(request_body)  # deserialize the payload\n    group_ids = params.get(\"groups\")\n    reg_number = params.get(\"reg_number\")\n\n    for group_id in group_ids:\n        group =
get_object_or_404(GroupAssessment, pk=group_id)\n group.students.add(get_object_or_404(Student, reg_number=reg_number))\n if group_id == group_ids[-1]:\n level = 1\n\n if level == 1:\n messages.success(request, \"Success!\")\n else:\n messages.error(request, \"Failure!\")\n for message in messages.get_messages(request):\n update_messages.append({\"message\": message.message, \"tags\": message.tags})\n data = {\n \"message\": update_messages,\n \"level\": level,\n }\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef delete_group_api(request, group_id):\n update_messages = []\n level = 1\n try:\n group = get_object_or_404(GroupAssessment, pk=group_id)\n g_name = group.name\n group.delete()\n messages.success(request, \"Group: '%s', removed successfully\" % (g_name))\n except:\n messages.error(request, \"Group deletion failed!\")\n level = 0\n for message in messages.get_messages(request):\n update_messages.append({\"message\":message.message, \"tags\": message.tags})\n data = {\n \"message\": update_messages,\n \"level\": level\n }\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@cache_page(60 * 60)\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef compare_students(request):\n params = request.GET\n subjects_ids = params.getlist('comparative_subjects')\n \n\n template_name = 'insights/compare.html'\n data = {\n 'subjects_compared': Subject.objects.filter(id__in=subjects_ids),\n 'term': True,\n 'genders': True\n }\n return render(request, template_name, data)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef assessment_api(request):\n # get all argumentss from the request\n teacher = request.user.teacher\n params = request.GET\n assessment_id = params.get(\"assessment\")\n class_list = params.get(\"class\")\n gender_list = params.get(\"gender\")\n special_aids_list = params.get(\"aids\")\n ranking = params.get('rank')\n averages = params.get('averages')\n sort_value = params.get('sorter', None)\n\n assessment = get_object_or_404(Assessment, uuid=assessment_id)\n class_ids = [student_class.id for student_class in assessment.student_class.all()]\n students = Student.objects.filter(student_class__id__in=class_ids, school=teacher.school, user_status='A')\n if class_list:\n class_list = class_list.split(',')\n students = students.filter(student_class__id__in=class_list)\n \n if special_aids_list:\n special_aids = special_aids_list.split(',')\n students = students.filter(special_aids__in=special_aids)\n \n if gender_list:\n gender_list = gender_list.split(',')\n students = students.filter(gender__in=gender_list)\n \n if ranking and ranking != '0':\n ranks = [load_students_records(student)['id'] for student in students if load_students_records(student)['ranking'] in range(1, int(ranking)+1)]\n students = students.filter(id__in=ranks)\n \n if averages and averages != '0':\n average_list = [load_students_records(student)['id'] for student in students if load_students_records(student)['performance'].get('average') <= float(averages)]\n students = students.filter(id__in=average_list)\n \n # need some form realtime sort\n if sort_value:\n if sort_value != 'age':\n students = students.order_by(sort_value)\n else:\n students = students.order_by('-birth_date')\n \n students_assessment = [load_students_records(student) for student in students]\n data = {\n \"assessment\": {\n \"id\": assessment.id, \n \"name\": 
\"{}-{}\".format(assessment.title, assessment.get_assessment_year()), \n \"title\": assessment.title, \n \"type\": assessment.get_assessment_type(),\n \"url\": assessment.get_absolute_url()\n },\n \"students\": students_assessment\n }\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef delete_assessment_api(request, assessment_id):\n update_messages = []\n level = 1\n try:\n assessment = get_object_or_404(Assessment, pk=assessment_id)\n assessment_name = assessment.title\n assessment.delete()\n messages.success(request, \"Assessment: '%s', removed successfully\" % (assessment_name))\n except:\n messages.error(request, \"Assessment deletion failed!\")\n level = 0\n for message in messages.get_messages(request):\n update_messages.append({\"message\":message.message, \"tags\": message.tags})\n data = {\n \"message\": update_messages,\n \"level\": level\n }\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef get_assessments(request):\n assessments = Assessment.objects.filter(school=request.user.teacher.school)\n assessment_serialized = serializers.serialize('json', assessments, fields=('id', 'title', 'assessment_type', 'assessment_year'))\n\n data = {\n 'assessments': assessment_serialized,\n 'success': 'Ok'\n }\n return HttpResponse(json.dumps(data), content_type='application/json')\n\ndef load_students_records(student):\n '''\n This is a simple function for returning the \n students data for assessment\n '''\n rankings = StudentReport.board(student.school)\n my_rank = list(rankings.keys()).index(student)+1\n data = StudentReport.overall_grade(student)\n performance_data = {\n \"total\": float(data[\"total\"]),\n \"average\": data[\"average\"],\n \"grade\": data[\"grade\"]\n }\n\n # Hoping to use a data structure in the future. 
Possibly a linked-list\n return {\n \"id\": student.id,\n \"first_name\": student.first_name,\n \"last_name\": student.last_name,\n \"reg_number\": student.reg_number,\n \"gender\": student.gender,\n \"dob\": (datetime.today().year - student.birth_date.year) if student.birth_date else \"No DOB specified\",\n \"state\": student.state_of_origin.name if student.state_of_origin else \"-\",\n \"nationality\": student.country.name if student.country else \"-\",\n \"class\": \"{}-{}\".format(student.student_class.caption, student.student_class.nick_name) if student.student_class.nick_name else student.student_class.caption,\n \"photo\": student.photo.url if student.photo else \"null\",\n \"ranking\": my_rank,\n \"aids\": student.special_aids if student.special_aids else \"No\",\n \"performance\": performance_data\n }" }, { "alpha_fraction": 0.5889261960983276, "alphanum_fraction": 0.6023489832878113, "avg_line_length": 23.79166603088379, "blob_id": "231a371b95304567953b5e3a297c4ea666f89618", "content_id": "510bdbda19650b73caf0c8d41e72be74dbd88371", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "permissive", "max_line_length": 81, "num_lines": 24, "path": "/apps/contacts/models.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Contact(models.Model):\n \n NEW = 'New'\n TREATED = 'Treated'\n \n CONTACT_STATUS = (\n (NEW, 'New'),\n (TREATED, 'Treated'),\n )\n \n name = models.CharField(max_length=100)\n email = models.EmailField()\n subject = models.CharField(max_length=100)\n message = models.TextField()\n created_on = models.DateTimeField(auto_now_add=True)\n status = models.CharField(max_length=20, default=NEW, choices=CONTACT_STATUS)\n \n class Meta:\n ordering = ['-id']\n \n def __unicode__(self):\n return self.name\n\n" }, { "alpha_fraction": 0.5990025997161865, "alphanum_fraction": 0.6006649136543274, "avg_line_length": 40.328243255615234, "blob_id": "b80d1a1b4004993f3eccfe5521d89246d022dae6", "content_id": "3147c43d242b61cf3f8efe587bae4d5cd3d6ad58", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5414, "license_type": "permissive", "max_line_length": 137, "num_lines": 131, "path": "/apps/students/forms.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Student\nfrom config.utils import create_user\nfrom django.utils.text import slugify\nimport uuid\nfrom django.contrib.admin.widgets import AdminDateWidget\nfrom django.forms.extras.widgets import SelectDateWidget\nimport datetime\nfrom django.db import transaction, IntegrityError\nfrom institutions.models import StudentClass\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\nclass StudentCreationForm(forms.ModelForm):\n year_of_admission = forms.DateField(widget = forms.DateInput(attrs=({'type':'date', 'style': 'width: 45%;'})))\n\n def __init__(self, school, *args, **kwargs):\n super(StudentCreationForm, self).__init__(*args, **kwargs)\n self.fields['last_name'].widget.attrs = {'placeholder' : 'Student Surname', 'class': 'form-control'}\n self.fields['last_name'].widget.attrs = {'placeholder' : 'Student Surname', 'class': 'form-control'}\n self.fields['student_class'].widget.attrs = {'placeholder' : 'Class'}\n self.fields['student_class'].queryset = StudentClass.objects.filter(school=school)\n self.fields['gender'].widget.attrs = {}\n 
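        # Widget attrs above supply placeholders and Bootstrap-style 'form-control' classes;
        # the student_class queryset is already scoped to the caller's school.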
self.fields['reg_number'].widget.attrs = {'placeholder' : 'Reg Number', 'class': 'form-control'}\n class Meta:\n model = Student\n fields = (\n 'last_name',\n 'first_name',\n 'reg_number',\n 'gender',\n 'student_class',\n 'year_of_admission',\n )\n\n @transaction.atomic\n def save(self, commit=True):\n user = create_user(self.cleaned_data['last_name'], self.cleaned_data['first_name'])\n # Set default password to this user's username and birth date (if provided):\n # password = pin_generator()\n user.username = self.cleaned_data['reg_number']\n user.set_password(self.cleaned_data['reg_number'])\n user.save()\n\n instance = super(StudentCreationForm, self).save(commit=False)\n instance.user = user\n orig = slugify(instance.last_name)\n if Student.objects.filter(slug=instance.slug).exists():\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n else:\n instance.slug = \"%s-%s\" % (orig, uuid.uuid4())\n\n instance.save()\n return instance\n\n\nclass BasicProfileForm(forms.ModelForm):\n birth_date = forms.DateField(widget = forms.DateInput(attrs=({'type':'date', 'style': 'height: 30px;'})))\n def __init__(self, *args, **kwargs):\n super(BasicProfileForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].widget.attrs = {'placeholder' : 'Student Surname', 'class': 'form-control'}\n self.fields['last_name'].widget.attrs = {'placeholder' : 'First Name', 'class': 'form-control'}\n self.fields['middle_name'].widget.attrs = {'placeholder' : 'Other Name', 'class': 'form-control'}\n self.fields['phone_number'].widget.attrs = {'class': 'form-control'}\n\n class Meta:\n model = Student\n fields = (\n 'first_name',\n 'last_name',\n 'middle_name',\n 'birth_date',\n 'phone_number'\n )\n\n def save(self, commit=True):\n if self.instance.pk:\n user = self.instance.user\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n return super(BasicProfileForm, self).save(commit=commit)\n\n\nclass PersonalInformationForm(forms.ModelForm):\n \"\"\"Edit an student's personal information.\"\"\"\n def __init__(self, *args, **kwargs):\n super(PersonalInformationForm, self).__init__(*args, **kwargs)\n self.fields['gender'].widget.attrs = {}\n self.fields['phone_number'].widget.attrs = {'class': 'form-control'}\n self.fields['parent_phone_number'].widget.attrs = {'class': 'form-control'}\n self.fields['address'].widget.attrs = {'placeholder' : 'Residential Address e.g #4 glo street, Ikeja ', 'class': 'form-control'}\n self.fields['state_of_residence'].widget.attrs = {'class': 'form-control'}\n self.fields['state_of_origin'].widget.attrs = {'class': 'form-control'}\n self.fields['country'].widget.attrs = {'class': 'form-control'}\n self.fields['religion'].widget.attrs = {'class': 'form-control'}\n self.fields['special_aids'].widget.attrs = {'class': 'form-control'}\n\n class Meta:\n model = Student\n fields = (\n 'gender',\n 'phone_number',\n 'parent_phone_number',\n 'address',\n 'state_of_residence',\n 'state_of_origin',\n 'country',\n 'religion',\n 'special_aids'\n )\n\n\nclass SchoolForm(forms.ModelForm):\n year_of_admission = forms.DateField(widget = forms.DateInput(attrs=({'type':'date', 'style': 'height: 30px;'})))\n def __init__(self, school, *args, **kwargs):\n super(SchoolForm, self).__init__(*args, **kwargs)\n self.fields['user_status'].widget.attrs = {}\n self.fields['reg_number'].widget.attrs = {'placeholder' : 'Reg Number', 'class': 'form-control'}\n self.fields['student_class'].queryset = StudentClass.objects.filter(school=school)\n\n\n 
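    # The Meta below limits this form to enrollment data (status, reg number, class,
    # admission year); personal details are edited through the profile forms above.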
class Meta:\n model = Student\n fields = (\n 'user_status',\n 'reg_number',\n 'student_class',\n 'year_of_admission',\n )\n" }, { "alpha_fraction": 0.6438809037208557, "alphanum_fraction": 0.6438809037208557, "avg_line_length": 52.411766052246094, "blob_id": "bdb02fcc67cb8be5e12206ec67513d610f55f97b", "content_id": "ca73de3a0b6816d8d490a45ee05ea80040f8c2d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "permissive", "max_line_length": 102, "num_lines": 17, "path": "/apps/students/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\nfrom django.views.generic.base import TemplateView\n\nurlpatterns = [\n url(r'^$', StudentListView.as_view(), name='list'),\n url(r'^register/$', create_student, name='create-student'),\n url(r'^upload/$', upload_student_csv, name='student-upload'),\n url(r'^import/$', import_student_data , name='student-import'),\n url(r'^edit/(?P<student_slug>[\\w-]+)/$', edit, name='edit'),\n url(r'^restore/(?P<student_slug>[\\w-]+)/$', restore_account, name='restore'),\n url(r'^profile/(?P<student_slug>[\\w-]+)/$', student_profile , name='profile'),\n url(r'^deactivate$', deactivate , name='deactivate'),\n url(r'^photo-update/(?P<student_slug>[\\w-]+)/$', update_photo, name='update-photo'),\n url(r'^awards-board$', TemplateView.as_view(template_name='students/awards.html'), name='awards'),\n url(r'^reg-json$', reg_json, name='reg-json')\n]" }, { "alpha_fraction": 0.5770054459571838, "alphanum_fraction": 0.5922399163246155, "avg_line_length": 59.014286041259766, "blob_id": "1d5582998f5b1149398853b72686b687aac4c1c8", "content_id": "fc3cafd3c4ec440dc98e4ce3e192871b8c53a26c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4201, "license_type": "permissive", "max_line_length": 201, "num_lines": 70, "path": "/apps/students/migrations/0001_initial.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('states', '0001_initial'),\n ('institutions', '0002_auto_20180523_0138'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('photo', models.ImageField(blank=True, upload_to='uploads/%Y/%m/%d')),\n ('last_name', models.CharField(max_length=50, null=True, verbose_name='Surname')),\n ('first_name', models.CharField(max_length=50, null=True, verbose_name='First name')),\n ('middle_name', models.CharField(blank=True, max_length=50, verbose_name='Middle name')),\n ('user_status', models.CharField(blank=True, choices=[('A', 'Active'), ('G', 'Graduated'), ('S', 'Suspended'), ('E', 'Expelled'), ('L', 'Left')], default='A', max_length=1, null=True)),\n ('reg_number', models.CharField(max_length=30)),\n ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True)),\n ('birth_date', models.DateField(blank=True, db_index=True, null=True)),\n ('address', models.CharField(blank=True, max_length=100, 
null=True)),\n ('phone_number', models.CharField(blank=True, max_length=15, null=True)),\n ('parent_phone_number', models.CharField(blank=True, max_length=15, null=True)),\n ('year_of_admission', models.DateField(blank=True, null=True)),\n ('religion', models.PositiveIntegerField(blank=True, choices=[(1, 'Christianity'), (2, 'Islam'), (3, 'Others')], null=True)),\n ('permanent_address', models.TextField(blank=True)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('date_modified', models.DateTimeField(auto_now=True)),\n ('slug', models.SlugField(blank=True, max_length=255, unique=True)),\n ('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='country_of_residence', to='states.Country')),\n ('school', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution')),\n ('state_of_origin', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='students_origin', to='states.State')),\n ('state_of_residence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='students_residence', to='states.State')),\n ('student_class', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='institutions.StudentClass')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('last_name',),\n 'verbose_name_plural': 'Students',\n 'verbose_name': 'Student',\n },\n ),\n migrations.CreateModel(\n name='UniqueMapper',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('reg_number', models.CharField(max_length=30)),\n ('short_institution_name', models.CharField(max_length=5, null=True)),\n ('unique_map', models.CharField(blank=True, max_length=50)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('short_institution_name',),\n 'verbose_name_plural': 'Unique Mappers',\n 'verbose_name': 'Unique Mapper',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6427264213562012, "alphanum_fraction": 0.6473010182380676, "avg_line_length": 38.567874908447266, "blob_id": "727f39ad62214dc40482081ba74bce89b23fafcb", "content_id": "e52b15d3e21c609ad57d3b945aed56390765a7bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17488, "license_type": "permissive", "max_line_length": 148, "num_lines": 442, "path": "/apps/payments/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView\nfrom .models import TokenApplication, AccessCard, AccessToken\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.utils.decorators import method_decorator\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport datetime\nfrom .forms import ApplicationForm, TellerPaymentForm\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom .utils import tokens, PaystackTransaction as Transaction\nfrom reports.utils import render_to_pdf as rtp\nfrom students.models import Student\nfrom django.contrib.humanize.templatetags.humanize import ordinal\nfrom config.utils import pin_generator\nfrom 
config.models import Config\n\n#from paystackapi.transaction import Transaction\ntry:\n import simplejson as json\nexcept:\n import json\n\n# Create your views here.\nclass PaymentListView(ListView):\n model = TokenApplication\n template_name = 'payments/list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self):\n teacher = self.request.user.teacher\n queryset = super(PaymentListView, self).get_queryset().filter(school=teacher.school)\n params = self.request.GET\n session = params.get('session','')\n term = params.get('term','all')\n\n if session !='':\n queryset = queryset.filter(session=session)\n if term !='all':\n queryset = queryset.filter(term=term)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(PaymentListView, self).get_context_data(**kwargs)\n tokens = self.get_queryset()\n paginator = Paginator(tokens, self.paginated_by)\n\n page = self.request.GET.get('page')\n\n try:\n payments = paginator.page(page)\n except PageNotAnInteger:\n payments = paginator.page(1)\n except EmptyPage:\n payments = paginator.page(paginator.num_pages)\n context['tokens'] = tokens\n context['years'] = [i for i in range(datetime.date.today().year, datetime.date.today().year-20, -1)]\n return context\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n return super(PaymentListView, self).dispatch(request, *args, **kwargs)\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef apply(request):\n \"\"\"\n Apply for a token for a particular term\n \"\"\"\n\n template_name = 'payments/apply.html'\n school = request.user.teacher.school\n\n if request.method == 'POST':\n session = request.POST.get('session')\n\n if len(session.split('/')) != 2:\n messages.error(request, 'Enter the session in the right format e.g. 2013/2014')\n return HttpResponseRedirect(reverse('payments:apply'))\n\n current_year = session.split('/')\n\n # Ensure the user isn't applying for a future token\n if int(current_year[0]) > datetime.datetime.today().year:\n messages.error(request, 'Ooops!! Sorry you cannot apply for a future session.')\n return HttpResponseRedirect(reverse('payments:apply'))\n \n # Proceed, if the above condition passes\n form = ApplicationForm(request.POST)\n t_application = TokenApplication.objects.filter(\n school=school,\n term=request.POST.get('term'),\n session=session)\n if t_application.filter(status='A').exists():\n messages.error(request, 'You already have an existing token for this session and term')\n return HttpResponseRedirect(reverse('payments:apply'))\n if t_application.filter(status='I').exists():\n messages.error(request, 'Sorry! You already have an initialized request for this session and term,\\\n waiting to be processed')\n return HttpResponseRedirect(reverse('payments:apply'))\n if t_application.filter(status='P').exists():\n messages.error(request, 'Sorry! You already have this token pending for processing')\n return HttpResponseRedirect(reverse('payments:apply'))\n if form.is_valid():\n application = form.save(commit=False)\n application.school = request.user.teacher.school\n application.year = current_year[1]\n application.save()\n messages.info(request, \"Your application is been processed. 
Track the progress with the Payment ID:'%s'\" % (application.application_id))\n return HttpResponseRedirect(reverse('payments:list'))\n else:\n form = ApplicationForm()\n context = {'form': form}\n return render(request, template_name,context)\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef generate(request):\n \"\"\"\n Generate pins for all the students in the school during that academic year\n\n ARGS:\n token: school's access token already applied for\n \"\"\"\n template_name = 'payments/generate_tokens.html'\n years = [i for i in range(datetime.date.today().year, datetime.date.today().year-20, -1)]\n if request.method == 'POST':\n params = request.POST\n token = params.get('token')\n session = params.get('session')\n term = params.get('term')\n teacher = request.user.teacher\n response = tokens(token, teacher.school, session, term)\n if response:\n messages.success(request, \"Access Tokens were successfully generated for all students in your school\")\n HttpResponseRedirect(reverse('payments:print'))\n return render(request, template_name, {'years': years})\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef cancel_payment(request, payment_id):\n payment_application = get_object_or_404(TokenApplication, payment_id=payment_id)\n payment_application.status = 'C'\n payment_application.save()\n return HttpResponseRedirect(reverse('payments:list'))\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef generate_or_print_tokens(request, token):\n template_name = 'payments/print_tokens.html'\n current_user = request.user.teacher\n context = {}\n access_token = get_object_or_404(AccessToken, token=token)\n\n access_cards = AccessCard.objects.filter(school_token=access_token).prefetch_related('student')\n if access_cards.exists():\n context['access_cards'] = access_cards\n context['school'] = current_user.school\n context['token'] = access_token\n else:\n #get access card for this token\n response = tokens(access_token, current_user.school,\n access_token.token_application.session, access_token.token_application.term)\n if response:\n access_cards = AccessCard.objects.filter(school_token=access_token)\n context['access_cards'] = access_cards\n context['school'] = current_user.school\n context['token'] = access_token\n return rtp(template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef search(request):\n return render(request, 'payments/search_pin.html', {})\n\n\ndef search_pin_api(request):\n \"\"\"Search if a student's pin exist for a given session/term\n \n Arguments:\n request {http} -- http request\n \n Returns:\n response -- http response\n \"\"\"\n\n data = {}\n \n # get all the request parameters\n params = request.GET \n reg_number = params.get('reg_number')\n session = params.get('session')\n term = params.get('term')\n\n try:\n student = get_object_or_404(Student, reg_number=reg_number)\n access_card = AccessCard.objects.filter(student=student, session=session, term=term)\n if access_card.exists():\n access_card = access_card[0]\n\n klass = \"\"\n if access_card.student.student_class.nick_name:\n klass = \"{}-{}\".format(access_card.student.student_class.caption, access_card.student.student_class.nick_name) \n else:\n klass = \"{}\".format(access_card.student.student_class.caption)\n data['pin'] = access_card.access_code\n data['name'] = 
access_card.student.full_name\n            data['reg_number'] = access_card.student.reg_number\n            data['class'] = klass\n            data['message'] = 'Success!'\n            data['status_code'] = 1\n        else:\n            data['message'] = 'No pin Available'\n    except:\n        data['message'] = \"There was no pin found for student with Reg Number: '%s'\" % (reg_number)\n    return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\n@user_passes_test(lambda u: u.teacher.is_admin, login_url='/auth/login/')\ndef new_pin(request):\n    template_name = 'payments/new_pin.html'\n    context = {\n        'years': [i for i in range(datetime.date.today().year, datetime.date.today().year-5, -1)]\n    }\n    return render(request, template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: u.teacher.is_admin, login_url='/auth/login/')\ndef json_pin(request):\n    \"\"\"Generate a single pin for a particular student\n\n    Arguments:\n        request {http} -- http request\n\n    Returns:\n        JSON -- json data with the pin information if successful\n    \"\"\"\n\n    data = {}\n    params = request.GET\n    school_token = params.get('token')\n    reg_number = params.get('reg_number')\n    school = request.user.teacher.school\n\n    access_token = AccessToken.objects.filter(token=school_token)\n    if access_token.exists():\n        access_token = access_token[0]\n        try:\n            student = get_object_or_404(Student, reg_number=reg_number)\n            # Let's be sure there's no pin for this student\n            access_card = AccessCard.objects.filter(student=student, session=access_token.session, term=access_token.term)\n            if access_card.exists():\n                data['message'] = 'This student already has a pin for %s Term, %s session' % (ordinal(access_token.token_application.term), access_card[0].session)\n            else:\n                access_card = AccessCard.objects.create(\n                    student=student,\n                    access_code='PIN-'+pin_generator(length=12),\n                    term=access_token.token_application.term,\n                    session=access_token.token_application.session,\n                    school_id=school.registration_id,\n                    school_token=access_token\n                )\n\n                klass = \"\"\n                if access_card.student.student_class.nick_name:\n                    klass = \"{}-{}\".format(access_card.student.student_class.caption, access_card.student.student_class.nick_name)\n                else:\n                    klass = \"{}\".format(access_card.student.student_class.caption)\n                data['pin'] = access_card.access_code\n                data['name'] = access_card.student.full_name\n                data['reg_number'] = access_card.student.reg_number\n                data['class'] = klass\n                data['message'] = 'Success!'\n                data['status_code'] = 1\n        except:\n            data['message'] = \"There's no student with Reg Number: '%s'\" % (reg_number)\n    else:\n        data['message'] = \"Oops! It seems you're using a wrong token: %s. Contact a SANIFY Developer for help on [email protected]\" % (school_token)\n    return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'student'), login_url='/auth/login/')\ndef validation(request):\n    template_name = 'payments/validate.html'\n    return render(request, template_name, {})\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'student'), login_url='/auth/login/')\ndef get_validated_pin(request):\n    params = request.GET\n    data = {}\n    access_code = params.get('token')\n    try:\n        access_card = AccessCard.objects.filter(student=request.user.student, access_code=access_code)\n        access_card = access_card[0]\n        access_card.validated = True\n        access_card.save()\n        # return json data\n        data['status_code'] = 1\n        data['message'] = 'Congratulations! 
Your pin has been validated and your account is now fully ACTIVE'\n        data['pin'] = access_card.access_code\n        data['name'] = access_card.student.full_name\n        data['reg_number'] = access_card.student.reg_number\n        data['class'] = \"{}-{}\".format(access_card.student.student_class.caption, access_card.student.student_class.nick_name)\n    except:\n        data['message'] = 'Sorry! Your pin did not validate. Meet your school for a valid pin number'\n    return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required\ndef initiate_payment(request, payment_id):\n    school = request.user.teacher.school\n    config = Config.objects.get(school=school)\n    application = get_object_or_404(TokenApplication, payment_id=payment_id)\n    pay_root = payment_id.split('_')[2]\n    students = Student.objects.filter(school=school, user_status='A')\n    amount = float(students.count() * config.plan.amount)\n    context = {\n        'amount': amount,\n        'pay_root': pay_root,\n        'email': request.user,\n        'school': school,\n        'application': application\n    }\n    return render(request, 'payments/api_pay.html', context)\n\n\n@login_required\ndef make_payment(request):\n    school = request.user.teacher.school\n    config = Config.objects.get(school=school)\n    students = Student.objects.filter(school=school, user_status='A')\n    email = request.user.username\n    amount = float(students.count() * config.plan.amount * 100)\n    reference = request.POST.get('pay')\n\n    transaction = Transaction(secret_key=settings.PAYSTACK_API_KEY)\n    response = transaction.initialize(email, amount, reference,\n        callback_url=\"{}/payments/payment_confirmed\".format(get_site_url(request)))\n    transaction.authorize()\n\n    print(response)\n    return HttpResponseRedirect(reverse('dashboard'))\n    # except Exception as e:\n    messages.error(request, \"An error has just occurred. It seems you have problems with your connection. 
\"\n \"Try the action after a while\")\n return HttpResponseRedirect(reverse('payments:list'))\n\n\n@login_required\ndef payment_confirmed(request):\n reference_code = request.GET.get('reference')\n reference = \"SANI_PAY_{}\".format(reference_code)\n p = Transaction(secret_key=settings.PAYSTACK_API_KEY)\n response = p.verify(reference_code).json()\n tokenApp = TokenApplication.objects.get(payment_id=reference_code)\n tokenApp.status = \"A\"\n tokenApp.is_paid = True\n tokenApp.save()\n\n activate_token(tokenApp)\n context = {\n \"token_app\": tokenApp,\n \"response\": response['data'],\n \"amount\": \"%.2f\" % (response['data'].get('amount')/100)\n }\n return render(request, 'payments/confirmed.html', context)\n\ndef req(amount, email, reference):\n import requests\n import webbrowser\n payload={\n \"email\": \"{}\".format(email),\n \"amount\": amount,\n \"reference\": \"{}\".format(reference),\n \"callback_url\": \"{}/auth/dashboard\".format(get_site_url(request))\n }\n data = json.dumps(payload)\n headers={\n \"Authorization\": \"Bearer sk_test_2757e92b4cd26e9b5d54f361b8b4dec3e6ec410d\",\n \"Content-Type\": \"application/json\"\n }\n url = \"https://api.paystack.co/transaction/initialize\"\n return requests.request(\"post\", url,\n headers=headers,\n json=payload\n )\n #webbrowser.open(response['data']['authorization_url'])\n\ndef activate_token(token_application):\n try:\n access_token = AccessToken.objects.get(token_application=token_application)\n except:\n AccessToken.objects.create(\n token_application=token_application,\n )\n\ndef activate_without_pay(request, payment_id):\n tokenApp = TokenApplication.objects.get(payment_id=payment_id)\n tokenApp.status = \"A\"\n tokenApp.is_paid = True\n tokenApp.save()\n\n activate_token(tokenApp)\n return HttpResponseRedirect(reverse('payments:list'))\n\ndef teller_pay(request, payment_id):\n tokenApp = TokenApplication.objects.get(payment_id=payment_id)\n context = {}\n template_name = 'payments/teller.html'\n if request.method == 'POST':\n form = TellerPaymentForm(request.POST)\n if form.is_valid():\n tokenApp.teller_number = form.cleaned_data['teller_number']\n tokenApp.teller_date = form.cleaned_data['teller_date']\n tokenApp.payment_method = 'Teller'\n tokenApp.status = 'P'\n tokenApp.save()\n messages.info(request, \"The Transaction is processing. 
Your payment will be verified within 48hours\")\n return HttpResponseRedirect(reverse('payments:list'))\n else:\n context['form'] = TellerPaymentForm()\n context['token_app'] = tokenApp\n return render(request, template_name, context)\n\n\ndef get_site_url(request):\n protocol = \"http://\"\n if request.is_secure():\n protocol = 'https://'\n url = \"{}{}\".format(protocol, request.get_host())\n return url" }, { "alpha_fraction": 0.47930195927619934, "alphanum_fraction": 0.5097402334213257, "avg_line_length": 31.85333251953125, "blob_id": "22ad820155e43008b436af40a1862dda56a153f6", "content_id": "cf9adac5c634744bc6a348d9f6e342bcfa509f29", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2464, "license_type": "permissive", "max_line_length": 208, "num_lines": 75, "path": "/apps/staff/templates/staff/register.html", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "{% extends 'staff/list.html' %}\n{% block title %}New Staff{% endblock %}\n\n{% block extracss %}\n<style>\n .click {\n position: absolute;\n background: rgb(12, 113, 121);\n top: 50%;\n right: 10px;\n padding: 2px 15px;\n border: 1px solid rgb(12, 113, 121);\n color: #fff;\n border-radius: 25px;\n box-shadow: 7px 7px 15px 0 rgba(0,0,0,0.3);\n transition: all .34s ease-in-out;\n font-size: 13px;\n cursor: pointer;\n }\n .click:hover {\n box-shadow: 1px 1px 7px 0 rgba(0,0,0,0.3);\n }\n</style>\n{% endblock %}\n\n{% block inner-content %}\n<div id=\"class-page-content\">\n<div class=\"teachers table\">\n <div class=\"header\">\n <h1>New Staff</h1>\n </div>\n</div>\n\n<div class=\"choose-section-name\">\n <div class=\"name-entry\">\n <hr>\n <div class=\"panel panel-default\" style=\"margin-bottom: 0; padding:10px; color: rgb(10, 61, 85); background: #fff;\">\n <form method=\"POST\" action=\"\">{% csrf_token %}\n {% for field in form %}\n {% if field.html_name == 'staff_id' %}\n <div class=\"form-group\" style=\"position: relative;\">\n <label for=\"{{field.id_for_label}}\">Staff ID:</label>\n {{field}}\n <span onclick=\"getRegNumber()\" class=\"icons icons-sm icons-mouse-pointer click\"> Generate ID</span>\n </div>\n {% else %}\n <div class=\"form-group\">\n <label for=\"{{field.id_for_label}}\">{{field.label}}</label>\n {{field}}\n </div>\n {% endif %}\n {% endfor %}\n <div class=\"form-group\">\n <button type=\"submit\" class=\"status pay\" style=\"padding: 7px 30px; border: none;\">Create and Add Another</button>\n <button type=\"button\" class=\"status cancel\" style=\"background: rgb(150, 149, 149); padding: 7px 30px; border: none;\" onclick=\"location.href='{% url 'staff:list' %}'\"> &laquo; Back</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n{% endblock inner-content %}\n{% block extrajs %}\n<script>\n let jsonURL = \"{% url 'staff:staff-json' %}\";\n function generate(path) {\n $.getJSON(path, function(data){\n $('#id_staff_id').val(data['staff_id']);\n })\n }\n\n function getRegNumber(){\n generate(jsonURL);\n }\n</script>\n{% endblock %}\n" }, { "alpha_fraction": 0.5100603699684143, "alphanum_fraction": 0.5633803009986877, "avg_line_length": 27.399999618530273, "blob_id": "cfaa437882fa3a059b9f58b24f6bc9dc9a688213", "content_id": "989d16785f35603586b36e3aff716b1a680f3ac2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "permissive", "max_line_length": 66, "num_lines": 35, "path": "/apps/payments/migrations/0004_auto_20181224_1736.py", 
"repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-12-24 16:36\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('students', '0004_auto_20181003_2222'),\n ('payments', '0003_auto_20180527_2326'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='accesscard',\n name='session',\n field=models.CharField(max_length=10, null=True),\n ),\n migrations.AddField(\n model_name='plan',\n name='max_admin',\n field=models.PositiveIntegerField(default=1),\n ),\n migrations.AddField(\n model_name='tokenapplication',\n name='session',\n field=models.CharField(max_length=10, null=True),\n ),\n migrations.AlterUniqueTogether(\n name='accesscard',\n unique_together=set([('student', 'term', 'session')]),\n ),\n ]\n" }, { "alpha_fraction": 0.46886447072029114, "alphanum_fraction": 0.46886447072029114, "avg_line_length": 20, "blob_id": "97ff3a72631cd6fbe36147af9cbce134fc8124cd", "content_id": "fc8769b5f017ccc648b54d1099f1740f40837d71", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "permissive", "max_line_length": 53, "num_lines": 13, "path": "/sani_app/cron.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "\ndef update_class():\n classes = {\n 'NUR': 'Nursery',\n 'PRI': 'Primary',\n 'JSS': 'JSS',\n 'SSS': 'SSS',\n 'CRE': 'Crech'\n }\n '''\n This will be our cron-job to update the students \n classes every end of session\n '''\n pass" }, { "alpha_fraction": 0.6564211845397949, "alphanum_fraction": 0.6593629717826843, "avg_line_length": 37.8110237121582, "blob_id": "06c8244be2467fb477dcf92c2c935477286a51a3", "content_id": "a3094f1d1c5ee8f9670840841749cac92d238a9b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9858, "license_type": "permissive", "max_line_length": 144, "num_lines": 254, "path": "/apps/students/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, render_to_response\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.http import HttpResponse\nfrom django.views.generic import ListView, DetailView, TemplateView\nfrom django.template.response import TemplateResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import get_object_or_404\nfrom institutions.models import StudentClass\nfrom .models import Student\nfrom .forms import *\nfrom .utils import *\nfrom django.utils.decorators import method_decorator\nfrom institutions.models import *\nfrom students.models import *\nfrom config.models import StudentConfig\nfrom students.forms import StudentCreationForm\nfrom results.forms import ImportForm\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q, Avg\nfrom django.db import transaction\nfrom django.contrib import messages\nfrom django.utils.decorators import method_decorator\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom config.utils import Limit\ntry:\n import json\nexcept:\n import simplejson as json\n\n\nclass StudentListView(ListView):\n model = Student\n template_name = 'students/list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self):\n 
queryset = Student.objects.get_active_students(self.request.user.teacher.school)\n params = self.request.GET\n reg_number = params.get('reg_number', '')\n class_id = params.get('class', '')\n deactivate = params.get('deactivated')\n\n if deactivate:\n queryset = Student.objects.get_others()\n if reg_number !='':\n queryset = queryset.filter(reg_number=reg_number)\n if class_id !='':\n student_class = get_object_or_404(StudentClass, pk=class_id)\n queryset = queryset.filter(student_class=student_class)\n return queryset\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(StudentListView, self).get_context_data(**kwargs)\n queryset = self.get_queryset()\n\n paginator = Paginator(queryset, self.paginated_by)\n page = self.request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context['students'] = queryset\n context['count'] = self.get_queryset().count()\n context['classes'] = StudentClass.objects.filter(school=self.request.user.teacher.school)\n return context\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n return super(StudentListView, self).dispatch(request, *args, **kwargs)\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef create_student(request):\n teacher = request.user.teacher\n limit = Limit(teacher.school.config.plan, teacher.school)\n if request.method == \"POST\":\n form = StudentCreationForm(teacher.school, request.POST)\n if form.is_valid():\n try:\n if limit.limit_reached(Student):\n messages.info(request, \"You have reached the maximum number of students you can add. 
Upgrade your plan to add more students\")\n                    return HttpResponseRedirect(reverse('students:list'))\n                else:\n                    new_student = form.save(commit=False)\n                    new_student.school = request.user.teacher.school\n                    new_student.save()\n\n                    setup = StudentConfig(student=new_student)\n                    setup.save()\n                    messages.success(request, \"%s's record has been successfully created.\" % (new_student))\n                    return HttpResponseRedirect(reverse('students:list'))\n            except Exception as e:\n                messages.error(request, e)\n    else:\n        form = StudentCreationForm(teacher.school)\n    return render(request, 'students/register.html', {'form': form})\n\n\n@login_required\ndef edit(request, student_slug):\n    context = {}\n    student = Student.objects.get(slug=student_slug)\n    template_name = 'students/edit.html'\n    inst_form = None\n    if request.method == 'POST':\n        if hasattr(request.user, 'teacher'):\n            inst_form = SchoolForm(student.school, request.POST, request.FILES, instance=student)\n        b_form = BasicProfileForm(request.POST, request.FILES, instance=student)\n        p_form = PersonalInformationForm(request.POST, request.FILES, instance=student)\n        if b_form.is_valid() and p_form.is_valid():\n            try:\n                if hasattr(request.user, 'teacher'):\n                    if inst_form.is_valid():\n                        p_form.save()\n                        messages.success(request, \"The profile was successfully updated\")\n                        return HttpResponseRedirect(reverse('students:list'))\n                if hasattr(request.user, 'student'):\n                    p_form.save()\n                    messages.success(request, \"Your profile was successfully updated\")\n                    return HttpResponseRedirect(reverse('students:profile', kwargs={'student_slug': request.user.student.slug}))\n            except Exception as e:\n                messages.error(request, e)\n                return HttpResponseRedirect(reverse('students:edit', kwargs={'student_slug': student.slug}))\n    else:\n        if hasattr(request.user, 'teacher'):\n            inst_form = SchoolForm(student.school, instance=student)\n        p_form = PersonalInformationForm(instance=student)\n        b_form = BasicProfileForm(instance=student)\n    context = {\n        'inst_form': inst_form,\n        'p_form': p_form,\n        'b_form': b_form,\n        'student': student\n    }\n    return render(request, template_name, context)\n\n\n@login_required\ndef restore_account(request, student_slug):\n    student = Student.objects.get(slug=student_slug)\n    try:\n        student.user_status = 'A'\n        student.save()\n        messages.success(request, \"%s's account has been successfully restored.\" % (student))\n\n        return HttpResponseRedirect(reverse('students:list'))\n    except:\n        return HttpResponseRedirect(reverse('students:list'))\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef deactivate(request):\n    teacher = request.user.teacher\n    params = request.GET\n    student_id = params.get('student_id')\n    student = get_object_or_404(Student, pk=student_id)\n\n    if teacher.is_admin:\n        student.user_status = 'L'\n        student.save()\n        messages.success(request, \"%s's account has been successfully deactivated.\" % (student))\n    else:\n        messages.error(request, \"Sorry! 
you do not have access rights to deactivate this account.\")\n    return HttpResponseRedirect(reverse('students:list'))\n\n\n@login_required\ndef update_photo(request, student_slug):\n    student = get_object_or_404(Student, slug=student_slug)\n    if request.method == \"POST\":\n        photo = request.FILES['photo']\n\n        student.photo = photo\n        student.save()\n    return HttpResponseRedirect(reverse('students:edit', kwargs={'student_slug': student.slug}))\n\n\n@login_required\ndef student_profile(request, student_slug):\n    template_name = 'students/profile.html'\n\n    student = get_object_or_404(Student, slug=student_slug)\n    context = {'student': student}\n\n    return render(request, template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: u.teacher, login_url=\"/auth/login/\")\ndef import_student_data(request):\n    form = ImportForm()\n    template_name = 'students/student_import.html'\n    return render(request, template_name, {'form': form})\n\n\n@login_required\ndef upload_student_csv(request):\n    if request.method == 'GET':\n        return render(request, 'students/student_import.html', {})\n    try:\n        csv_file = request.FILES[\"file\"]\n        if not csv_file.name.endswith('.csv'):\n            messages.error(request, 'File is not CSV type')\n            return HttpResponseRedirect(reverse(\"students:student-import\"))\n\n        # If the file is too large\n        if csv_file.multiple_chunks():\n            messages.error(request, \"Uploaded file is too big (%.2f MB).\" % (csv_file.size/(1000*1000),))\n            return HttpResponseRedirect(reverse(\"students:student-import\"))\n\n        # else continue\n        posted, existed = import_student_from_csv(csv_file, request.user.teacher)\n        if posted > 0:\n            messages.success(request, \"You successfully imported %s new records, but we found %s already existing ones\" % (posted, existed))\n        else:\n            messages.info(request, \"It appears you do not have any new records to import. 
Total Existing Records Found: %s\" % (existed))\n except Exception as e:\n messages.error(request, e)\n return HttpResponseRedirect(reverse(\"students:list\"))\n\n\[email protected]\ndef reg_json(request):\n teacher = request.user.teacher\n if request.method == 'GET':\n reg_number = generate_reg_number(teacher.school)\n data = {\n \"reg_number\": reg_number,\n }\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n" }, { "alpha_fraction": 0.7570093274116516, "alphanum_fraction": 0.7570093274116516, "avg_line_length": 29.714284896850586, "blob_id": "60a33a4c25b040d75ec2c431a60c40fef6451a9a", "content_id": "85daeda9e63083018f768aebc61305a2a386f750", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "permissive", "max_line_length": 60, "num_lines": 7, "path": "/apps/students/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Student\n\n# Register your models here.\[email protected](Student)\nclass StudentAdmin(admin.ModelAdmin):\n list_display = ('reg_number', 'first_name', 'last_name')" }, { "alpha_fraction": 0.6891191601753235, "alphanum_fraction": 0.6891191601753235, "avg_line_length": 58.46154022216797, "blob_id": "73a0710b982623e5f89873f1a4b497c047fc65c1", "content_id": "37d176801b2464d71a9bf5ca6197f4b498b0d0e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "permissive", "max_line_length": 118, "num_lines": 13, "path": "/apps/api/urls.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom .views import *\n\nurlpatterns = [\n url(r'^students$', StudentAPIView.as_view(), name='student-list'),\n url(r'^students/(?P<slug>[\\w-]+)$', StudentRetrieveUpdateDestroyAPIView.as_view(), name='student-instance'),\n url(r'^assessments/(?P<uuid>\\w+)$', AssessmentRetrieveUpdateDestroyAPIView.as_view(), name='assessment-instance'),\n url(r'^groups/(?P<pk>\\d+)$', GroupRetrieveUpdateDestroyAPIView.as_view(), name='group-instance'),\n url(r'^results$', ResultAPIView.as_view(), name='result-list'),\n url(r'^assessments$', AssessmentAPIView.as_view(), name='assessment-list'),\n url(r'^classes$', StudentClassAPIView.as_view(), name='class-list'),\n url(r'^groups$', GroupAPIView.as_view(), name='group-list'),\n]" }, { "alpha_fraction": 0.5635707974433899, "alphanum_fraction": 0.6032461524009705, "avg_line_length": 29.80555534362793, "blob_id": "69d8c6d1ab18b57c892a553989283a83925f122a", "content_id": "bfb6a6991fac8b1801c8d194c60db8c20eae5a8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1109, "license_type": "permissive", "max_line_length": 115, "num_lines": 36, "path": "/apps/config/migrations/0002_auto_20180523_0138.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-22 23:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('students', '0001_initial'),\n ('payments', '0001_initial'),\n ('config', '0001_initial'),\n ('institutions', '0002_auto_20180523_0138'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='studentconfig',\n 
name='student',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='students.Student'),\n ),\n migrations.AddField(\n model_name='config',\n name='plan',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='payments.Plan'),\n ),\n migrations.AddField(\n model_name='config',\n name='school',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='institutions.Institution'),\n ),\n ]\n" }, { "alpha_fraction": 0.7485548853874207, "alphanum_fraction": 0.7485548853874207, "avg_line_length": 30.545454025268555, "blob_id": "dd7ea4518f49d41413435cb146a413ee4ee4befd", "content_id": "ceac4460f9e38256efb5fd9b3b621d1cd524648a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "permissive", "max_line_length": 62, "num_lines": 11, "path": "/apps/reports/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Report, ReportBatch\n# Register your models here.\[email protected](Report)\nclass ReportAdmin(admin.ModelAdmin):\n list_display = ('student', 'student_class')\n\n\[email protected](ReportBatch)\nclass ReportBatchAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'school', 'school_resume_date')" }, { "alpha_fraction": 0.7846890091896057, "alphanum_fraction": 0.7846890091896057, "avg_line_length": 34, "blob_id": "7639ed9d1158c1ddccaf3a39e041611498156fb3", "content_id": "1f05917ee1bab375c0e72c7cccc0fba68da9f465", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 209, "license_type": "permissive", "max_line_length": 47, "num_lines": 6, "path": "/apps/insights/admin.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Assessment\n# Register your models here.\[email protected](Assessment)\nclass AssessmentAdmin(admin.ModelAdmin):\n list_display = ('title', 'assessment_type')" }, { "alpha_fraction": 0.5669421553611755, "alphanum_fraction": 0.5779614448547363, "avg_line_length": 32.01818084716797, "blob_id": "e39c53d94c0311eebd4496b5b7a882ec1e85ae01", "content_id": "bbab3f38c0fe96eb6772398a466edcdbba0ea56a", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1815, "license_type": "permissive", "max_line_length": 111, "num_lines": 55, "path": "/sani_app/static/js/payment.js", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "//payment.js\nfunction copy(id) {\n var copyText = document.getElementById(id);\n copyText.select();\n \n try {\n // copy text\n document.execCommand('copy');\n copyText.blur();\n \n // copied animation\n var copied = document.getElementById('c-'+id);\n copied.classList.add('copied');\n setTimeout(function() { copied.classList.remove('copied'); }, 1500);\n }\n catch (err) {\n alert('please press Ctrl+C/Cmd+C to copy');\n }\n}\n\nfunction printMe(id) {\n $('#frm-'+id).submit();\n}\n\n\nlet jsonURL = \"/payments/validate/pin\";\nfunction generate(path) {\n $.getJSON(path, dataHandler);\n}\n\nfunction getDetails(){\n var token = $('#id_token').val();\n var path = jsonURL+'?token='+token;\n generate(path);\n}\n\nfunction dataHandler(data){\n var newNode = document.createElement('div'); \n var content = '<h3 style=\"text-align: center; margin-top: 10px; font-weight: 200;\">Validation Result</h3>';\n 
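    // Build the result table markup; status_code == 1 means the pin lookup succeeded,
    // otherwise only the message row below is rendered.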
content += '<table class=\"table table-striped\"><tbody>';\n if(data['status_code'] == 1) {\n content += '<tr><td>Full Name</td><td>'+data['name']+'</td></tr>';\n content += '<tr><td>Reg Number</td><td>'+data['reg_number']+'</td></tr>';\n content += '<tr><td>Class</td><td>'+data['class']+'</td></tr>';\n content += '<tr><td>Pin</td><td style=\"font-size: 20px;\">'+data['pin']+'</td></tr>';\n content += '<tr><td>Message</td><td style=\"color: #109445;\">'+data['message']+'</td></tr>';\n }else{\n content += '<tr><td>Message</td><td style=\"text-align: center;\">'+data['message']+'</td></tr>';\n }\n content += '</tbody><table></div>';\n newNode.innerHTML = content;\n var container = document.getElementById('response_container');\n container.innerHTML = content;\n document.location.reload();\n}" }, { "alpha_fraction": 0.6276962161064148, "alphanum_fraction": 0.6314226984977722, "avg_line_length": 39.49360656738281, "blob_id": "33f50bc6a99cdbf9d86ecd552ac6c05718383167", "content_id": "70ce0bda65cd409fad7d29d1ec535d93d6e47d04", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31665, "license_type": "permissive", "max_line_length": 166, "num_lines": 782, "path": "/apps/reports/views.py", "repo_name": "pastorenue/sani-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom students.models import Student\nfrom results.utils import StudentReport\nfrom institutions.models import StudentClass\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.views.generic import ListView\nfrom django.core.urlresolvers import reverse\nfrom .utils import render_to_pdf as rtp\nfrom easy_pdf.rendering import render_to_pdf_response, html_to_pdf\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom reports.models import Report, ReportBatch, BroadSheet\nfrom .forms import *\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.forms.formsets import formset_factory\nfrom django.forms.models import modelformset_factory\nfrom config.models import StudentConfig\nfrom payments.models import AccessToken\nfrom staff.models import Teacher\nfrom subjects.models import Subject\nfrom django.contrib.humanize.templatetags.humanize import ordinal\nfrom results.models import Result\nfrom django.core import serializers\nfrom notifications.signals import notify\ntry:\n import simplejson as json\nexcept:\n import json\n\n\nclass ReportBatchListView(ListView):\n model = ReportBatch\n template_name = ''\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self):\n if hasattr(self.request.user, 'teacher'):\n current_user = self.request.user\n is_form_teacher = StudentClass.objects.filter(form_teacher=current_user.teacher).exists()\n queryset = super(ReportBatchListView, self).get_queryset()\n if is_form_teacher or current_user.teacher.is_admin:\n queryset = queryset.filter(school=current_user.teacher.school)\n else:\n pass\n params = self.request.GET\n term = params.get('term', '')\n session = params.get('session', '')\n\n if term != '':\n queryset = queryset.filter(term=term)\n if session != '':\n queryset = queryset.filter(session=session)\n return queryset\n\n def get_context_data(self, **kwargs):\n if hasattr(self.request.user, 'teacher'):\n self.template_name = 'reports/report_batch.html'\n else:\n self.template_name = 
'reports/student_batch.html'\n context = super(ReportBatchListView, self).get_context_data(**kwargs)\n queryset = self.get_queryset()\n paginator = Paginator(queryset, self.paginated_by)\n page = self.request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n context['batches'] = queryset\n context['count'] = self.get_queryset().count()\n return context\n\n\nclass BroadSheetListView(ListView):\n model = BroadSheet\n template_name = 'reports/broadsheet_list.html'\n paginated_by = settings.PAGE_SIZE\n\n def get_queryset(self):\n teacher = self.request.user.teacher\n queryset = super(BroadSheetListView, self).get_queryset()\n if not teacher.is_admin:\n queryset = queryset.filter(student_class__form_teacher=teacher)\n\n params = self.request.GET\n batch_id = params.get('batch', '')\n class_id = params.get('class', '')\n\n if batch_id != '':\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n queryset = queryset.filter(batch=batch)\n if class_id != '':\n student_class = get_object_or_404(StudentClass, pk=class_id)\n queryset = queryset.filter(student_class=student_class)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(BroadSheetListView, self).get_context_data(**kwargs)\n queryset = self.get_queryset()\n paginator = Paginator(queryset, self.paginated_by)\n page = self.request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n context['broadsheets'] = queryset\n context['count'] = self.get_queryset().count()\n context['batches'] = ReportBatch.objects.filter(school=self.request.user.teacher.school)\n context['classes'] = StudentClass.objects.filter(school=self.request.user.teacher.school)\n return context\n\n\[email protected]\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef delete_batch(request, batch_id):\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n Report.objects.filter(batch=batch).delete()\n batch.delete()\n messages.success(request, \"You've successfully deleted 'Batch: %s' and all reports associated with it\" % (batch))\n return HttpResponseRedirect(reverse('reports:batch'))\n\n\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\n@login_required\ndef reports(request, batch_id):\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n current_user = request.user\n if current_user.teacher.is_admin:\n reports = Report.objects.filter(batch=batch)\n else:\n reports = Report.objects.filter(batch=batch, student_class__form_teacher=current_user.teacher)\n \n params = request.GET\n term = params.get('term', '')\n class_id = params.get('class', '')\n reg_number = params.get('reg_number', '')\n\n if term != '':\n reports = reports.filter(batch__term=term)\n if reg_number != '':\n try:\n student = get_object_or_404(Student, reg_number=reg_number)\n reports = reports.filter(student=student)\n except:\n pass\n if class_id != '':\n reports = reports.filter(student_class__id=class_id)\n template_name = 'reports/list.html'\n classes = StudentClass.objects.filter(school=request.user.teacher.school)\n paginated_by = settings.PAGE_SIZE\n context = {}\n queryset = reports\n paginator = Paginator(queryset, paginated_by)\n page = request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n 
queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n context['reports'] = queryset\n context['count'] = reports.count()\n context['classes'] = classes\n context['batch'] = batch\n\n return render(request, template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'student'), login_url='/auth/login/')\ndef student_reports(request):\n reports = Report.objects.filter(student=request.user.student)\n\n params = request.GET\n term = params.get('term', '')\n\n if term != '':\n reports = reports.filter(batch__term=term)\n template_name = 'reports/student_reports.html'\n\n paginated_by = settings.PAGE_SIZE\n context = {}\n queryset = reports\n paginator = Paginator(queryset, paginated_by)\n page = request.GET.get('page')\n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n context['reports'] = queryset\n context['count'] = reports.count()\n context['classes'] = StudentClass.objects.filter(school=request.user.student.school)\n\n return render(request, template_name, context)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef new_report(request):\n template_name = 'reports/new_report.html'\n teacher = request.user.teacher\n classes = StudentClass.objects.filter(school=request.user.teacher.school)\n if not teacher.is_admin:\n classes = classes.filter(form_teacher=teacher)\n\n return render(request, template_name, {'classes': classes})\n\n\[email protected]\n@login_required\ndef generate_report(request):\n if request.method == 'POST':\n params = request.POST\n report_type = params.get('report_type')\n term = params.get('term')\n session = params.get('session')\n class_id=params.get('class')\n reg_number = params.get('reg_number')\n\n try:\n token = params.get('token')\n access_token = get_object_or_404(AccessToken, token=token)\n if access_token.token_application.term != int(term):\n messages.error(request, \"Wrong Token. Please contact SANIFY for help\")\n return HttpResponseRedirect(reverse('reports:new-report'))\n except:\n messages.error(request, \"Invalid Token. Please contact SANIFY for help\")\n return HttpResponseRedirect(reverse('reports:new-report'))\n\n report_batch = None\n #Get student's class\n student_class = get_object_or_404(StudentClass, pk=class_id)\n batch = ReportBatch.objects.filter(school=request.user.teacher.school,term=term,session=session)\n if batch.exists():\n report_batch = batch[0]\n else:\n report_batch = ReportBatch.objects.create(school=request.user.teacher.school,term=term,session=session)\n\n if report_type == 'single_report':\n try:\n student = get_object_or_404(Student, reg_number=reg_number)\n if Report.objects.filter(batch=report_batch,student=student,student_class=student_class).exists():\n messages.info(request, 'Sorry! This report has already been generated. 
Check the report list for Batch: %s' % (report_batch))\n return HttpResponseRedirect(reverse('reports:new-report'))\n else:\n Report.objects.create(\n batch=report_batch,\n student=student,\n student_class=student_class\n )\n #update the student performance average\n result_data = StudentReport.termly_report(student, term=report_batch.term, student_class=student_class, session=report_batch.session)\n student_config = get_object_or_404(StudentConfig, student=student)\n student_config.previous_performance = student_config.current_performance\n student_config.current_performance = result_data['average']\n student_config.save()\n\n messages.success(request, 'Report has been succesfully generated for %s' % (student))\n notify.send(request.user, \n recipient=student.user, \n verb=\"Your report card for %s term %s, has been processed and ready to view\"\\\n % (ordinal(report_batch.term), student.student_class.caption), \n description=\"\")\n return HttpResponseRedirect(reverse('reports:list', kwargs={'batch_id':report_batch.id}))\n except:\n messages.error(request, 'Sorry! Something is not right with your command. Try again later')\n return HttpResponseRedirect(reverse('reports:new-report'))\n if report_type == 'multiple_report':\n new_value=0\n existing_values=0\n students = Student.objects.filter(student_class=student_class)\n if students.count() == 0:\n messages.info(request, 'Oops!! Sorry this class is empty. We can only create reports for actual students')\n return HttpResponseRedirect(reverse('reports:new-report'))\n try:\n for student in students:\n if Report.objects.filter(batch=report_batch, student=student,student_class=student_class).exists():\n existing_values+=0\n else:\n student_config = get_object_or_404(StudentConfig, student=student)\n Report.objects.create(\n student=student,\n student_class=student_class,\n batch=report_batch\n )\n #update the student performance average\n result_data = StudentReport.termly_report(student, term=report_batch.term, student_class=student_class, session=report_batch.session)\n student_config.previous_performance = student_config.current_performance\n student_config.current_performance = result_data['average']\n student_config.save()\n new_value+=1\n if new_value > 0:\n messages.success(request, 'Report has been generated for %s with %s students. %s already existing.' % (student_class, new_value, existing_values))\n return HttpResponseRedirect(reverse('reports:new-report'))\n if existing_values > 0 and new_value < 0:\n messages.info(request, '%s: already existing.' % (existing_values))\n return HttpResponseRedirect(reverse('reports:new-report'))\n if existing_values == 0 and new_value == 0:\n messages.info(request, 'No new reports found to generate.')\n return HttpResponseRedirect(reverse('reports:new-report'))\n except Exception as e:\n messages.error(request, '%s Sorry! Something is not right with your command. 
Try again later' % (e))\n return HttpResponseRedirect(reverse('reports:new-report'))\n return HttpResponseRedirect(reverse('reports:new-report'))\n\n\[email protected]\n@login_required\ndef report_remarks(request):\n params = request.POST\n form = None\n has_psychomotor = request.user.teacher.school.config.use_attendance\n if request.method == 'POST':\n if has_psychomotor:\n form = CompleteRemarkForm(request.POST, instance=instance)\n else:\n form = SimpleRemarkForm(request.POST, instance=instance)\n if form.is_valid():\n report = form.save(commit=False)\n report.save()\n messages.success(request, 'Remarks have been added for %s' % (report.student))\n else:\n if has_psychomotor:\n form = CompleteRemarkForm(instance=instance)\n else:\n form = SimpleRemarkForm(instance=instance)\n return render(request, 'reports/remarks.html', {'report': report})\n\n\n@login_required\ndef print_report(request, student_id, term, class_id):\n \"\"\"A function/view to print a single student report detail\n \n Arguments:\n request {request} -- A HTTP request instance\n student_id {integer} -- The student's id\n term {integer} -- Term of which we are geneting the report\n class_id {integer} -- student_class id\n \n Returns:\n template response -- A rtp response that converts the template \n response to a pdf\n \"\"\" \n \n # get all the necessary parameters\n session = request.GET.get('session','')\n student_class = get_object_or_404(StudentClass, pk=class_id)\n student = get_object_or_404(Student, pk=student_id)\n batch = ReportBatch.objects.filter(term=term, session=session)\n\n # we needthe full report details\n result_data = StudentReport.termly_report(student, term=term, student_class=student_class)\n \n # get the student's ranking \n position = StudentReport.get_student_position(student, student_class, term=term, session=session)\n \n # send the context to the pdf file \n context = {\n 'result_data': result_data,\n 'rank': position,\n 'student': student,\n 'no_in_class': len(StudentReport.students_termly_reports(student_class, term=term, session=session)),\n 'report': Report.objects.filter(batch=batch, student_class=student_class, student=student)[0]\n }\n\n #return render_to_pdf_response(request, 'reports/report_card.html', context, download_filename='report.pdf')\n #return render(request, 'reports/report_card.html', context)\n return rtp('reports/report_card.html', context)\n\n\n@login_required\ndef print_broadsheet(request, broadsheet_id):\n broadsheet = get_object_or_404(BroadSheet, pk=broadsheet_id)\n template_name = 'reports/broadsheet.html'\n\n try:\n data = StudentReport.get_data(\n broadsheet.student_class,\n session=broadsheet.batch.session,\n term=broadsheet.batch.term)\n context = {\n 'broadsheet': broadsheet,\n 'data': data\n }\n except:\n context = {}\n return rtp(template_name, context)\n #return render(request, template_name, context)\n\n\n@login_required\[email protected]\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url=\"/auth/login/\")\ndef batch_remark(request, batch_id):\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n reports = Report.objects.filter(batch=batch)\n form = None; teacher = request.user.teacher\n\n if teacher.is_form_teacher:\n form = FormReportForm\n if teacher.is_admin:\n form = HeadTeacherForm\n if teacher.is_admin and teacher.is_form_teacher:\n form = CompleteRemarkForm\n try:\n RemarkFormset = modelformset_factory(Report, form=form, extra=0)\n except:\n messages.error(request, \"You do not have access to this action.\")\n return 
HttpResponseRedirect(reverse('reports:batch'))\n\n if request.method == \"POST\":\n params = request.POST\n resume_date = params.get('resume_date', '')\n if not resume_date:\n messages.error(request, \"Your resumption date did not validate\")\n return HttpResponseRedirect(reverse('reports:batch'))\n\n batch_remark_formset = RemarkFormset(request.POST, queryset=reports)\n if batch_remark_formset.is_valid():\n try:\n count = 0\n for remark in batch_remark_formset:\n remark = remark.save(commit=False)\n remark.save()\n count+=1\n batch.school_resume_date = resume_date\n batch.save()\n messages.success(request, 'Remarks successfully saved')\n return HttpResponseRedirect(reverse('reports:batch'))\n except:\n messages.error(request, \"Your data did not validate. Check and try again later\")\n return HttpResponseRedirect(reverse('reports:remark', kwargs={'batch_id':batch.id}))\n else:\n batch_remark_formset = RemarkFormset(queryset=reports)\n if not teacher.is_admin:\n klazz = StudentClass.objects.filter(form_teacher=teacher)\n new_reports = [report.student.id for report in Report.objects.filter(batch=batch, student_class__in=klazz)]\n \n else:\n id_in_reports = [report.student.id for report in Report.objects.filter(batch=batch)]\n \n for form in batch_remark_formset:\n form.fields['student'].queryset = Student.objects.filter(id__in=id_in_reports)\n\n context = {\n 'batch': batch,\n 'classes': StudentClass.objects.filter(school=teacher.school),\n 'batch_remark_formset': batch_remark_formset\n }\n\n return render(request, 'reports/remarks.html', context)\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef single_remark(request, card_id):\n card = get_object_or_404(Report, pk=card_id)\n batch_id = card.batch.id\n \n params = request.POST\n resume_date = params.get('resume_date', '')\n\n if request.method == 'POST':\n if not resume_date:\n messages.error(request, \"Your resumption date did not validate\")\n return HttpResponseRedirect(reverse('reports:list', kwargs={'batch_id': card.batch.id}))\n form = PsychomotorForm(request.POST, instance=card)\n \n if form.is_valid():\n remark = form.save(commit=False)\n remark.student = card.student\n remark.save()\n card.batch.school_resume_date = resume_date\n card.batch.save()\n messages.success(request, \"Cognitive remark has been saved for %s\" % (card.student))\n return HttpResponseRedirect(reverse('reports:list', kwargs={'batch_id': batch_id}))\n else:\n form = get_form(request, instance=card)\n return render(request, 'reports/single_remark.html', {'form': form, 'card': card})\n\ndef get_form(req, instance=None):\n form = None\n teacher = req.user.teacher\n if req.method == 'POST':\n if teacher.is_form_teacher:\n if instance is not None:\n form = FormReportForm(req.POST, instance=instance)\n else:\n form = FormReportForm(req.POST)\n if teacher.is_admin:\n if instance is not None:\n form = HeadTeacherForm(req.POST, instance=instance)\n else:\n form = HeadTeacherForm(req.POST)\n if teacher.is_admin and teacher.is_form_teacher:\n if instance is not None:\n form = CompleteRemarkForm(req.POST, instance=instance)\n else:\n form = CompleteRemarkForm(req.POST)\n else:\n if teacher.is_form_teacher:\n if instance is not None:\n form = FormReportForm(instance=instance)\n else:\n form = FormReportForm()\n if teacher.is_admin:\n if instance is not None:\n form = HeadTeacherForm(instance=instance)\n else:\n form = HeadTeacherForm()\n if teacher.is_admin and teacher.is_form_teacher:\n if instance is not None:\n form = 
CompleteRemarkForm(instance=instance)\n else:\n form =CompleteRemarkForm()\n return form\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url=\"/auth/login/\")\ndef new_broadsheet(request):\n teacher = request.user.teacher\n template_name = 'reports/create_broadsheet.html'\n classes = StudentClass.objects.filter(school=teacher.school)\n if not teacher.is_admin:\n classes = classes.filter(form_teacher=teacher)\n batches = ReportBatch.objects.filter(school=teacher.school)\n\n return render(request, template_name, {'classes': classes, 'batches': batches})\n\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url=\"/auth/login/\")\ndef generate_broadsheet(request):\n if request.method == 'POST':\n params = request.POST\n batch_id = params.get('batch')\n class_id = params.get('class')\n\n #Get student's class\n student_class = get_object_or_404(StudentClass, pk=class_id)\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n broadsheet = BroadSheet.objects.filter(batch=batch, student_class=student_class)\n if broadsheet.exists():\n messages.info(request, 'Broadsheet already exists for %s' % (batch))\n return HttpResponseRedirect(reverse('reports:broadsheet'))\n try:\n BroadSheet.objects.create(\n batch=batch,\n student_class=student_class\n )\n messages.success(request, 'Broadsheet has been succesfully generated for %s' % (batch))\n return HttpResponseRedirect(reverse('reports:broadsheet'))\n except:\n messages.error(request, 'Sorry! Something is not right with your command. Try again later')\n return HttpResponseRedirect(reverse('reports:new-broadsheet'))\n\n\[email protected]\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef delete_broadsheet(request, broadsheet_id):\n broadsheet = get_object_or_404(BroadSheet, pk=broadsheet_id)\n broadsheet.delete()\n messages.success(request, \"You've successfully deleted '%s' from the database.\" % (broadsheet))\n return HttpResponseRedirect(reverse('reports:broadsheet'))\n\n\ndef export_main(request):\n params = request.GET\n school = request.user.teacher.school\n classes = StudentClass.objects.filter(school=school)\n\n context = {\n 'classes': classes\n }\n return render(request, 'reports/exports/_export.html', context)\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'), login_url='/auth/login/')\ndef export_filters(request):\n school = request.user.teacher.school\n params = request.GET\n from_date = params.get('from_date')\n to_date = params.get('to_date')\n class_id = params.get('class')\n status = params.get('status')\n gender = params.get('gender')\n model = params.get('model')\n f_class = params.get('f_class')\n export_format = params.get('format')\n\n payload = {\n 'status': status,\n 'class_id': class_id,\n 'gender': gender,\n 'f_class': f_class\n }\n context = get_model_context(model,school, from_date=from_date, to_date=to_date, **payload)\n context['school'] = school\n template_name = ''\n if model:\n template_name = 'reports/exports/%s_export.html' % (model)\n else:\n template_name = 'reports/exports/student_export.html'\n return rtp(template_name, context)\n\n\ndef get_model_context(model, school, from_date=None, to_date=None, **kwargs):\n #Just mappping the models to some string from the request\n MODEL_MAP = {\n 'student': Student,\n 'staff': Teacher,\n 'subject': Subject,\n 'class': StudentClass,\n 'result': Result\n }\n if model:\n queryset = MODEL_MAP.get(model).objects.filter(school=school)\n if model == 'result':\n 
if kwargs.get('f_class'):\n queryset = queryset.filter(student_class__id=kwargs.get('f_class'))\n if kwargs.get('f_term'):\n queryset = queryset.filter(term=kwargs.get('f_term'))\n if from_date != '' and to_date !='':\n queryset = queryset.filter(date_created__range=(from_date, to_date))\n klazz = kwargs.get('class_id', '')\n if klazz != '' and klazz is not None:\n klass = get_object_or_404(StudentClass, pk=klazz)\n queryset = queryset.filter(student_class=klass)\n gender = kwargs.get('gender', '')\n if gender != '' and gender is not None:\n queryset = queryset.filter(gender=kwargs.pop('gender'))\n status = kwargs.get('status', '')\n if status != '' and status is not None:\n queryset = queryset.filter(user_status=kwargs.pop('status'))\n return {\n '%s_list' % (model): queryset,\n }\n return {}\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef load_token(request):\n params = request.GET\n session = params.get('session', '')\n term = params.get('term', '')\n lag_year = token_year = ''\n message = 'Error: Invalid session-term combination'\n\n #Be sure that the user has admin access rights\n if request.user.teacher.is_admin:\n pass #Not really doing anything; Just checking\n else:\n raise ValueError(request, \"You are not allowed to access this page\")\n\n #check to make sure the session is in the right format\n try:\n lag_year, token_year = session.split('/')\n except:\n raise ValueError(request, \"Please enter your session in the format '2012/2013'\")\n access_token = AccessToken.objects.filter(token_application__year=token_year, token_application__term=term, token_application__status='A')\n\n #Be very sure the token exists\n if access_token.exists():\n token = access_token[0].token\n message = 'Token updated automatically'\n else:\n token = 'NO TOKEN FOUND'\n data = {\n 'token': token,\n 'msg': message\n }\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n@login_required\n@user_passes_test(lambda u: hasattr(u, 'teacher'))\ndef psycho_domain(request):\n teacher = request.user.teacher\n params = request.POST\n reg_number = params.get('reg_number')\n batch_id = params.get('batch_id')\n class_id = params.get('class_id')\n context = {}\n\n batches = ReportBatch.objects.filter(school=teacher.school)\n\n #Just making sure the user is an admin user\n if not teacher.is_admin:\n raise ValueError(request, \"You do not have permission to view this page\")\n\n if request.method == 'POST':\n #get data for batch and student\n try:\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n student = get_object_or_404(Student, reg_number=reg_number)\n report = Report.objects.filter(student=student, batch=batch, student_class__id=int(class_id))[0]\n except:\n messages.error(request, \"Missng One or more values. 
Make sure you have selected batch, class and reg_number\")\n form = PsychomotorForm(request.POST, instance=report)\n if form.is_valid():\n cog = form.save(commit=False)\n cog.save()\n messages.success(request, \"Psychomotor Details was saved sucessfully\")\n else: \n params = request.GET\n reg_number = params.get('reg_number')\n batch_id = params.get('batch_id')\n class_id = params.get('class_id')\n #import pdb; pdb.set_trace()\n \n try:\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n student = get_object_or_404(Student, reg_number=reg_number)\n report = Report.objects.filter(student=student, batch=batch, student_class__id=int(class_id))\n context['batch'] = batch\n \n if report.exists():\n report = report[0]\n context['report'] = report\n form = PsychomotorForm(instance=report) \n context['form'] = list(form)\n except:\n messages.error(request, \"Missng One or more values. Make sure you have selected batch, class and reg_number\")\n context['batches'] = batches \n context['classes'] = StudentClass.objects.filter(school=teacher.school)\n return render(request, 'reports/cognitive_domain.html', context)\n \n\ndef load_cog_data(request):\n params = request.GET\n reg_number = params.get('reg_number')\n batch_id = params.get('batch_id')\n data = {}\n\n if batch_id:\n batch = get_object_or_404(ReportBatch, pk=batch_id)\n if reg_number:\n report = Report.objects.filter(batch=batch, student__reg_number=reg_number)[0]\n data['report'] = serializers.serialize('json', [report,])\n else:\n data['error'] = \"No Data Found\"\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\ndef delete_report(request, report_id):\n batch_id = request.GET.get('batch_id')\n report = get_object_or_404(Report, pk=report_id)\n report.delete()\n messages.success(request, \"You've successfully deleted '%s' from the database.\" % (report))\n return HttpResponseRedirect(reverse('reports:list', kwargs={'batch_id': batch_id}))\n\n\ndef token_retriever(request, batch):\n try:\n lag_year, token_year = session.split('/')\n except:\n raise ValueError(request, \"Please enter your session in the format '2012/2013'\")\n access_token = AccessToken.objects.filter(token_application__year=token_year, token_application__term=term, token_application__status='A')\n\n #Be very sure the token exists\n if access_token.exists():\n token = access_token[0].token\n message = 'Token updated automatically'\n else:\n token = 'NO TOKEN FOUND'\n data = {\n 'token': token,\n 'msg': message\n }\n return HttpResponse(json.dumps(data), content_type=\"application/json\")" } ]
118
hafizur-rahman/translator
https://github.com/hafizur-rahman/translator
4c4e7cde9532fed795b6bd5034bc34fc901b67e2
5768485ea2580d953d1b5595ae3104ee0dcfe168
e20abfa4a179ffb83009b9a33532c35de657cea1
refs/heads/master
2022-09-12T01:36:41.384668
2020-05-20T04:11:05
2020-05-20T04:11:05
180,056,847
0
0
null
2019-04-08T02:45:29
2020-05-20T04:11:35
2022-09-01T23:21:37
Java
[ { "alpha_fraction": 0.4451827108860016, "alphanum_fraction": 0.5182723999023438, "avg_line_length": 14.050000190734863, "blob_id": "496a33df7426b8098d43e4c8dcfb8cee70764a8e", "content_id": "e46d426907ac8259c2c3e4a7291e10fc229687e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 301, "license_type": "no_license", "max_line_length": 46, "num_lines": 20, "path": "/docker-compose.yml", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "version: \"3.7\"\n\nservices:\n api:\n build: ./api\n ports:\n - \"5000:5000\"\n restart: always\n\n ui:\n build: ./ui\n volumes:\n - \"./ui:/opt/front\"\n ports:\n - \"5001:5001\"\n restart: always\n links:\n - api\n environment:\n - VOCABULARY_SERVICE_URI=http://api:5000\n" }, { "alpha_fraction": 0.6339622735977173, "alphanum_fraction": 0.6679245233535767, "avg_line_length": 17.928571701049805, "blob_id": "aa2722b0ba4f787a01448f8b5fe27144cc91840e", "content_id": "ab99ac615cba19c1b7d2c52491e9f39c66d45957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 265, "license_type": "no_license", "max_line_length": 50, "num_lines": 14, "path": "/ui/Dockerfile", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "FROM ubuntu:16.04\n\nRUN apt-get update \\\n && apt-get install -y python3 python3-pip curl \\\n && apt-get clean \\\n && rm -rf /var/lib/apt/lists/*\n\nCOPY . /opt/front\nWORKDIR /opt/front\nRUN pip3 install -r requirements.txt\n\nENV LANG C.UTF-8\n\nCMD [\"python3\", \"app.py\"]\n" }, { "alpha_fraction": 0.6112326979637146, "alphanum_fraction": 0.6126914620399475, "avg_line_length": 26.420000076293945, "blob_id": "bfd7af09a97c95ba74a7000f9c6183dc61a0daf0", "content_id": "7b3beaecde32976bf24074324e68606876d27a2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1371, "license_type": "no_license", "max_line_length": 78, "num_lines": 50, "path": "/api/src/main/java/com/jdreamer/tutor/ResourceConfig.java", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "package com.jdreamer.tutor;\n\nimport org.springframework.beans.factory.annotation.Qualifier;\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.core.io.Resource;\n\nimport java.io.BufferedReader;\nimport java.io.IOException;\nimport java.io.InputStreamReader;\n\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.regex.*;\n\n@Configuration\npublic class ResourceConfig {\n\n @Value(\"classpath:edict\")\n private Resource edict;\n\n @Bean\n @Qualifier(\"dictionary\")\n public Map<String, String> dictionary() {\n Map<String, String> dict = new HashMap<>();\n\n Pattern regex = Pattern.compile(\"([^\\\\s]+)\\\\s?\\\\[.*\\\\]\\\\s?/\\\\s?(.+)\");\n\n try (BufferedReader reader = new BufferedReader(\n new InputStreamReader(edict.getInputStream(), \"EUC-JP\"))) {\n String line = reader.readLine();\n\n while (line != null) {\n Matcher m = regex.matcher(line);\n\n if (m.matches()) {\n dict.put(m.group(1), m.group(2));\n }\n\n // read next line\n line = reader.readLine();\n }\n } catch (IOException e) {\n e.printStackTrace();\n }\n\n return dict;\n }\n}\n" }, { "alpha_fraction": 0.5905923247337341, "alphanum_fraction": 0.6001741886138916, "avg_line_length": 29.210525512695312, "blob_id": 
"bc1167ba6ae31ce1c47263797a8db63754abbacf", "content_id": "5ccb46a0cde4c72d8fc97746aa6a105f6cc277f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "no_license", "max_line_length": 91, "num_lines": 38, "path": "/ui/app.py", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "#!/bin/python3\nimport json\nimport os\n\nimport requests\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\nVOCABULARY_SERVICE_URI = os.getenv('VOCABULARY_SERVICE_URI', '')\n\n\[email protected]('/')\ndef index():\n return render_template('index.html',\n title='Japanese Vocabulary Builder',\n VOCABULARY_SERVICE_URI=VOCABULARY_SERVICE_URI)\n\n\[email protected]('/api/parse-url', methods=['POST'])\ndef parseUrl():\n response = requests.post('/'.join([VOCABULARY_SERVICE_URI, 'parse-url']),\n headers={'Content-type': 'application/json'},\n data=json.dumps(request.json, ensure_ascii=False).encode(\"UTF-8\"))\n\n return response.text\n\n\[email protected]('/api/parse-text', methods=['POST'])\ndef parseSite():\n response = requests.post('/'.join([VOCABULARY_SERVICE_URI, 'word-list']),\n headers={'Content-type': 'application/json'},\n data=json.dumps(request.json, ensure_ascii=False).encode(\"UTF-8\"))\n\n return response.text\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001, debug=True)\n" }, { "alpha_fraction": 0.7305699586868286, "alphanum_fraction": 0.7668393850326538, "avg_line_length": 31.16666603088379, "blob_id": "f752dfaf523fc30f89d09e8b0c810aa6877083d5", "content_id": "23a014a165c5e2b9a96931954a1b7f7c4932c81a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 193, "license_type": "no_license", "max_line_length": 72, "num_lines": 6, "path": "/api/Dockerfile", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "# syntax=docker/dockerfile:experimental\nFROM maven:3.6.3-jdk-8\n\nCOPY target/vocabulary-service-0.0.1-SNAPSHOT.jar vocabulary-service.jar\n\nENTRYPOINT [ \"java\", \"-jar\", \"vocabulary-service.jar\"]\n" }, { "alpha_fraction": 0.6264150738716125, "alphanum_fraction": 0.6415094137191772, "avg_line_length": 12.300000190734863, "blob_id": "24cd335ca623fbf0cbdec2f4fd63abd99aec1b9b", "content_id": "943b0b32ed40e08087e573515d649026697bdb35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 265, "license_type": "no_license", "max_line_length": 77, "num_lines": 20, "path": "/Readme.md", "repo_name": "hafizur-rahman/translator", "src_encoding": "UTF-8", "text": "## Build API\n```\ncd api\nmvn clean package\n```\n\n## Build Docker Images\n```\ndocker-compose build\n```\n\n## Run Docker Images\n```\ndocker-compose up\n```\n\n## Use the Service\nOpen `http://localhost:5001/` in browser, input `url` or `text` for parsing! \n\n![ui.png](./ui.png)" } ]
6
crystal520/YELP
https://github.com/crystal520/YELP
2d1fc7ac5ad3aa9d00652ab787b65ce11a97e37c
1c78006962c7ae59e7f4780d66b1afa8c5d998dd
43fd1a36fa45a495216e9e7d46c0676a534fad12
refs/heads/master
2016-09-10T03:28:03.801231
2014-05-02T12:05:00
2014-05-02T12:05:00
19,373,947
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6347619295120239, "alphanum_fraction": 0.6409524083137512, "avg_line_length": 19.368932723999023, "blob_id": "3f143d9c305f32c08dd8daeb21f5423b3cea2d43", "content_id": "c4dd482cb81a012aeb18e948c1c8635a55082b26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2100, "license_type": "permissive", "max_line_length": 71, "num_lines": 103, "path": "/yelpapp/app.js", "repo_name": "crystal520/YELP", "src_encoding": "UTF-8", "text": "// Module Dependencies and Setup\n\nvar express = require('express')\n , mongoose = require('mongoose')\n , CheckinModel = require('./models/checkin')\n , welcome = require('./controllers/welcome')\n , analysis = require('./controllers/analysis')\n , http = require('http')\n , flash = require('connect-flash')\n , path = require('path')\n , fs = require('fs')\n , engine = require('ejs-locals')\n , config = require('./config')\n , app = express();\n\napp.engine('ejs', engine);\napp.set('port', 80);\napp.set('views', __dirname + '/views');\napp.set('view engine', 'ejs');\napp.use(express.favicon());\napp.use(express.json());\napp.use(express.urlencoded());\napp.use(express.logger('dev'));\n\n\n\n\n// Helpers\n\napp.use(function(req, res, next){\n app.locals.layoutPath = \"../shared/layout\";\n next();\n});\n\napp.configure(function() {\n app.use(express.cookieParser('keyboard cat'));\n app.use(express.session({ cookie: { maxAge: 60000 }}));\n app.use(flash());\n});\n\n// Routing Initializers\n\napp.use(express.static(path.join(__dirname, 'public')));\napp.use(app.router);\n\n// Error Handling\n\nif ('development' == app.get('env')) {\n app.use(express.errorHandler());\n} else {\n app.use(function(err, req, res, next) {\n res.render('errors/500', { status: 500 });\n });\n}\n\n// Database Connection\n\n\n\n\n// Routing\n\n\n//User route\napp.get('/', welcome.index);\n\n\n\n// // prize status\n\n// app.get('/reset', status.reset);\n// app.get('/initial', status.initial);\n\n\n// // prize status\n\napp.get('/analysis/:id?', analysis.analysis);\napp.get('/review/:id?', analysis.review);\napp.get('/map', analysis.map);\n\n// app.get('/update/:itemName?', status.update);\n\n\n\napp.all('*', welcome.not_found);\n\n// Start Server w/ DB Connection\n\n\n\nvar mongoose = require('mongoose');\nmongoose.connect('mongodb://localhost/yelp');\n\nvar db = mongoose.connection;\ndb.on('error', console.error.bind(console, 'connection error:'));\ndb.once('open', function callback () {\n // yay!\n console.log(\"yay!\");\n http.createServer(app).listen(app.get('port'), function(){\n console.log('Express server listening on port ' + app.get('port'));\n });\n\n});\n\n\n" }, { "alpha_fraction": 0.4917137920856476, "alphanum_fraction": 0.4963930547237396, "avg_line_length": 33.186668395996094, "blob_id": "ea15d33f1a866e2837ae6b80eafbe0f3646f571b", "content_id": "3dfe746930fc28c0655197ec321dea259a591927", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5129, "license_type": "permissive", "max_line_length": 117, "num_lines": 150, "path": "/yelpapp/controllers/analysis.js", "repo_name": "crystal520/YELP", "src_encoding": "UTF-8", "text": "var mongoose = require('mongoose')\n , fs = require('fs')\n , spawn = require('child_process').spawn\n , Checkin = mongoose.model('Checkin')\n , path = require('path');\n\n // status\nexports.analysis = function (req, res, next) {\n var id= req.params.id;\n var action = function (err, collection) {\n // 
Locate all the entries using find\n collection.find({business_id:id}).toArray(function(err, results) {\n var info= results[0].checkin_info;\n // for (var i = 0; i < Object.keys(info).length; i++) {\n // Object.keys(info)[i]\n // console.log(Object.keys(info)[i]);\n // };\n var sortresult= Object.keys(info).sort(function(a,b){return info[b]-info[a]});\n var split = sortresult[0].split('-');\n var time,week;\n if (split[0]>=12) {\n time=\"PM\";\n } else{\n time=\"AM\"\n };\n\n if (split[1]==0) {\n week = \"Sunday\";\n } else if (split[1]==1) {\n week = \"Monday\";\n } else if (split[1]==2) {\n week = \"Tuesday\";\n } else if (split[1]==3) {\n week = \"Wednesday\";\n } else if (split[1]==4) {\n week = \"Thursday\";\n } else if (split[1]==5) {\n week = \"Friday\";\n } else if (split[1]==6) {\n week = \"Saturday\";\n } ;\n res.json({status: 'success',\"Peak time\": week+' '+split[0]+time});\n });\n };\n\n mongoose.connection.db.collection('checkin', action);\n\n};\n\n\n // status\nexports.review = function (req, res, next) {\n var id= req.params.id;\n //console.log(id);\n var outputFilename = 'review_analysis/review.json';\n var review={};\n var reviewdata = '';\n var analysis_result;\n var review_ANA = spawn('python', ['review_analysis/ML_review.py']);\n var action = function (err, collection) {\n // Locate all the entries using find\n collection.find({business_id:id}).toArray(function(err, results) {\n for (var i = 0; i < results.length; i++) {\n //console.log(\"results[i].text\",results[i].text);\n review.text=review.text+results[i].text;\n };\n review.text.replace(\"undefined\", \"\");\n //console.log(review);\n fs.writeFile(outputFilename, JSON.stringify(review), function(err) {\n if(err) {\n console.log(err);\n } else {\n\n console.log(\"JSON saved to \" + outputFilename);\n\n review_ANA.stdout.on('data', function(data) {\n reviewdata += data.toString();\n //reviewdata = reviewdata.replace(/(\\r\\n|\\n|\\r)/gm,\"\");\n // var reviewdata=reviewdata.split(\"\\n\");\n // console.log(\"reviewdata\",reviewdata);\n });\n \n review_ANA.stderr.on('data', function(data) {\n var string= data.toString();\n if (string.indexOf(\"INFO : topic\") > -1) {\n analysis_result += string;\n //console.log(\"result\");\n };\n\n });\n review_ANA.on('exit', function (code) {\n console.log(\"finaldata\",analysis_result);\n fs.unlink(outputFilename, function (err) {\n if (err) throw err;\n console.log('successfully deleted outputFilename');\n });\n res.json({status: 'success',\"Result\": analysis_result.replace(\"undefined\", \"\")});\n console.log('child process exited with code ' + code);\n });\n }\n }); \n //console.log(results);\n\n //res.json({status: 'success'});\n });\n };\n\n mongoose.connection.db.collection('review', action);\n\n};\n\n\n// status\nexports.map = function (req, res, next) {\n //console.log(id);\n var outputFilename = 'views/welcome/business.json';\n var businessData={};\n var reviewdata = '';\n var analysis_result;\n var review_map = spawn('python', ['review_analysis/map_review.py']);\n var action = function (err, collection) {\n // Locate all the entries using find\n collection.find().toArray(function(err, results) {\n for (var i = 0; i < results.length; i++) {\n // console.log(results[i]);\n businessData[results[i].business_id] = [results[i].latitude,results[i].longitude,results[i].categories]\n\n };\n console.log(businessData);\n fs.writeFile(outputFilename, JSON.stringify(businessData), function(err) {\n if(err) {\n console.log(err);\n } else {\n console.log(\"JSON saved to \" + 
outputFilename);\n res.render('welcome/map',{\n businesses:results,\n businessData:businessData\n });\n }\n }); \n\n //console.log(results);\n\n //res.json({status: 'success'});\n });\n };\n\n mongoose.connection.db.collection('business', action);\n\n};\n\n" }, { "alpha_fraction": 0.6340010762214661, "alphanum_fraction": 0.6469951272010803, "avg_line_length": 38.29787063598633, "blob_id": "c5ce00cc1812adc734166b69cc2b38cbb19fdfc9", "content_id": "9628d782fc96e5830baaa7255d873edd275c6000", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "permissive", "max_line_length": 131, "num_lines": 47, "path": "/yelpapp/review_analysis/ML_review.py", "repo_name": "crystal520/YELP", "src_encoding": "UTF-8", "text": "import logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n\nimport json\nfrom gensim import corpora, models, similarities\nfrom pprint import pprint\nimport gensim, bz2\n\n\n\n\n\n\ndata = []\nwith open('review_analysis/review.json') as f:\n for line in f:\n data.append(json.loads(line))\n # pprint(json.loads(line)[u'text'])\n documents= [json.loads(line)[u'text']]\n \n # remove common words and tokenize\n stoplist = set('just this with that for not have on me i my it was they at had a of the and to in I you'.split())\n texts = [[word for word in document.lower().split() if word not in stoplist] for document in documents]\n #pprint(texts)\n # remove words that appear only once\n all_tokens = sum(texts, [])\n # remove first junk word \n del all_tokens[0]\n #pprint(all_tokens)\n tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)\n texts = [[word for word in text if word not in tokens_once] for text in texts]\n # pprint(texts)\n dictionary = corpora.Dictionary(texts)\n dictionary.save('/tmp/deerwester.dict') # store the dictionary, for future reference\n id2word = dictionary\n # print(dictionary)\n print(dictionary.token2id)\n corpus = [dictionary.doc2bow(text) for text in texts]\n corpora.MmCorpus.serialize('/tmp/deerwester.mm', corpus) # store to disk, for later use\n mm = gensim.corpora.MmCorpus('/tmp/deerwester.mm')\n # print(mm)\n\n # extract 5 LDA topics, using 1 pass and updating once every 1 chunk (10,000 documents)\n if dictionary:\n \t#print(mm)\n \tlda = gensim.models.ldamodel.LdaModel(corpus=mm, id2word=id2word, num_topics=5, update_every=1, chunksize=10000, passes=1)\n" }, { "alpha_fraction": 0.6437768340110779, "alphanum_fraction": 0.6566523313522339, "avg_line_length": 20.18181800842285, "blob_id": "8ff0d0d105357428fb03e45a74b43c50c291c932", "content_id": "277b37a373bc726ff001c49015e8e589c9656b5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 233, "license_type": "permissive", "max_line_length": 56, "num_lines": 11, "path": "/yelpapp/controllers/welcome.js", "repo_name": "crystal520/YELP", "src_encoding": "UTF-8", "text": "\n// Get homepage\nexports.index = function(req, res){\n res.render('welcome/index');\n\n}\n\n// Handle 404 gracefully\nexports.not_found = function(req, res){\n req.flash('error', \"That doesn't seem to be a page.\");\n res.redirect('/');\n}" } ]
4
isakagren/StudioV-Pokedex
https://github.com/isakagren/StudioV-Pokedex
cf337e08bd043381ea2ab389a803c0675c2ca049
cd4c3a0f43418763187009e40c20177381f4a8f9
a3cc9c265d91212b7e045ab4d692b4bf86a185a6
refs/heads/master
2020-04-23T18:19:20.112315
2019-02-18T22:07:30
2019-02-18T22:07:30
171,362,826
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6377245783805847, "alphanum_fraction": 0.6377245783805847, "avg_line_length": 18.875, "blob_id": "cc66945f6f2146cd8e694f644cdc5cce0c3d2aef", "content_id": "0c6c48f55ee3081fec1f8513734052e57f117946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 337, "license_type": "no_license", "max_line_length": 92, "num_lines": 16, "path": "/C-Sharp/Pokedex/Filter.cs", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "using System;\r\nusing System.Collections.Generic;\r\nusing System.Linq;\r\nusing System.Text;\r\nusing System.Threading.Tasks;\r\n\r\nnamespace Pokedex\r\n{\r\n public class Filter\r\n {\r\n public static List<Pokemon> FiltreraPokemons(string sökning, List<Pokemon> pokemons)\r\n {\r\n return pokemons;\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.4479452073574066, "alphanum_fraction": 0.4547945261001587, "avg_line_length": 28.41666603088379, "blob_id": "f60396a160a4b5023327438b4f5bb19cc5eef166", "content_id": "47c2fc34a30528ef0244d7413930918035e04fd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1476, "license_type": "no_license", "max_line_length": 99, "num_lines": 48, "path": "/C-Sharp/Pokedex/Ladda.cs", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "using System;\r\nusing System.Collections.Generic;\r\nusing System.IO;\r\nusing System.Linq;\r\nusing System.Text;\r\nusing System.Threading.Tasks;\r\n\r\nnamespace Pokedex\r\n{\r\n public class Ladda\r\n {\r\n public static List<Pokemon> LaddaPokemons()\r\n {\r\n // Skapa en lista där vi kan lädda pokemons\r\n var pokemons = new List<Pokemon>();\r\n\r\n // Läs in listan med pokemons\r\n using (var reader = new StreamReader(@\"./Sökväg till databas\"))\r\n {\r\n // Hoppa över den första raden\r\n reader.ReadLine();\r\n\r\n // Så länge det finns mer att läsa\r\n while (!reader.EndOfStream)\r\n {\r\n // Läs in en rad\r\n var line = reader.ReadLine();\r\n\r\n // Dela raden i bitar vid varje kommatecken ( , ). 
\r\n var values = line.Split(',');\r\n\r\n // Skapa en pokemonklass som håller datan från cell 2,3,4 i den uppdelade raden\r\n var pokemon = new Pokemon\r\n {\r\n PokemonName = values[2],\r\n HP = Int32.Parse(values[3]),\r\n Atk = Int32.Parse(values[4])\r\n };\r\n\r\n // Lägg till pokemonen i listan\r\n pokemons.Add(pokemon);\r\n }\r\n }\r\n\r\n return pokemons;\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 30, "blob_id": "9c9e9278372d7c0ab94635f465cda44f9f8a0fbd", "content_id": "93c102aa4f4587793ea6f625b0b784b478c7e110", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/Python/Filter.py", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "def filtrera_pokemons(sokning, pokemons):\r\n return pokemons" }, { "alpha_fraction": 0.5842838883399963, "alphanum_fraction": 0.5880861878395081, "avg_line_length": 27.22222137451172, "blob_id": "9648113edb3689bd8f13e033787ee7f8fd192d17", "content_id": "57336773b8e0ddce45408389e2e85773a87bfd68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "no_license", "max_line_length": 88, "num_lines": 27, "path": "/Python/Ladda.py", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "# Pokemonklass\r\nclass Pokemon:\r\n def __init__(self, namn, liv, attack):\r\n self.namn=namn\r\n self.liv=liv\r\n self.attack=attack\r\n\r\n def __str__(self):\r\n return (self.namn + ', liv: ' + str(self.liv) + ', attack: ' + str(self.attack))\r\n\r\n\r\n# Skapa en lista med alla pokemons i databasen\r\ndef skapa_pokemonlista():\r\n pokelist=list()\r\n\r\n # Läser in databasen\r\n f=open(\"./sokvag till databasen\", \"r\")\r\n\r\n # Skippar forsta raden\r\n aPokemon = f.readline().strip().split(\",\")\r\n \r\n # Laser in filen och sparar pokemons i listan pokelist\r\n for p in f:\r\n enPokemon = p.strip().split(\",\")\r\n pokeobjekt=Pokemon(enPokemon[2].strip(), int(enPokemon[3]), int(enPokemon[4]))\r\n pokelist.append(pokeobjekt)\r\n return (pokelist)\r\n" }, { "alpha_fraction": 0.5153102278709412, "alphanum_fraction": 0.5205479264259338, "avg_line_length": 25.577777862548828, "blob_id": "a2273eed8f6afc88f5eb5e5ba55d218baf5cdc13", "content_id": "1eadd026a60635bd20c86954ea6c419a8236f6d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2484, "license_type": "no_license", "max_line_length": 81, "num_lines": 90, "path": "/C-Sharp/Pokedex/MainWindow.xaml.cs", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "using System;\r\nusing System.Collections.Generic;\r\nusing System.IO;\r\nusing System.Linq;\r\nusing System.Text;\r\nusing System.Threading.Tasks;\r\nusing System.Windows;\r\nusing System.Windows.Controls;\r\nusing System.Windows.Data;\r\nusing System.Windows.Documents;\r\nusing System.Windows.Input;\r\nusing System.Windows.Media;\r\nusing System.Windows.Media.Imaging;\r\nusing System.Windows.Navigation;\r\nusing System.Windows.Shapes;\r\n\r\nnamespace Pokedex\r\n{\r\n \r\n public partial class MainWindow : Window\r\n {\r\n private List<Pokemon> _pokemons;\r\n\r\n public MainWindow()\r\n {\r\n InitializeComponent();\r\n _pokemons = Ladda.LaddaPokemons();\r\n }\r\n \r\n public void RedrawPokemons (List<Pokemon> pokemons)\r\n {\r\n\r\n 
Results.Children.Clear();\r\n\r\n var nameL = new Label();\r\n var hpL = new Label();\r\n var atkL = new Label();\r\n\r\n nameL.Content = \"Namn\";\r\n hpL.Content = \"HP\";\r\n atkL.Content = \"Atk\";\r\n\r\n Grid.SetColumn(nameL, 0);\r\n Grid.SetColumn(hpL, 1);\r\n Grid.SetColumn(atkL, 2);\r\n\r\n Grid.SetRow(nameL, 0);\r\n Grid.SetRow(hpL, 0);\r\n Grid.SetRow(atkL, 0);\r\n\r\n Results.RowDefinitions.Add(new RowDefinition());\r\n\r\n Results.Children.Add(nameL);\r\n Results.Children.Add(hpL);\r\n Results.Children.Add(atkL);\r\n\r\n for (int i = 0; i < pokemons.Count; i++)\r\n {\r\n var pokemon = pokemons[i];\r\n\r\n var name = new Label();\r\n var hp = new Label();\r\n var atk = new Label();\r\n\r\n name.Content = pokemon.PokemonName;\r\n hp.Content = pokemon.HP;\r\n atk.Content = pokemon.Atk;\r\n\r\n Grid.SetColumn(name, 0);\r\n Grid.SetColumn(hp, 1);\r\n Grid.SetColumn(atk, 2);\r\n\r\n Grid.SetRow(name, i+1);\r\n Grid.SetRow(hp, i+1);\r\n Grid.SetRow(atk, i+1);\r\n\r\n Results.RowDefinitions.Add(new RowDefinition());\r\n\r\n Results.Children.Add(name);\r\n Results.Children.Add(hp);\r\n Results.Children.Add(atk);\r\n }\r\n }\r\n\r\n private void SearchBox_TextChanged(object sender, TextChangedEventArgs e)\r\n {\r\n RedrawPokemons(Filter.FiltreraPokemons(SearchBox.Text, _pokemons));\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 21.46666717529297, "blob_id": "f3282c7c31c1bec7c7e8ca5eb427347bc4897c33", "content_id": "3fa5f092982fcb476bb0d3c287b468df9f7172ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 352, "license_type": "no_license", "max_line_length": 91, "num_lines": 15, "path": "/C-Sharp/Pokedex/Pokemon.cs", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "namespace Pokedex\r\n{\r\n public class Pokemon\r\n {\r\n public string PokemonName { get; set; }\r\n public int HP { get; set; }\r\n public int Atk { get; set; }\r\n\r\n public override string ToString()\r\n {\r\n return $\"Pokemon: ${this.PokemonName} \\t HP: ${this.HP} \\t Atk: ${this.Atk}\\n\";\r\n }\r\n }\r\n\r\n}" }, { "alpha_fraction": 0.679411768913269, "alphanum_fraction": 0.6823529601097107, "avg_line_length": 24, "blob_id": "179a4285256f2592f4525de8f1a5d2476e3c8bf5", "content_id": "19e2bdc4ec1cdece098c7af7108237ec36d36b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 82, "num_lines": 13, "path": "/Python/pokedex.py", "repo_name": "isakagren/StudioV-Pokedex", "src_encoding": "UTF-8", "text": "import sys\r\nimport io\r\nfrom Ladda import *\r\nfrom Filter import *\r\n\r\npokelist = skapa_pokemonlista()\r\n\r\nprint(\"Ange sokning\")\r\nsok = sys.stdin.readline()\r\n\r\nfiltrerade_pokemons = filtrera_pokemons(sok[:-1], pokelist)\r\nfor pokemon in filtrerade_pokemons:\r\n print(pokemon.namn + \"\\t \\t \" + str(pokemon.liv) + \"\\t\" + str(pokemon.attack))\r\n\r\n" } ]
7
Gcarrascoa/DemonioConRecurrencia
https://github.com/Gcarrascoa/DemonioConRecurrencia
2916e29f302bb57d5249ce1b1569e1c318a98278
d4d6d8a839b19bdb71fb0f6e9e87faf921172f63
bb4c5542d073f8c97e2153db86f02a810ff25a31
refs/heads/master
2020-06-13T00:34:39.757839
2019-06-30T04:39:48
2019-06-30T04:39:48
194,475,228
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4586006700992584, "alphanum_fraction": 0.4812099039554596, "avg_line_length": 33.82978820800781, "blob_id": "a6cf0ccb964f0b2d18fd3fba9a4e07039a2460ba", "content_id": "b6a0e9103dfc15c25fe5e67eb7767c2908d3e980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3273, "license_type": "no_license", "max_line_length": 82, "num_lines": 94, "path": "/main2.py", "repo_name": "Gcarrascoa/DemonioConRecurrencia", "src_encoding": "UTF-8", "text": "def agregarHorizontal(matriz,posicionI,posicionJ,valor):\n contador=0\n if(matriz[posicionI][posicionJ]==0 and matriz[posicionI][posicionJ+1]==0):\n while(contador<2):\n matriz[posicionI][posicionJ]=valor\n posicionJ=posicionJ+1\n contador=contador+1\n elif(matriz[posicionI][posicionJ]==0 and matriz[posicionI][posicionJ-1]==0):\n while(contador<2):\n matriz[posicionI][posicionJ]=valor\n posicionJ=posicionJ-1\n contador=contador+1\n return matriz\ndef agregarVertical(matriz,posicionI,posicionJ,valor):\n contador=0\n entro=0\n while(contador<2):\n if(matriz[posicionI][posicionJ]==0):\n if(posicionI+1==n):\n if(entro==1):\n matriz[posicionI][posicionJ]=valor\n return matriz\n else:\n entro=entro+1\n matriz[posicionI][posicionJ]=valor\n contador=contador+1\n posicionI=posicionI+1\n \n return matriz\n\ndef recorrerMatriz(matriz,n,m,i=0,j=0,contador=0): \n if(i<n and j<m):\n contador=contador+1\n if(i%2==0):\n if (contador==0 and matriz[i][j]==0):\n matriz=agregarHorizontal(matriz,i,j,contador)\n matriz=recorrerMatriz(matriz,n,m,i,j+2,contador)\n elif (matriz[i][j]==0):\n if(j==m-2 or j==m-1):\n matriz=agregarVertical(matriz,i,j,contador)\n if(j==m-1):\n if(matriz[i-1][j-1]!=0):\n matriz=recorrerMatriz(matriz,n,m,i+1,j-2,contador)\n else:\n matriz=recorrerMatriz(matriz,n,m,i+1,j-1,contador)\n else:\n matriz=recorrerMatriz(matriz,n,m,i,j+1,contador) \n else:\n matriz=agregarHorizontal(matriz,i,j,contador)\n matriz=recorrerMatriz(matriz,n,m,i,j+2,contador)\n\n elif(i%2!=0):\n if(j%2==0 and j>1):\n j=j+1\n if(j==m-1):\n j=j-2\n if (matriz[i][j]==0):\n if((j==0 and i>0) or (j==1 and i>0)):\n matriz=agregarVertical(matriz,i,j,contador)\n if(j==0): \n matriz=recorrerMatriz(matriz,n,m,i+1,j+2,contador)\n else:\n \n matriz=recorrerMatriz(matriz,n,m,i,j-1,contador) \n else:\n \n matriz=agregarHorizontal(matriz,i,j,contador)\n matriz=recorrerMatriz(matriz,n,m,i,j-2,contador)\n if(j==1 and i==n-1):\n matriz=agregarHorizontal(matriz,i,j,contador)\n return matriz\n\n\n\n#principal\nn = 7\nm = 7\n#a = n*m\nmatriz = []\n\nfor i in range(n):\n matriz.append([])\n for j in range(m):\n matriz[i].append(0)\n\nmatriz = recorrerMatriz(matriz,n,m)\n#print(matriz)\nprint(matriz[0])\nprint(matriz[1])\nprint(matriz[2])\nprint(matriz[3])\nprint(matriz[4])\nprint(matriz[5])\nprint(matriz[6])" } ]
1
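main2.py above fills an n-by-m grid with numbered two-cell pieces through mutual recursion between `recorrerMatriz` and the `agregarHorizontal`/`agregarVertical` helpers, snaking left-to-right on even rows and right-to-left on odd rows. The fragment below is only a simplified iterative illustration of that serpentine placement, valid for grids with an even number of columns; it is not the repo's recursive algorithm, which also handles odd widths by falling back to vertical pieces at the row edges.

```
# Simplified sketch of the serpentine (boustrophedon) tiling idea, even widths only.
def snake_tiles(n, m):
    grid = [[0] * m for _ in range(n)]
    counter = 0
    for i in range(n):
        # even rows walk left-to-right, odd rows right-to-left
        cols = range(0, m - 1, 2) if i % 2 == 0 else range(m - 2, -1, -2)
        for j in cols:
            counter += 1
            grid[i][j] = grid[i][j + 1] = counter  # one horizontal two-cell piece
    return grid

for row in snake_tiles(4, 6):
    print(row)
```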
uvsq21922702/TER
https://github.com/uvsq21922702/TER
536f8b79e3686b6778ea91b4170108f73812392e
23f85cc607270072f213875c557d4628611b743e
cc5520fac323b2621a8d11c052b04d639a7f0500
refs/heads/main
2023-03-20T08:27:37.440338
2021-03-07T13:24:34
2021-03-07T13:24:34
344,931,986
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.672535240650177, "alphanum_fraction": 0.6813380122184753, "avg_line_length": 22.66666603088379, "blob_id": "604649a668006085d49f87648c3a4508b170612d", "content_id": "1edc3cd30906ee3ff8d67a67e838e17222877c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 186, "num_lines": 24, "path": "/main.py.py", "repo_name": "uvsq21922702/TER", "src_encoding": "UTF-8", "text": "import os\nimport requests\nimport schedule\nimport time\n\nindex = {'count': 0}\n\n\ndef func(index):\n url = 'https://opendata.saemes.fr/explore/dataset/places-disponibles-parkings-saemes/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B'\n r = requests.get(url)\n\n with open(os.path.join(r\"file%s.csv\" % index['count']), 'wb') as f:\n f.write(r.content)\n print(\"File downloaded\")\n\n index['count'] += 1\n\n\nschedule.every(3).minutes.do(func, index)\n\nwhile True:\n schedule.run_pending()\n time.sleep(2)\n" } ]
1
kondrashev-y/dj_project
https://github.com/kondrashev-y/dj_project
fae3dcbb722f5d71b9ed1acb401cdfb77bc2bad7
cdecf904101bed865c1a76bc0a305d1e3d25d81d
ba54a6b0fda9891f7d2f8ae559ce9a023a56899f
refs/heads/master
2023-01-30T15:14:37.128250
2020-12-14T18:05:19
2020-12-14T18:05:19
309,638,165
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5715835094451904, "alphanum_fraction": 0.575921893119812, "avg_line_length": 29.700000762939453, "blob_id": "07c23e032f2b57f20d577a07f1b6b3b4e5c248e0", "content_id": "80a942f326499af47e7c6a52a6e1f39774cd5da0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 64, "num_lines": 30, "path": "/dj_project/movies/service.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "import requests\nimport xml.etree.cElementTree as ET\n\n\ndef get_rating(id_film):\n if id_film == 0:\n imdb_rating = '-'\n else:\n url = f'https://rating.kinopoisk.ru/{id_film}.xml'\n r = requests.get(url)\n rating = {}\n # rating_list = []\n response_xml_as_string = str(r.content)[2:-1]\n responseXml = ET.fromstring(response_xml_as_string)\n for item in responseXml.iterfind('.//'):\n rating[item.tag] = item.text\n # rating_list.append(item.text)\n # print(item.tag, item.text, item.attrib)\n imdb_rating = rating['imdb_rating']\n return imdb_rating\n\n\n# def get_client_ip(request):\n# \"\"\"Получение IP пользоваеля\"\"\"\n# x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n# if x_forwarded_for:\n# ip = x_forwarded_for.split(',')[0]\n# else:\n# ip = request.META.get('REMOTE_ADDR')\n# return ip\n\n" }, { "alpha_fraction": 0.6186803579330444, "alphanum_fraction": 0.622107982635498, "avg_line_length": 29.736841201782227, "blob_id": "9240c6338c1fb74d8000510542756406e4a704a7", "content_id": "16123d2e600f3ac7666218ab056feb11e0815fd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 97, "num_lines": 38, "path": "/dj_project/movies/forms.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django import forms\n\nfrom .models import Reviews, Rating, RingStar\nfrom snowpenguin.django.recaptcha3.fields import ReCaptchaField\n\nclass ReviewForm(forms.ModelForm):\n \"\"\"Форма отзывов\"\"\"\n captcha = ReCaptchaField()\n\n class Meta:\n model = Reviews\n fields = ('name', 'email', 'text', 'captcha')\n widgets = {\n 'name': forms.TextInput(attrs={\"class\": \"form-control border\"}),\n 'email': forms.EmailInput(attrs={\"class\": \"form-control border\"}),\n 'text': forms.Textarea(attrs={\"class\": \"form-control border\", 'rows': 3, 'cols': 60})\n }\n\n\nclass RatingForm(forms.ModelForm):\n \"\"\"Форма добавления рейтинга\"\"\"\n star = forms.ModelChoiceField(\n queryset=RingStar.objects.all(), widget=forms.RadioSelect(), empty_label=None,\n )\n\n class Meta:\n model = Rating\n fields = (\"star\",)\n\n# class AvrgRatingForm(forms.ModelForm):\n# \"\"\"Форма среднего рейтинга\"\"\"\n# avrgstar = forms.ModelChoiceField(\n# queryset=RingStar.objects.all(), widget=forms.RadioSelect(), empty_label=None,\n# )\n#\n# class Meta:\n# model = Rating\n# fields = (\"avrgstar\",)" }, { "alpha_fraction": 0.6171920895576477, "alphanum_fraction": 0.6236538290977478, "avg_line_length": 29.2130184173584, "blob_id": "49dfa39d1459b267821f4968238eba2f75a5871a", "content_id": "89c35e66881591800d818af2428f82c94d897b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5456, "license_type": "no_license", "max_line_length": 97, "num_lines": 169, "path": "/dj_project/movies/admin.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django 
import forms\nfrom django.forms import Textarea\nfrom django.utils.safestring import mark_safe\nfrom django.db import models\n\nfrom .models import Category, Genre, Actor, RingStar, Reviews, Rating, Movie, MovieShots\n\n\nfrom ckeditor_uploader.widgets import CKEditorUploadingWidget\n\n\nclass MovieAdminForm(forms.ModelForm):\n    description = forms.CharField(label=\"Описание\", widget=CKEditorUploadingWidget())\n\n    class Meta:\n        model = Movie\n        fields = '__all__'\n\n\n@admin.register(Category)\nclass CategoryAdmin(admin.ModelAdmin):\n    \"\"\"Категории\"\"\"\n    list_display = (\"id\", \"name\", \"url\")\n    list_display_links = (\"name\",)\n\n\nclass ReviewInline(admin.TabularInline):\n    model = Reviews\n    formfield_overrides = {models.TextField: {'widget': Textarea(attrs={'rows': 5, 'cols': 90})}}\n    extra = 0 # количество пустых дополнительных полей\n    readonly_fields = (\"name\", \"email\")\n\n\nclass MovieShotsInline(admin.TabularInline):\n    model = MovieShots\n    formfield_overrides = {models.TextField: {'widget': Textarea(attrs={'rows': 3, 'cols': 40})}}\n    extra = 0 # количество пустых дополнительных полей\n    readonly_fields = (\"get_image\",)\n\n    def get_image(self, obj):\n        return mark_safe(f'<img src={obj.image.url} width=\"120\" height=\"80\"')\n\n    get_image.short_description = \"Изображение\"\n\n\n@admin.register(Movie)\nclass MovieAdmin(admin.ModelAdmin):\n    \"\"\"Фильмы\"\"\"\n    list_display = (\"title\", \"category\", \"url\", \"druft\", \"kp_id\")\n    list_filter = (\"category\", \"year\")\n    search_fields = (\"title\", \"category__name\")\n    inlines = [MovieShotsInline, ReviewInline]\n    save_on_top = True\n    save_as = True\n    list_editable = (\"druft\",)\n    actions = [\"publish\", \"unpublish\"]\n    form = MovieAdminForm\n    readonly_fields = (\"get_image\",)\n    # fields = ((\"actors\", \"directors\", \"genres\"), ) # Будет скрывать поля которые не выбраны\n    fieldsets = (\n        (None, {\n            \"fields\": ((\"title\", \"tagline\"), )\n        }),\n        (None, {\n            \"fields\": ((\"description\"), \"poster\", (\"category\", \"get_image\"), )\n        }),\n        (None, {\n            \"fields\": ((\"year\", \"world_premiere\", \"country\"), )\n        }),\n        (\"Actors\", {\n            \"classes\": (\"collapse\",),\n            \"fields\": ((\"actors\", \"directors\", \"genres\"),)\n        }),\n        (None, {\n            \"fields\": ((\"budget\", \"fees_in_usa\", \"fees_in_world\"),)\n        }),\n        (\"Options\", {\n            \"fields\": ((\"url\", \"druft\", \"kp_id\"),)\n        }),\n    )\n\n    def get_image(self, obj):\n        return mark_safe(f'<img src={obj.poster.url} width=\"80\" height=\"100\"')\n\n    def unpublish(self, request, queryset):\n        \"\"\"Снять с публикации\"\"\"\n        row_update = queryset.update(druft=True)\n        if row_update == 1:\n            message_bit = \"1 запись обновлена\"\n        else:\n            message_bit = f\"{row_update} записей обновлено\"\n        self.message_user(request, f\"{message_bit}\")\n\n    def publish(self, request, queryset):\n        \"\"\"Опубликовать\"\"\"\n        row_update = queryset.update(druft=False)\n        if row_update == 1:\n            message_bit = \"1 запись обновлена\"\n        else:\n            message_bit = f\"{row_update} записей обновлено\"\n        self.message_user(request, f\"{message_bit}\")\n\n    unpublish.short_description = \"Снять с публикации\"\n    unpublish.allowed_permission = ('change', )\n\n    publish.short_description = \"Опубликовать\"\n    publish.allowed_permission = ('change',)\n\n    get_image.short_description = \"Постер\"\n\n\n\n\n@admin.register(Reviews)\nclass ReviewsAdmin(admin.ModelAdmin):\n    \"\"\"Отзывы\"\"\"\n    list_display = (\"name\", \"email\", \"parent\", \"movie\", \"id\")\n    readonly_fields = (\"name\", \"email\")\n\n\n@admin.register(Genre)\nclass GenreAdmin(admin.ModelAdmin):\n    \"\"\"Жанры\"\"\"\n    list_display = (\"name\", \"description\", \"url\")\n\n\n@admin.register(Actor)\nclass ActorAdmin(admin.ModelAdmin):\n    \"\"\"Актеры\"\"\"\n    list_display = (\"name\", \"age\", \"get_image\")\n    readonly_fields = (\"get_image\",)\n\n    def get_image(self, obj):\n        return mark_safe(f'<img src={obj.image.url} width=\"50\" height=\"60\"')\n\n    get_image.short_description = \"Изображение\"\n\n@admin.register(Rating)\nclass RatingAdmin(admin.ModelAdmin):\n    \"\"\"Рэйтинг\"\"\"\n    list_display = (\"movie\", \"id\", \"star\")\n\n\n@admin.register(MovieShots)\nclass MovieShotsAdmin(admin.ModelAdmin):\n    \"\"\"Кадры из фильма\"\"\"\n    formfield_overrides = {models.TextField: {'widget': Textarea(attrs={'rows': 3, 'cols': 40})}}\n\n    list_display = (\"movie\", \"title\", \"description\", \"get_image\")\n    readonly_fields = (\"get_image\",)\n\n    def get_image(self, obj):\n        return mark_safe(f'<img src={obj.image.url} width=\"60\" height=\"40\"')\n\n    get_image.short_description = \"Изображение\"\n\n\n# admin.site.register(Category, CategoryAdmin) # не нужна так как сделали декоратор\n# admin.site.register(Genre)\n# admin.site.register(Actor)\nadmin.site.register(RingStar)\n# admin.site.register(Reviews)\n# admin.site.register(Rating)\n# admin.site.register(Movie)\n# admin.site.register(MovieShots)\n\nadmin.site.site_title = 'Django Movies'\nadmin.site.site_header = 'Django Movies'\n\n" }, { "alpha_fraction": 0.6403133869171143, "alphanum_fraction": 0.6403133869171143, "avg_line_length": 25.961538314819336, "blob_id": "3ef55843b3cc574ce7832a78eaeb1ffeefe31903", "content_id": "6abfa52e873abe4f484047b183ce6a47b68610ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1529, "license_type": "no_license", "max_line_length": 100, "num_lines": 52, "path": "/dj_project/movies/templatetags/breadcrumps_tag.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django import template\nfrom django.urls import reverse\n\nfrom ..models import Category\n\nregister = template.Library()\n\n\nclass ActorList:\n    \"\"\"Класс для yrl Актеров и Режиссеров\"\"\"\n    get_absolute_url = reverse('actor_list')\n\n    def __str__(self):\n        return 'Актеры и Режиссеры'\n\n\nclass RatingList:\n    \"\"\"Класс для yrl Актеров и Режиссеров\"\"\"\n    get_absolute_url = '#' # заглушка вместо reverse('rating_list') пока нет шаблона для рейтинга\n\n    def __str__(self):\n        return 'Рэйтинг'\n\n\n@register.inclusion_tag('breadcrumbs_template.html', takes_context=True)\ndef breadcrumbs(context):\n\n    crumbs = []\n    path = context.get('request').path\n    if context.get('movie'):\n        model = context['movie']\n        crumbs.append(model.category)\n        crumbs.append(model)\n    elif '/category/' in path:\n        crumbs.append(get_category(context['view'].kwargs.get('slug')))\n    elif '/filter/' in path:\n        crumbs.append('Фильтр')\n    elif 'search/' in path:\n        crumbs.append('Поиск')\n    elif 'actor' in path:\n        crumbs.append(ActorList)\n        if context.get('actor'):\n            crumbs.append(context['actor'])\n    elif '/rating/' in path:\n        crumbs.append(RatingList)\n        crumbs.append(context['view'].kwargs.get('rt'))\n    return {'crumbs': crumbs}\n\n\ndef get_category(slug):\n    category_name = Category.objects.get(url=slug)\n    return category_name\n\n\n" }, { "alpha_fraction": 0.6893203854560852, "alphanum_fraction": 0.6893203854560852, "avg_line_length": 33.380950927734375, "blob_id": "4cf63080e8e0981e475bc8d9db1461c247c00f5c", "content_id": 
"18367c7ed3ad914fcd3afe1c045660ece23c40e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "no_license", "max_line_length": 82, "num_lines": 21, "path": "/dj_project/movies/api/urls.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom .api_views import (\n MovieListApiViews,\n MovieDetailApiViews,\n ReviewCreateApiViews,\n ReviewDestroy,\n RatingCreateApiViews,\n ActorApiListView,\n ActorDetailApiListView,\n)\n\nurlpatterns = [\n path('movie/', MovieListApiViews.as_view(), name='movies_list'),\n path('movie/<int:pk>/', MovieDetailApiViews.as_view(), name='movie_detail'),\n path('review/', ReviewCreateApiViews.as_view()),\n path('review/<int:pk>/', ReviewDestroy.as_view()),\n path('rating/', RatingCreateApiViews.as_view(), name='rating'),\n path('actors/', ActorApiListView.as_view(), name='actor_list'),\n path('actors/<int:pk>/', ActorDetailApiListView.as_view(), name='actor_list'),\n]" }, { "alpha_fraction": 0.6158851981163025, "alphanum_fraction": 0.6183996200561523, "avg_line_length": 31.965852737426758, "blob_id": "48627b980341afb5b4510e4491f3218bb15c4147", "content_id": "cc9149e3e63eef38dd6c4a27a93078b3ff854e7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7146, "license_type": "no_license", "max_line_length": 118, "num_lines": 205, "path": "/dj_project/movies/views.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.db.models import Q, Avg, F, Count, Sum\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\n\nfrom .models import Movie, Category, Actor, Genre, Rating\nfrom django.views.generic.base import View\nfrom .forms import ReviewForm, RatingForm\n\nfrom django.http import HttpResponse\n\nfrom .service import get_rating\n\n\n\n\nclass GenreYear():\n \"\"\"Жанры и года выхода фильмов\"\"\"\n def get_genres(self):\n return Genre.objects.all()\n\n def get_years(self):\n return Movie.objects.filter(druft=False).distinct().values(\"year\")\n\n\nclass MoviesView(GenreYear, ListView):\n \"\"\"Список фильмов\"\"\"\n model = Movie\n queryset = Movie.objects.filter(druft=False)\n paginate_by = 6\n # template_name = \"movies/movies.html\"\n\n # def get_context_data(self, *args, **kwargs):\n # context = super().get_context_data(*args, **kwargs)\n # context[\"categories\"] = Category.objects.all()\n # return context\n\n\nclass CategoryMoviesView(GenreYear, ListView):\n \"\"\"\"Список фильмов по категории\"\"\"\n model = Movie\n\n def get_queryset(self):\n queryset = Movie.objects.filter(druft=False, category__url=self.kwargs.get('slug'))\n return queryset\n\n\nclass RatingMovieViewList(GenreYear, ListView):\n \"\"\"Список фильмоф по рейтингу\"\"\"\n\n model = Movie\n template_name = 'movies/rating.html'\n\n def get_queryset(self):\n queryset = Movie.objects.filter(druft=False).annotate(avg_rating=Avg('ratings__star'))\n # for i in queryset:\n # print(i, i.avg_rating)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['rt'] = self.kwargs.get('rt')\n print(context)\n return context\n\n\nclass MovieDetailView(GenreYear, DetailView):\n \"\"\"Полное описание фильма\"\"\"\n model = Movie\n slug_field = \"url\"\n\n def get_context_data(self, **kwargs):\n 
context = super().get_context_data(**kwargs)\n context[\"star_form\"] = RatingForm()\n context[\"form\"] = ReviewForm()\n rq = Movie.objects.get(url=self.kwargs.get('slug'))\n context[\"imdb\"] = get_rating(rq.kp_id)\n st = Rating.objects.filter(movie__url=self.kwargs.get('slug')).aggregate(avg_rating=Avg('star'))[\"avg_rating\"]\n # st1 = Rating.objects.filter(movie__url='terminator').aggregate(avg_rating=Avg('star'))[\"avg_rating\"]\n # print(st1)\n try:\n context[\"avg_rating\"] = str(round(st, 1))\n except TypeError:\n context[\"avg_rating\"] = '0.0'\n # rq = Movie.objects.get(url=self.kwargs.get('slug')) #.values(\"kp_id\")\n # print(rq.kp_id)\n\n return context\n\n\n\n# class MovieDetailView(View):\n # def get(self, request, slug):\n # movie = Movie.objects.get(url=slug)\n # return render(request, \"movies/movie_detail.html\", {\"movie\": movie})\n\n # def get(self, request, pk):\n # movie = Movie.objects.get(id=pk)\n # return render(request, \"movies/movie_detail.html\", {\"movie\": movie})\n\n\nclass AddReviews(View):\n \"\"\"Отзывы\"\"\"\n def post(self, request, pk):\n form = ReviewForm(request.POST)\n movie = Movie.objects.get(id=pk) # не надо если делать через навание столбца см ниже\n if form.is_valid():\n form = form.save(commit=False)\n if request.POST.get(\"parent\", None):\n form.parent_id = int(request.POST.get(\"parent\"))\n form.movie = movie # form.movie_id = pk можно сделать через названия столбца movie_id\n form.save()\n return redirect(movie.get_absolute_url())\n\n\nclass ActorView(GenreYear, DetailView):\n \"\"\"Вывод информации об актере\"\"\"\n model = Actor\n template_name = 'movies/actor.html'\n slug_field = \"name\"\n\n\nclass ActorListView(GenreYear, ListView):\n \"\"\"Вывод списка актеров и режиссеров\"\"\"\n\n model = Actor\n # template_name =\n # def get(self, request, name):\n # actor = Actor.objects.all()\n # return render(request, 'movies/actors.html', context={'actor': actor})\n\n\nclass FilterMoviesView(GenreYear, ListView):\n \"\"\"Фильтр фильмов\"\"\"\n paginate_by = 3\n def get_queryset(self):\n queryset = Movie.objects.filter(\n Q(year__in=self.request.GET.getlist(\"year\")) |\n Q(genres__in=self.request.GET.getlist(\"genre\"))\n ).distinct() # фильтр, там где года будут входить в список возращаемый с фрондэнда\n return queryset\n\n def get_context_data(self, *agrs, **kwargs):\n context = super().get_context_data(*agrs, **kwargs)\n context[\"year\"] = ''.join([f\"year={x}&\" for x in self.request.GET.getlist(\"year\")])\n context[\"genre\"] = ''.join([f\"genre={x}&\" for x in self.request.GET.getlist(\"genre\")])\n return context\n\n\nclass JsonFilterMoviesView(ListView):\n \"\"\"Фильтр фильмов в json\"\"\"\n\n def get_queryset(self):\n queryset = Movie.objects.filter(\n Q(year__in=self.request.GET.getlist(\"year\")) |\n Q(genres__in=self.request.GET.getlist(\"genre\"))\n ).distinct().values(\"title\", \"tagline\", \"url\", \"poster\")\n return queryset\n\n def get(self, request, *args, **kwargs):\n queryset = list(self.get_queryset())\n return JsonResponse({\"movies\": queryset}, safe=False)\n\n\nclass AddStarRating(View):\n \"\"\"Добавление рейтинга фильму\"\"\"\n def get_client_ip(self, request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n\n def post(self, request):\n form = RatingForm(request.POST)\n if form.is_valid():\n Rating.objects.update_or_create(\n ip=self.get_client_ip(request),\n 
movie_id=int(request.POST.get(\"movie\")),\n defaults={'star_id': int(request.POST.get(\"star\"))}\n )\n return HttpResponse(status=201)\n else:\n return HttpResponse(status=400)\n\n\nclass Search(GenreYear, ListView):\n \"\"\" Поиск фильмов\"\"\"\n paginate_by = 2\n\n def get_queryset(self):\n return Movie.objects.filter(title__icontains=self.request.GET.get(\"q\"))\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"q\"] = f'{self.request.GET.get(\"q\")}&'\n return context\n\n # # Поиск без учета регистра, т.к. бд sqlite не пошла c icontains\n # def get_queryset(self):\n # q = self.request.GET.get(\"q\")\n # a = \"\".join(q[0].upper()) + q[1:]\n # return Movie.objects.filter(title__icontains=a)\n\n\n\n" }, { "alpha_fraction": 0.5807860493659973, "alphanum_fraction": 0.5807860493659973, "avg_line_length": 19.909090042114258, "blob_id": "25c5918be04a095b17ed09f41ddbe0e15b30bf14", "content_id": "f5db8f550d0b7b04b39037205e9db5e60e956715", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 38, "num_lines": 11, "path": "/dj_project/contact/service.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.core.mail import send_mail\n\n\ndef send(user_email):\n send_mail(\n 'Вы подписались на рассылку',\n 'Это информационное письмо.',\n '[email protected]',\n [user_email],\n fail_silently=False,\n )" }, { "alpha_fraction": 0.4295257329940796, "alphanum_fraction": 0.44088175892829895, "avg_line_length": 35.53658676147461, "blob_id": "da01f450fc69a79876456c36e0017f71da698cb0", "content_id": "b5d9812ae931d9e82f9225402a93e6bcb5796b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1506, "license_type": "no_license", "max_line_length": 96, "num_lines": 41, "path": "/dj_project/movies/templates/movies/actor_list.html", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "{% extends 'movies/base.html' %}\n{% load static %}\n{% block title %} {{ actor }} {% endblock title %}\n{% block container %}\n<div class=\"container py-md-3\">\n{% endblock container %}\n\n{% block sidebar %}\n <div class=\"side-bar col-lg-4\">\n{% endblock sidebar %}\n\n{% block movie %}\n <div class=\"col-md-6 product-men\">\n{% for actor in actor_list %}\n <div class=\"card mb-3\" style=\"max-width: 640px; max-height: 280px;\">\n <div class=\"row no-gutters\">\n <div class=\"col-md-4\">\n <img src=\"{{ actor.image.url }}\" class=\"card-img img-fluid\" alt=\"\">\n </div>\n <div class=\"col-md-8\">\n <div class=\"card-body\">\n <h5 class=\"card-title\">{{ actor }}</h5>\n <div style=\"height:100px;\">\n <p class=\"card-text\">{% for movie in actor.film_director.all %}\n <a href=\"{{ movie.get_absolute_url }}\">{{ movie.title }}<a/>\n {% endfor %}\n {% for movie in actor.film_actor.all %}\n <a href=\"{{ movie.get_absolute_url }}\">{{ movie.title }}, </a>\n {% endfor %}</p>\n </div>\n <a href=\"{{ actor.get_absolute_url }}\" class=\"btn btn-primary\">Подробней</a>\n\n </div>\n </div>\n </div>\n </div>\n{% endfor %}\n </div>\n{% endblock movie %}\n </div>\n</div>" }, { "alpha_fraction": 0.7006134986877441, "alphanum_fraction": 0.7033742070198059, "avg_line_length": 29.467288970947266, "blob_id": "638cd2dc237868032f30d5fc05f377195019b328", "content_id": "df52ec35c5334ee22bc246bc2b836577f6f720fc", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 3481, "license_type": "no_license", "max_line_length": 105, "num_lines": 107, "path": "/dj_project/movies/api/api_views.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, CreateAPIView, DestroyAPIView\nfrom rest_framework import permissions\n\nfrom ..models import Movie, Actor, Reviews\nfrom .serializers import (\n MovieListSerializer,\n MovieDetailsSerializer,\n ReviewCreateSerializer,\n RatingCreateSerializer,\n ActorSerializer,\n ActorDetailSerializer,\n ReviewSerializer,\n)\n\nfrom .service import get_client_ip, MovieFilter\n\nfrom django_filters.rest_framework import DjangoFilterBackend\n\n\nclass MovieListApiViews(ListAPIView):\n \"\"\"Вывод списков фильмов в API\"\"\"\n\n serializer_class = MovieListSerializer\n filter_backends = (DjangoFilterBackend,)\n filterset_class = MovieFilter\n\n def get_queryset(self):\n movies = Movie.objects.filter(druft=False).annotate(\n rating_user=models.Count(\"ratings\", filter=models.Q(ratings__ip=get_client_ip(self.request)))\n ).annotate(\n # middle_star=models.Sum(models.F('ratings__star')) / models.Count(models.F('ratings'))\n middle_star=models.Avg('ratings__star')\n )\n return movies\n\n\n# class MovieDetailApiViews(APIView):\n# \"\"\"Вывод детальной информации в API\"\"\"\n# def get(self, request, pk):\n# movies = Movie.objects.get(druft=False, id=pk)\n# serializer = MovieDetailsSerializer(movies)\n# return Response(serializer.data)\n\nclass MovieDetailApiViews(RetrieveAPIView):\n \"\"\"Вывод детальной информации в API\"\"\"\n\n queryset = Movie.objects.filter(druft=False)\n serializer_class = MovieDetailsSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass ReviewDestroy(DestroyAPIView):\n \"\"\"Удаление отзыва\"\"\"\n queryset = Reviews.objects.all()\n # permission_classes = ReviewSerializer\n permission_classes = [permissions.IsAdminUser]\n\n\n# class ReviewCreateApiViews(APIView):\n# \"\"\"Добавление отзыва к фильму\"\"\"\n#\n# def post(self, request):\n# review = ReviewCreateSerializer(data=request.data)\n# if review.is_valid():\n# review.save()\n# return Response(status=201)\n\nclass ReviewCreateApiViews(CreateAPIView):\n \"\"\"Добавление отзыва к фильму\"\"\"\n\n serializer_class = ReviewCreateSerializer\n\n# class RatingCreateApiViews(APIView):\n# \"\"\"Добавление рейтинга к фильму\"\"\"\n#\n# def post(self, request):\n# serializer = RatingCreateSerializer(data=request.data)\n# if serializer.is_valid():\n# serializer.save(ip=get_client_ip(request))\n# return Response(status=201)\n# else:\n# return Response(status=400)\n\nclass RatingCreateApiViews(CreateAPIView):\n \"\"\"Добавление рейтинга к фильму\"\"\"\n\n serializer_class = RatingCreateSerializer\n\n def perform_create(self, serializer):\n serializer.save(ip=get_client_ip(self.request))\n\n\n\nclass ActorApiListView(ListAPIView):\n \"\"\"Вывод списка актеров\"\"\"\n serializer_class = ActorSerializer\n queryset = Actor.objects.all()\n\n\nclass ActorDetailApiListView(RetrieveAPIView):\n \"\"\"Вывод информации об актере\"\"\"\n serializer_class = ActorDetailSerializer\n queryset = Actor.objects.all()\n lookup_field = 'pk'\n" }, { "alpha_fraction": 0.678787887096405, "alphanum_fraction": 0.678787887096405, "avg_line_length": 48.5, "blob_id": "a1722f3a499b587091f6e254848438973cfce475", "content_id": 
"32799fca6f6c62558e89044e2c4c35e3d8d62eec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 165, "num_lines": 20, "path": "/dj_project/movies/urls.py", "repo_name": "kondrashev-y/dj_project", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.MoviesView.as_view()),\n path(\"filter/\", views.FilterMoviesView.as_view(), name='filter'), # поставлен специально перед урл муви слаг, для того что бы урл не попадал по запрос по фильму\n path(\"search/\", views.Search.as_view(), name='search'),\n path(\"category/<slug:slug>/\", views.CategoryMoviesView.as_view(), name='category_list'),\n path(\"rating/<int:rt>\", views.RatingMovieViewList.as_view(), name='rating_list'),\n path(\"json-filter/\", views.JsonFilterMoviesView.as_view(), name='json_filter'),\n path(\"add-rating/\", views.AddStarRating.as_view(), name='add_rating'),\n path(\"movie/<slug:slug>/\", views.MovieDetailView.as_view(), name=\"movie_detail\"),\n path(\"review/<int:pk>/\", views.AddReviews.as_view(), name=\"add_review\"),\n path(\"actor/<str:slug>\", views.ActorView.as_view(), name='actor_detail'),\n path(\"actors/\", views.ActorListView.as_view(), name='actor_list'),\n\n\n]\n" } ]
10
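In the dj_project record above, `movies/service.py` fetches a per-film XML document from rating.kinopoisk.ru and flattens it into a dict before reading the `imdb_rating` key. Below is a self-contained sketch of just that flattening step; the sample payload is an assumption about the endpoint's shape, inferred only from the tag names the repo code reads.

```
import xml.etree.ElementTree as ET

# Assumed response shape; only the tag names are taken from get_rating() above.
response_xml_as_string = (
    "<rating>"
    "<kp_rating num_vote='100'>7.5</kp_rating>"
    "<imdb_rating num_vote='200'>7.1</imdb_rating>"
    "</rating>"
)

root = ET.fromstring(response_xml_as_string)
rating = {child.tag: child.text for child in root}  # tag -> text, as in get_rating()
print(rating["imdb_rating"])  # -> 7.1
```

Iterating an Element yields its direct children, which for a flat document like this produces the same tag-to-text dict that the record's iterfind-based loop builds.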
MariaTsareva/DataBases_2018
https://github.com/MariaTsareva/DataBases_2018
34660cf2baf4989f60b39a4b13efc601506c956d
f1d7f0acb795d76e9944af53a2764d33d02abaf6
a6152b200c89c712bba943f39ce4839246cee3a8
refs/heads/master
2020-04-02T02:02:50.357692
2018-10-20T10:10:51
2018-10-20T10:10:51
153,888,432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5535649061203003, "alphanum_fraction": 0.5597805976867676, "avg_line_length": 34.01315689086914, "blob_id": "0e9122c33f23cf5c11063a2aeb86af938016933b", "content_id": "a0fe5f7100619793a6d5a1af83d70f5a826a9779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2735, "license_type": "no_license", "max_line_length": 122, "num_lines": 76, "path": "/Database/db.py", "repo_name": "MariaTsareva/DataBases_2018", "src_encoding": "UTF-8", "text": "import sqlite3\r\nfrom datetime import datetime\r\nimport numpy as np\r\nfrom flask import Flask, render_template, request, url_for, redirect\r\napp = Flask(__name__)\r\n\r\n\r\ndef days(dep_date, arr_date):\r\n    date_format = \"%d/%m/%Y\"\r\n    dep_date = datetime.strptime(dep_date, date_format)\r\n    arr_date = datetime.strptime(arr_date, date_format)\r\n    delta = arr_date - dep_date\r\n    return delta.days\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('home.html')\r\n\r\n\r\n@app.route('/newtrip')\r\ndef trip():\r\n    return render_template('trip.html')\r\n\r\n\r\n@app.route('/result', methods=['GET', 'POST'])\r\ndef result():\r\n    conn = sqlite3.connect('reise.db')\r\n    c = conn.cursor()\r\n    if request.method == 'POST':\r\n        try:\r\n            emp = request.form['Employee']\r\n            print(type(emp))\r\n            dep_date = request.form['Departure date']\r\n            arr_date = request.form['Arrival date']\r\n            country = request.form['Country']\r\n            # with sqlite3.connect(\"reise.db\") as conn:\r\n            # c = conn.cursor()\r\n            # query =\r\n            # print(query)\r\n            emp_id = c.execute(\"SELECT id_employee FROM Employees WHERE name = ?\", (emp,))\r\n            emp_id = emp_id.fetchone()[0]\r\n            print('test', emp_id)\r\n            country_id = c.execute(\"SELECT id_country FROM Countries WHERE country = ?\", (country,)).fetchone()[0]\r\n            days_ = days(dep_date, arr_date)\r\n            rate = c.execute(\"SELECT rate FROM Countries WHERE country = ?\", (country,)).fetchone()[0]\r\n            pay_day = c.execute(\"SELECT day_payment FROM Countries WHERE country = ?\", (country,)).fetchone()[0]\r\n            sum_ = np.round(days_ * rate * pay_day, 2)\r\n            c.execute(\"INSERT INTO Trips (employee_id, dep_date ,arr_date, country_id, sum) VALUES (?,?,?,?,?)\", \\\r\n                      (emp_id, dep_date, arr_date, country_id, sum_))\r\n            # c.execute(\"INSERT INTO Trips (sum) SELECT dep_date, arr_date, arr_date - dep_date FROM Trips \")\r\n            conn.commit()\r\n            msg = \"Record successfully added\"\r\n            return render_template(\"result.html\", msg=msg)\r\n            # return redirect(url_for('trip'), msg=msg, code=307)\r\n        except:\r\n            conn.rollback()\r\n            msg = \"error in insert operation\"\r\n        finally:\r\n            return render_template(\"result.html\", msg=msg)\r\n\r\n\r\n@app.route('/list')\r\ndef list():\r\n    conn = sqlite3.connect(\"reise.db\")\r\n    conn.row_factory = sqlite3.Row\r\n\r\n    c = conn.cursor()\r\n    c.execute(\"SELECT * FROM Trips\")\r\n\r\n    rows = c.fetchall()\r\n    return render_template(\"list.html\",rows = rows)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(host='localhost', port=5003,debug = True)" } ]
1
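db.py above prices a business trip as days x rate x day_payment, where `days()` subtracts two `%d/%m/%Y` date strings. Below is a standalone worked example of that calculation; the rate and per-diem figures are invented for illustration only (in the app they come from the Countries table).

```
from datetime import datetime

import numpy as np

DATE_FORMAT = "%d/%m/%Y"

def days(dep_date, arr_date):
    """Whole days between two dd/mm/yyyy date strings, as in db.py."""
    delta = datetime.strptime(arr_date, DATE_FORMAT) - datetime.strptime(dep_date, DATE_FORMAT)
    return delta.days

rate, day_payment = 1.1, 45.0                  # illustrative values only
trip_days = days("01/03/2020", "08/03/2020")   # -> 7
total = np.round(trip_days * rate * day_payment, 2)
print(trip_days, total)                        # -> 7 346.5
```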
Nicholas-Fabugais-Inaba/Computing_Lab_Assignments
https://github.com/Nicholas-Fabugais-Inaba/Computing_Lab_Assignments
0823639d9f7001fc923d3b52e2c4f8c714a40191
696753f88360b418d6905c887d3d61590d170353
2eb7f100af76d5d186df9fa0afe81deb912c606a
refs/heads/main
2023-04-10T15:22:30.392751
2021-03-23T02:40:58
2021-03-23T02:40:58
343,535,690
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5976252555847168, "alphanum_fraction": 0.6061975359916687, "avg_line_length": 52.123077392578125, "blob_id": "148dbdbd18b3e7a683cc7105a120ac86acecdfef", "content_id": "ca385f3bcab0cccf203761d16e0b6ec00ee5eb10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17289, "license_type": "no_license", "max_line_length": 703, "num_lines": 325, "path": "/fabugain_CL6.py", "repo_name": "Nicholas-Fabugais-Inaba/Computing_Lab_Assignments", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Computing 6 Assignment\n# \n# \n\n# ---\n# ## Background\n# \n# In this assignment you will be implementing a portion of a Geographic Information System (GIS). A GIS is a computer system used to organize, categorize, and analyze geographical data in order to produce accurate depiction of the real world. The system uses multiple layers of information to achieve this task. The data layers are split into a grid and represented as a matrix with **m** rows and **n** columns where each entry in the matrix contains the type of land at that point on the map. An entry **A<sub>ij</sub>** is the *i*th row and *j*th column in our map matrix. We assume that **A<sub>00</sub>** is the first element in our matrix. The graphic below will assist in visualizing the process:\n# \n# ![Comp6.png](attachment:Comp6.png)\n# \\begin{align}\n# \\texttt{Figure 1}\n# \\end{align}\n# \n# \n# As seen in the previous example, our GIS utilizes **6** different data layers. We call these layers the **map types** as they classify regions of different land on our map. Thus, each entry in our map matrix can be **one** of the 6 map types.\n# \n# -\tTransportation (T)\n# -\tAgricultural (A)\n# -\tResidential (R)\n# -\tCommercial (C)\n# -\tWater (W)\n# -\tUndeveloped land (U)\n# \n# Our GIS will store the map information as a list of lists. If we have a list named **map**, then map[i][j] will store the map type at row i, column j. Each entry will contain a string that corresponds to 1 of the 6 possible map types listed above. The list representation of the map in **Figure 1** is shown below:\n# \n# \n# ```\n# [['A','A','A','A','U','U','U','U'], \n# ['A','A','A','A','U','R','R','R'], \n# ['W','W','W','W','T','T','T','T'], \n# ['W','W','W','W','T','R','R','R'],\n# ['C','C','U','U','T','R','U','U'], \n# ['T','T','T','T','T','T','U','U'], \n# ['U','U','U','U','T','R','U','U']]\n# ```\n# \n# One usage of the system is to be able to easily identify whether or not a piece of land (entry in the map matrix) is deemed **commercially buildable**. A piece of land at **A<sub>ij</sub>** is deemed commercially buildable if the following conditions hold:\n# -\tThe entry at **A<sub>ij</sub>** has map type **U**\n# -\tThe entry **A<sub>ij</sub>** is not on the edges of the map (the first and last rows and columns).\n# -\tThe entry **A<sub>ij</sub>** is not adjacent with an entry of map type **R** or map type **A**. Note that adjacent entries are entries to the top, bottom, left, and right of the current cell.\n# \n# Based on the criteria and the map representation of **Figure 1**, it can be seen that **A<sub>4,2</sub>** is commercially buildable and **A<sub>1,4</sub>** is not commercially buildable. \n# \n# Please read the requirements below to implement the GIS system!\n# \n\n# ---\n# ## Additional Information\n# When using a 2D list, we can access elements around a specific index. 
Given the element at location i,j we can find the adjacent element within the same row by changing the row index. If we want to access the element to the *left* of our selected element, we can subtract 1 from the j index. To access the element to the right, we can add 1 to the j index. To access the element in the previous row (above the element), we can subtract 1 from the i index. To access the element in the next row (below the element), we can add 1 to the i index.\n\n# In[ ]:\n\n\nx = [[1,2,3],\n [4,5,6], \n [7,8,9]]\ni=1\nj=1\nprint(x[i][j])\nprint(x[i-1][j]) # above\nprint(x[i][j+1]) # right\n\n\n# Be careful when accessing adjacent elements - if you try to access an element that doesn't exist, you might receive unexpected output, or an error!\n\n# In[ ]:\n\n\nprint(x[i-2][j]) # 2 above - actually wraps around and gives us the element in row -1 (which is the last row)\nprint(x[i][j+2]) # 2 right - tries to access value in column 3 (which doesn't exist)\n\n\n# ---\n# ## NOTE THAT YOU WILL BE MARKED ON MULTIPLE ITEMS IN THIS LAB IN ADDITION TO THE FUNCTIONALITY OF YOUR CODE\n# - Variable Names\n# - Commenting\n# - General Legibility\n# - Reflective Questions\n\n# ---\n# ## Program Requirements (12 Marks)\n# \n# Your GIS system will be comprised of a set of functions used to analyze the information of any given map. In addition, you will be creating a function used to determine whether or not a piece of land is commercially buildable. The requirements of the system are given below. Please ensure that your functions have the EXACT naming as specified! Failure to do so will result in lost marks.\n# \n# 1. Define a function **countType**(*map_data*, *map_type*):\n# - ***map_data***: A *list of lists* representing the data for a given map.\n# - ***map_type***: A *string* representing a map type ('T','A','R','C','W', or 'U')\n# - **Return:** An *integer* representing the number of times *map_type* occurs in *map_data*.\n# \n# \n# 2.\tDefine a function **classifyMap**(*map_data*):\n# -\t***map_data***: A *list of lists* representing the data for a given map.\n# -\t**Return**: A map classification according to the following rules:\n# -\tThe *string* **Suburban** if the number of 'R' cells is greater than 50% of all cells.\n# - The *string* **Farmland** if the number of 'A' cells is greater than 50% of all cells.\n# - The *string* **Conservation** if the number of 'U' cells plus the number of 'W' cells is greater than 50% of all cells.\n# - The *string* **City** if the number of 'C' cells is greater than 50% of all cells and the number of 'U' cells plus the number of 'A' cells is between 10% and 20% of all cells (inclusive).\n# - The *string* **Mixed** if none of the above criteria are met. \n# _(Hint, use your countType function coupled with the fact that the total cells in map\\_data is given by m*n)_\n# \n# \n# 3.\tDefine a function **isolateType**(*map_data*, *map_type*):\n# -\t***map_data***: A *list of lists* representing the data for a given map.\n# -\t***map_type***: A *string* representing a map type (‘T’, ‘A’, ‘R’, ‘C’, ‘W’, or ‘U’)\n# -\t**Return**: A new *list of lists* that represent *map_data* as a matrix but all entries that **are not** equal to *map_type* are replaced with a string containing only a space (\" \"). 
\n# \n# \n# 4.\tDefine a function **commerciallyBuildable**(*map_data*, *i*, *j*):\n# -\t***map_data***: A *list of lists* representing the data for a given map.\n# -\t***i***: An *integer* representing a given row in *map_data*.\n# -\t***j***: An *integer* representing a given column in *map_data*.\n# -\t**Return**: **True** if *map_data[i][j]* is commercially buildable, otherwise **False**. (Refer to the background section to determine what is deemed commercially buildable)\n\n# ---\n# ## Implementation\n# Please define all functions in the cell below\n\n# In[28]:\n\n\ndef countType(map_data, map_type):\n count = 0 #initializes a count variable as int to determine the occurence\n for i in range(len(map_data)): #of the map_type in map_data\n for j in range(len(map_data[i])): #'for i' for loop looks at the each row individually\n count += map_data[i][j].count(map_type) #'for j' for loop looks at each column of a row at index 'i'\n return count #the 'count +=' adds the amount of occurences of the map_type\n #in the specific row at index 'i'\ndef classifyMap(map_data):\n rows = len(map_data)\n columns = len(map_data[0]) #total_cells is able to determine the number of cells in the entire\n total_cells = rows*columns #2d array by multiplying the amount of rows by the amount of columns\n map_class = \"\" #initializes a class variable that will be reinitialized to the\n if countType(map_data, 'R') > (total_cells/2): #specific map classification\n map_class = \"Suburban\" #each if statement is following the criteria listed in the program\n elif countType(map_data, 'A') > (total_cells/2): #requirements\n map_class = \"Farmland\"\n elif (countType(map_data, 'U') + countType(map_data, 'W')) > (total_cells/2):\n map_class = \"Conservation\"\n elif countType(map_data, 'C') > (total_cells/2) and (countType(map_data, 'U') + countType(map_data, 'A')) >= (total_cells*0.1) and (countType(map_data, 'U') + countType(map_data, 'A')) <= (total_cells*0.2):\n map_class = \"City\"\n else:\n map_class = \"Mixed\"\n return map_class #the specific map classification is returned\n \ndef isolateType(map_data, map_type):\n for i in range(len(map_data)):\n for j in range(len(map_data[i])):\n if map_data[i][j] != map_type: #if the map type at the row of index 'i' and column of index 'j' \n map_data[i][j] = \" \" #is not the same as map_type, leave it as a blank space and\n return map_data #return a new 2d array of just the variable map_type\n \n \ndef commerciallyBuildable(map_data, i, j):\n map_type = map_data[i][j] #initializes a variable as map_type to show the map type of the \n if map_type == 'U': #index positions. The if statements below are following the criteria\n if i != 0 or j != 0 or i != 6 or j != 7: #to be commercially buildable\n if map_data[i-1][j] != 'R' or map_data[i-1][j] != 'A' or map_data[i+1][j] != 'R' or map_data[i+1][j] != 'A' or map_data[i][j-1] != 'R' or map_data[i][j-1] != 'A' or map_data[i][j+1] != 'R' or map_data[i][j+1] != 'A':\n return True #adding 1 or subtracting 1 check to see if either the top, bottom,\n else: #left, or right of the given index position is a residential or \n return False #agricultural area\n\n\n# ---\n# ## Sample Output\n# Unlike the other computing labs that required you to run main() to validate your code, these functions can act as stand-alone functions. 
You have been provided with some test cases, but you are encouraged to create more to thoroughly test your code.\n\n# In[29]:\n\n\nMAP = [['A','A','A','A','U','U','U','U'],\n ['A','A','A','A','U','R','R','R'],\n ['W','W','W','W','T','T','T','T'],\n ['W','W','W','W','T','R','R','R'],\n ['C','C','U','U','T','R','U','U'],\n ['T','T','T','T','T','T','U','U'],\n ['U','U','U','U','T','R','U','U']]\n\nMAP2 = [['C','C','C','C','R','T','C'],\n ['T','T','T','T','T','C','C'],\n ['C','C','W','C','R','T','C'],\n ['C','C','C','W','U','T','C'],\n ['C','C','C','U','U','T','C'],\n ['C','C','C','C','C','U','C'],\n ['C','C','C','T','U','U','C'],\n ['C','T','C','T','U','A','C']]\n\n\n# countType() and classifyMap() functions\nprint(\"The number of U spaces in MAP =\",countType(MAP, 'U'))\nprint(\"The number of T spaces in MAP2 =\",countType(MAP2, 'T'))\nprint(\"MAP Type =\",classifyMap(MAP))\nprint(\"MAP2 Type =\",classifyMap(MAP2))\n\n# isolateType() function\nprint(\"-----------------\")\nprint(\"Isolated MAP: U\")\nMA = isolateType(MAP,'U')\nfor row in MA:\n print(row)\nprint(\"-----------------\")\nprint(\"Isolated MAP2: T\")\nMB = isolateType(MAP2,'T')\nfor row in MB:\n print(row)\nprint(\"-----------------\")\n\n# commerciallyBuildable() function\nprint(\"Is MAP commercially buildable at (4,2):\",commerciallyBuildable(MAP,4,2))\nprint(\"Is MAP2 commercially buildable at (2,2):\",commerciallyBuildable(MAP2,2,2))\n\n\n# The expected output for the provided test cases is given below:\n# ```\n# The number of U spaces in MAP = 17 \n# The number of T spaces in MAP2 = 12 \n# MAP Type = Mixed \n# MAP2 Type = City \n# -----------------\n# Isolated MAP: U\n# [' ', ' ', ' ', ' ', 'U', 'U', 'U', 'U']\n# [' ', ' ', ' ', ' ', 'U', ' ', ' ', ' ']\n# [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n# [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n# [' ', ' ', 'U', 'U', ' ', ' ', 'U', 'U']\n# [' ', ' ', ' ', ' ', ' ', ' ', 'U', 'U']\n# ['U', 'U', 'U', 'U', ' ', ' ', 'U', 'U']\n# -----------------\n# Isolated MAP2: T\n# [' ', ' ', ' ', ' ', ' ', 'T', ' ']\n# ['T', 'T', 'T', 'T', 'T', ' ', ' ']\n# [' ', ' ', ' ', ' ', ' ', 'T', ' ']\n# [' ', ' ', ' ', ' ', ' ', 'T', ' ']\n# [' ', ' ', ' ', ' ', ' ', 'T', ' ']\n# [' ', ' ', ' ', ' ', ' ', ' ', ' ']\n# [' ', ' ', ' ', 'T', ' ', ' ', ' ']\n# [' ', 'T', ' ', 'T', ' ', ' ', ' ']\n# -----------------\n# Is MAP commercially buildable at (4,2): True \n# Is MAP2 commercially buildable at (2,2): False\n# ```\n\n# ----------\n# ## Code Legibility (6 Marks)\n# Your code will be marked on commenting and code legibility.<br>\n# The mark breakdown is as follows:<br>\n# > 2 marks for using appropriate variable names that indicate what is being stored in that variable<br>\n# 2 marks for leaving comments on major parts of your code such as where you read the file or calculate a summation<br>\n# 2 marks for general legibility. The TA's should be able to understand your code without spending hours reading it. For example do not put your code in one very long line as this is hard for someone else reading your code to understand\n\n# ---\n# ## Test Plan\n# Develop a test plan for your program. Your test plan should have at least three test cases: one normal case, one boundary case, and one abnormal case. You can test any function but you must test **at least two different** functions. 
Please use the following format for your test cases:\n# \n# **Function:** \n# **Input:** \n# **Output:** \n# **Excepted Output:** \n# **Pass/Fail:** \n# \n# An example test case is shown below: \n# ```\n# Function: countType(map_data,map_type)\n# Input: map_data = [['U','T','U','A'],\n# ['R','T','W','A'],\n# ['U','T','A','W']] \n# map_type = 'U'\n# Output: 3\n# Excpected Output: 3\n# Pass/Fail: Pass\n# ```\n# \n# Implement your testing plan in the cell below! \n\n# ```\n# Function: countType(map_data, map_type)\n# Input: map_data = [['C','C','T','W'],\n# ['R','T','R','W'],\n# ['T','A','U','R']] \n# map_type = 'C'\n# Output: 2\n# Expected Output: 2\n# Pass/Fail: Pass \n# \n# Function: classifyMap(map_data):\n# Input: map_data = [['A','A','A','W'],\n# ['A','A','R','W'],\n# ['A','A','U','R']] \n# Output: Farmland\n# Expected Output: Farmland\n# Pass/Fail: Pass \n# \n# ```\n\n# ---\n# ## Reflective Questions (6 Marks)\n# \n# 1. Which functions did you use a nested structure (nested loops, nested conditionals, etc) to implement the requirements? Would it have been possible to implement them without using a nested structure? Which functions did you *not* use a nested structure? Would it have been possible to implement them *with* a nested structure? \n# \n# \n# 2. Suppose we wanted to create an additional map classification called 'Urban City' which is indicated by the number of 'R' cells plus the number of 'C' cells being between 60% and 80%. Can we do this? How might this affect our classifyMap() function?\n# \n# \n# 3. How many test cases would you need to confirm that your classifyMap() function correctly identifies a \"Farmland\" map? Explain what your test cases would be.\n\n# ```\n# The countType and isolateType function both utilized nested for loops, while the function commerciallyBuildable used nested if statements. It would be possible to implement the functions without using nested structures however, it would be very inefficient as there would need to be many more lines of code to be written. Additionally, the runtime for the function would also increase as well. The function that did not use a nested structure was the classifyMap function and it would be possible to implement a nested structure, however, it is much easier to have the if statements as is to reduce the lines of code needed to output the function.\n# This is possible to create an additional map classification as all this would change in the classifyMap() function would be adding an if statement to the conditional statements already implemented.\n# You would probably need about 7 different test cases to confirm that classifyMap() would be able to correctly identify a \"Farmland\" map. This would be to test out each matrix having over 50% of a certain type and since there's 6 types, 6 of the test cases are completed. You want to do this to make sure that whether the map_type changes, \"Farmland\" isn't the output for the map_data that does not have over 50% covered with 'A'. 
The 7th test case would then be used for the conditional statement that includes 'A', which is for the output \"City\" as you do not want the function to output farmland just because it includes 'A'.\n# \n# ```\n\n# ---\n# ## Submission\n# \n# Please download this notebook as a .py file (*File* > *Download as* > *Python (.py)*) and submit it to the Computing Lab 6 dropbox on avenue with the naming convention: macID_CL6.py\n# \n# **Make sure the final version of your lab runs without errors, otherwise, you will likely recieve zero.**\n# \n# This assignment is due the day after your Lab A section at 11:59 PM EST\n# \n# Late labs will not be accepted\n" }, { "alpha_fraction": 0.8083832263946533, "alphanum_fraction": 0.826347291469574, "avg_line_length": 82.5, "blob_id": "d3b801385e4096808e6389db83a6bd95f44af27a", "content_id": "5c5e2a88f1c76db67cfd2dca10994e068c1a0233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 167, "license_type": "no_license", "max_line_length": 138, "num_lines": 2, "path": "/README.md", "repo_name": "Nicholas-Fabugais-Inaba/Computing_Lab_Assignments", "src_encoding": "UTF-8", "text": "# Computing_Lab_Assignments\nFrom the course, Engineer 1P13A: Integrated Cornerstone Design Project, these are the computing lab assignments that I needed to complete.\n" }, { "alpha_fraction": 0.6370097994804382, "alphanum_fraction": 0.6625206470489502, "avg_line_length": 48.55345916748047, "blob_id": "66573cc4931782aac8d9cce7067eaacec6b10fda", "content_id": "27e0a9ed2b40c8a8f3f8bd443dd3a1777a273bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15758, "license_type": "no_license", "max_line_length": 515, "num_lines": 318, "path": "/fabugain_CL7.py", "repo_name": "Nicholas-Fabugais-Inaba/Computing_Lab_Assignments", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Computing 7 Assignment\n# \n# \n\n# ---\n# ## Background\n# \n# In this assignment, you will be extracting daily temperature data, sorting the data, and then calculating the median temperature for a given set of data.\n# \n# A set of daily temperature data will be provided to you as a *list* of strings. For example:\n# \n# <br>\n# <center><code>temps = [\"5\",\"-1.0\",\"1.1\",\"None\",\"9\",\"NA\",\"1.0\"]</code></center>\n# <br>\n# \n# Note that the temperature may not be recorded on some days. A sequence of characters that cannot be represented as a number indicate that the temperature was not recorded on a given day. In the previous example, we can see that the strings \"None\" and \"NA\" were used to represent days where the temperature was not recorded. It is important to note that these strings can consist of any characters and can vary in length. Some more examples would be \"\" (the empty string), \"Nothing\", \"Not recorded\", \"Forgot\", etc.\n# \n# To calculate the median temperature for a given set of data, we first need to convert the data to a list of numbers. The data converted from the *temps* list is given below. Notice how the days that did not have a recorded temperature were ignored:\n# \n# <br>\n# <center><code>temps_extracted=[5.0,-1.0,1.1,9.0,1.0]</code></center>\n# <br>\n# \n# How would we calculate the median of our list? Recall that the median is a value separating the higher half from the lower half of a list of sorted values. In the case where the length of our list of numbers is odd, the median is the middle element in our list. 
In the case where the length of our list of numbers is even, the median can be found by taking the average of the two center most elements. \n# \n# We cannot calculate the median until we sort our values. Our sorted temperatures will look as follows:\n# \n# \n# <br>\n# <center><code>temps_sorted=[-1.0,1.0,1.1,5.0,9.0]</code></center>\n# <br>\n# \n# \n# Now that our list is sorted, we can see that the median in this case is **1.1**.\n# <br>\n# \n# ---\n# \n# So how do we sort a list of values? Formally, a sorting algorithm is a sequence of steps that are used to sort a sequence of values. It turns out that there exist many different sorting algorithms, some more complicated than others. In todays assignment we are going to focus on an intuitive sorting algorithm known as **selection sort**. We are going to use this algorithm to sort a list of numbers in ascending order.\n# <br>\n# \n# Say we have a list named *nums*. The main idea of selection sort is that we want to start iterating over our list *nums* from i = 0 to the length of our list minus 1 (n-1), choosing the smallest element in the sub list from *nums[i]* to *nums[n-1]* and placing it in *nums[i]* on each iteration. If you are confused, don't worry! We have provided you the pseudocode for the algorithm. Before looking at the pseudocode, use the following example to gain some intuition about how selection sort works.\n# <br>\n# <br>\n# \n# Imagine that we have the following list of numbers that need to be sorted:\n# \n# ![alg%20%2826%29.png](attachment:alg%20%2826%29.png)\n# \n# \n# We start sorting at the left most item. We call our current position index i. The left most element in our list is the number 2, which occurs at i = 0.\n# \n# ![alg%20%2825%29.png](attachment:alg%20%2825%29.png)\n# \n# When considering the final sorted list, should the number 2 be in this position? What is the smallest element in our list from index i to the end of our list (index 4)? By inspection, we can see that the smallest number is 1. We can identify the position of this number using the variable min_index, which in this case is min_index = 2. Therefore the number 1 should be at index i because it is the smallest element in our list from index 0 to index 4!\n# \n# ![alg%20%2824%29.png](attachment:alg%20%2824%29.png)\n# \n# We can guarantee that the number 1 will be in the correct position in our final sorted list if we swap the elements at i and min_index! \n# \n# ![alg%20%2823%29.png](attachment:alg%20%2823%29.png)\n# \n# You might think that 2 is still not in its correct position, but don't worry! We can guarantee that the number 1 is in the correct position, and we will worry about 2 later.\n# <br>\n# <br>\n# \n# Let's now increment i to 1.\n# ![alg%20%2821%29.png](attachment:alg%20%2821%29.png)\n# \n# Let's perform the same actions we did when i was 0. What is the smallest element in our list from the element at this position (i = 1) to the end of our list (index 4)? By inspection, we can see that the smallest number is 2 which occurs at index 2, thus min_index = 2 in this case.\n# \n# ![alg%20%289%29.png](attachment:alg%20%289%29.png)\n# \n# We then swap the elements at i and min_index!\n# \n# ![alg%20%2811%29.png](attachment:alg%20%2811%29.png)\n# \n# Let's now increment i once again to 2.\n# \n# ![alg%20%2812%29.png](attachment:alg%20%2812%29.png)\n# \n# Can you see what's going on here? All of the elements (green) to the left of our current index i are sorted! 
Therefore our entire list will be sorted if we continue with this algorithm until we reach the end of our list. The following illustrations demonstrate how the algorithm will perform for the rest of the list.\n# \n# ![alg%20%2820%29.png](attachment:alg%20%2820%29.png)\n# ![alg%20%2814%29.png](attachment:alg%20%2814%29.png)\n# \n# Increment i once again.\n# \n# ![alg%20%2815%29.png](attachment:alg%20%2815%29.png)\n# ![alg%20%2819%29.png](attachment:alg%20%2819%29.png)\n# ![alg%20%2817%29.png](attachment:alg%20%2817%29.png)\n# \n# When we increment i one last time, we reach the end of our list and all elements are sorted!\n# \n# ![alg%20%2818%29.png](attachment:alg%20%2818%29.png)\n# \n# <br>\n# \n# The pseudocode for the algorithm is given below. We assume that the list we are sorting is given by the name *nums*.\n# ```\n# Create a variable n and set it equal to the length of nums\n# For i = 0 to n-1\n# Create a variable min_index, and set it equal to i\n# For j=i to n-1\n# if nums[j] is less than nums[min_index] then set min_index to j\n# endFor\n# Swap the elements at nums[i] and nums[min_index]\n# endFor\n# ```\n# \n# It is your task to implement the sorting algorithm from the pseudocode, as well as other functions listed in the requirements section of this document.\n\n# ---\n# ## NOTE THAT YOU WILL BE MARKED ON MULTIPLE ITEMS IN THIS LAB IN ADDITION TO THE FUNCTIONALITY OF YOUR CODE\n# - Variable Names\n# - Commenting\n# - General Legibility\n# - Try and Except\n# - Test Plan\n\n# ---\n# ## Program Requirements (12 Marks)\n# \n# The requirements of the system are given below. Please ensure that your functions have the EXACT naming as specified! Failure to do so will result in lost marks. \n# \n# ***Note: you must include a try and except statement in at least two functions in your code.***\n# \n# 1.\tDefine a function **extract_temps**(*temps*):\n# - ***temps***: A *list* of *strings* representing a set of daily temperatures. \n# -\t**Return**: A *list* of floats that represent the elements in *temps* that could be converted into floats.\n# <br>*Note: Some of the items in temps cannot be represented as a float. These items should be ignored. Refer to the background section as an example*\n# \n# \n# 2.\tDefine a function **selection_sort**(*nums*):\n# -\t***nums***: A *list* of floats.\n# -\t**Return**: A sorted copy of the list *nums* using the selection sort algorithm.\n# <br>*Note: Make sure that you sort a copy of the list. DO NOT modify the original list. You can create a copy of nums by writing the code new_arr = nums.copy(). In addition, marks will not be given if the selection sort algorithm is not used.* \n# \n# \n# 3.\tDefine a function **calculate_median**(*nums*):\n# -\t***nums***: A *list* of sorted floating numbers.\n# -\t**Return**: A *float* representing the median of nums.\n# <br>*Hint: If the length of a sorted list is odd then the median exists at index n//2 where n is the length of the list and // represents integer division. If the length of a sorted list is even, the median is calculated by the average of the two most middle elements. One of these elements exists at index n//2, can you figure out the other one? For example, the median of [1.0,2.0,3.0,4.0] is (3.0+2.0)/2 = 2.5*\n# \n# \n# 4. Define a function **main**(*temp_data*):\n# -\t***temp_data***: A *list* of *strings* representing a set of daily temperatures. \n# -\t**Return**: A *float* representing the median of nums. 
Return the *string* \"N/A\" if an error occurs.\n\n# ---\n# ## Implementation\n# \n# Please define all functions in the cell below\n\n# In[3]:\n\n\nimport math #imports the math library\ndef extract_temps(temps):\n    temps_extracted = [] #creates an empty list for the elements that can be converted into floats\n    for x in range(len(temps)): #for loop goes through each element in the list\n        try: #try statement checks if the element in the list is able to be converted\n            check = float(temps[x]) #into a float variable\n        except ValueError: #a ValueError will be raised if it is not possible\n            check = False\n        else: #if it is possible to be converted, set the check variable to True and\n            check = True #apply that to the conditional if statements to either append the \n        if check == True: #convertible float element to the list or go to the next element\n            temps_extracted.append(float(temps[x]))\n        elif check == False:\n            continue\n    return temps_extracted #returns the list of convertible float elements\n    \ndef selection_sort(nums):\n    new_arr = nums.copy() #creates a copy of the original nums list and sets it to a variable\n    n = len(new_arr) #a variable 'n' is initialized as the length of the copied nums list\n    for i in range(0, n): #the 'for i' loop runs through the new_arr list as it initializes \n        min_index = i #min_index to i each time. The 'for j' loop runs from the variable i\n        for j in range(i, n): #until the n variable, checking if the variable at the 'j' index position\n            if new_arr[j] < new_arr[min_index]: #is a lower number than the variable at index 'min_index' in new_arr.\n                min_index = j #If so, min_index is reinitialized to j and the positions of the 'i' and \n        new_arr[i], new_arr[min_index] = new_arr[min_index], new_arr[i] #min_index elements are swapped.\n    return new_arr #the new sorted list of floats is returned\n\ndef calculate_median(nums):\n    median = math.floor(len(nums)/2) #the middle index of the list; math.floor rounds down in case the halved length is a\n    if len(nums)%2 == 0: #decimal. 
If the list has an even length, return the average of the \n        return (nums[median] + nums[median-1])/2 #two middle elements \n    elif len(nums)%2 == 1: #If the list is odd, return the element in the middle of the list\n        return nums[median] #based on the index position established in the median variable\n    \ndef main(temp_data):\n    try: #try statement utilizes all the functions above and returns the median\n        return calculate_median(selection_sort(extract_temps(temp_data))) #of the list as a float\n    except: #except statement returns \"N/A\" if an error occurs\n        return \"N/A\"\n    \n\n\n# ---\n# ## Sample Output\n# \n\n# The following cell provides some sample code for you to test out your code.\n\n# In[4]:\n\n\ntemps = [\"5\",\"\",\"5.5\",\"6.2\",\"4.5\",\"N/A\",\"Not Recorded\",\"5.67\"]\nprint(\"Given Temps:\",temps)\nprint(\"Median:\",main(temps))\nprint(\"-----\")\ntemps = [\"5\",\"N/A\",\"5\"]\nprint(\"Given Temps:\",temps)\nprint(\"Median:\",main(temps))\nprint(\"-----\")\ntemps = [\"-10\",\"\",\"\"]\nprint(\"Given Temps:\",temps)\nprint(\"Median:\",main(temps))\nprint(\"-----\")\ntemps = [\"\",\"\",\"\"]\nprint(\"Given Temps:\",temps)\nprint(\"Median:\",main(temps))\n\n\n# The expected output for the previous cell is given below:\n# \n# <code>\n# Given Temps: ['5', '', '5.5', '6.2', '4.5', 'N/A', 'Not Recorded', '5.67']\n# Median: 5.5\n# -----\n# Given Temps: ['5', 'N/A', '5']\n# Median: 5.0\n# -----\n# Given Temps: ['-10', '', '']\n# Median: -10.0\n# -----\n# Given Temps: ['', '', '']\n# Median: N/A\n# </code>\n\n# ---\n# ## Code Legibility (6 Marks)\n# Your code will be marked on commenting and code legibility.<br>\n# The mark breakdown is as follows:<br>\n# > 2 marks for using appropriate variable names that indicate what is being stored in that variable<br>\n# 2 marks for leaving comments on major parts of your code such as where you read the file or calculate a summation<br>\n# 2 marks for exception handling. Your functions should produce the required outputs even when receiving unexpected inputs\n\n# ---\n# ## Test Plan (6 Marks)\n# Develop a test plan for your program. Your test plan should have at least three test cases: one normal case, one boundary case, and one abnormal case. You can test any function but you must test **at least two different** functions. Please use the following format for your test cases:\n# \n# **Function:** \n# **Input:** \n# **Output:** \n# **Expected Output:** \n# **Pass/Fail:** \n# \n# An example test case is shown below: \n# ```\n# Function: extract_temps(temps)\n# Input: temps = [\"1\",\"NA\",\"5.5\"] \n# Expected Output: [1.0,5.5]\n# Output: [1.0,5.5]\n# Pass/Fail: Pass\n# ```\n# \n# Implement your testing plan in the cell below! \n\n# ```\n# Function: extract_temps(temps)\n# Input: temps = [\"1.3\",\"6.4\",\"NA\",\"2.5\",\"Not Recorded\"] \n# Expected Output: [1.3,6.4,2.5]\n# Output: [1.3,6.4,2.5]\n# Pass/Fail: Pass\n# \n# Function: main(temp_data)\n# Input: temps = [] \n# Expected Output: N/A\n# Output: N/A\n# Pass/Fail: Pass\n# \n# Function: calculate_median(nums)\n# Input: nums = [5.5] \n# Expected Output: 5.5\n# Output: 5.5\n# Pass/Fail: Pass\n# \n# ```\n\n# ---\n# ## Reflective Questions\n# \n# 1. What input would cause your main() function to return \"N/A\"?\n# \n# \n# 2. Assuming that functions 1-3 are only used inside your main function, is it necessary to use try and except statements inside them to validate input?\n\n# ```\n# 1. 
An empty list, a list whose elements cannot be converted into floats, and a list containing only empty strings would all cause the main() function to return \"N/A\".\n# 2. It is necessary to use try and except statements inside the extract_temps() function, as they allow each string to be checked for whether it can be converted into a float, essentially checking whether it is a number or not. An alternative would be to examine each character of the string and identify whether it is a digit, a decimal point, or a negative sign, but this would be very inefficient: it would require an excessive amount of code and lengthen the program's running time.\n# \n# ```\n\n# ---\n# ## Submission\n# \n# Please download this notebook as a .py file (*File* > *Download as* > *Python (.py)*) and submit it to the Computing Lab 7 dropbox on Avenue with the naming convention: macID_CL7.py\n# \n# **Make sure the final version of your lab runs without errors, otherwise, you will likely receive a zero.**\n# \n# This assignment is due the day after your Lab A section at 11:59 PM EST\n# \n# Late labs will not be accepted\n" } ]
3
rmartinsfer/HotelCast
https://github.com/rmartinsfer/HotelCast
7e9d7cd399be3fe6ec2b623ffdbd68882c3e4bf6
a9ca6eb76cb87f58857ef2cf2a9070906ce0c05e
d64a1b62b8ff4e2b16be56b55fb946a97f31be54
refs/heads/master
2023-03-18T23:46:59.934780
2018-10-24T15:34:28
2018-10-24T15:34:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49152541160583496, "alphanum_fraction": 0.49152541160583496, "avg_line_length": 16.75, "blob_id": "6b53b958438fbe4e43f07b43896337c4232356d0", "content_id": "244eee7733615821837e35362f0f26088b7973cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 354, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/django_webapp/castapp/static/js/demo_site.js", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "function demo_file(){\n\t\t$.ajax({\n\t\t\turl : \"/command\",\n\t\t\tmethod: \"get\",\n\t\t\tsuccess: function(res){\n\t\t\t\t$.notify({\n\t\t\t\t\ticon: 'glyphicon glyphicons-info-sign',\n\t\t\t\t\ttitle: 'The command sent was:', \n\t\t\t\t\tmessage: res,\n\t\t\t\t\ttarget: '_blank'\n\t\t\t\t},{\t\n\t\t\t\t\ttype: \"info\",\n\t\t\t\t\tplacement: {\n\t\t\t\t\t\tfrom: \"bottom\",\n\t\t\t\t\t\talign: \"right\"\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n}" }, { "alpha_fraction": 0.5444444417953491, "alphanum_fraction": 0.5444444417953491, "avg_line_length": 19, "blob_id": "b75b3c99da5f38f6b6d5b69691fea93df693d18b", "content_id": "115cefc3733f8b9456949792a7429d92e8ca88f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 66, "num_lines": 72, "path": "/django_webapp/castapp/static/js/hotelcast.js", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "$(function(){\n\thandled = false;\n\tfunction sendRequest(data){\n\t\t$.ajax({\n\t\t\turl: \"execute/?command=\" + data,\n\t\t\ttype: 'GET',\n\t\t\tsuccess: function(res) {\n\t\t\t\t\n\t\t\t\tconsole.log(res)\n\t\t\t\t$.notify({\n\t\t\t\t\ticon: 'glyphicon glyphicons-check',\n\t\t\t\t\ttitle: 'Sucess!',\n\t\t\t\t\tmessage: 'The command has ben sent to the sever!',\n\t\t\t\t\ttarget: '_blank'\n\t\t\t\t},{\t\n\t\t\t\t\ttype: \"success\",\n\t\t\t\t\tplacement: {\n\t\t\t\t\t\talign: \"center\"\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t},\n\t\t\terror: function(res) {\n\t\t\t\t$.notify({\n\t\t\t\t\ticon: 'glyphicon glyphicons-warning-sign',\n\t\t\t\t\ttitle: 'Ope!',\n\t\t\t\t\tmessage: 'There was an error of some sort please try again!',\n\t\t\t\t\ttarget: '_blank'\n\t\t\t\t},{\t\n\t\t\t\t\ttype: \"danger\",\n\t\t\t\t\tplacement: {\n\t\t\t\t\t\talign: \"center\"\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\t};\n\t\n\t$(document).on('click touchstart', \"#actionLink\",function(e){\n\t\t\tvar link = $(this).attr('data');\n\t\t\t\n\t\t\tif(e.type == 'touchstart'){\n\t\t\t\tsendRequest(link);\n\t\t\t\thandled = true;\n\t\t\t}\n\t\t\telse if(handled == false){\n\t\t\t\tsendRequest(link);\n\t\t\t}\n\t\t\t\n\t});\n\t\n\t$(document).on('click touchstart', \"#actionPlay\",function(e){\n\t\t\tvar data = \"play\"\n\t\t\tif(e.type == 'touchstart'){\n\t\t\t\tsendRequest(data);\n\t\t\t\thandled = true;\n\t\t\t}\n\t\t\telse if(handled == false){\n\t\t\t\tsendRequest(data);\n\t\t\t}\n\t});\n\t\n\t$(document).on('click touchstart', \"#actionPause\",function(e){\n\t\t\tvar data = \"pause\"\n\t\t\tif(e.type == 'touchstart'){\n\t\t\t\tsendRequest(data);\n\t\t\t\thandled = true;\n\t\t\t}\n\t\t\telse if(handled == false){\n\t\t\t\tsendRequest(data);\n\t\t\t}\n\t});\n});\n" }, { "alpha_fraction": 0.7101293206214905, "alphanum_fraction": 0.7112069129943848, "avg_line_length": 29.442623138427734, "blob_id": "c29d8b7ae3cd7dcc2f4fb2eb3190654e5a05743d", "content_id": "24876c8e7dd323a5e50c139c9d3ad58465ce8b04", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/local_server/local_server.py", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "import requests as req\nfrom subprocess import Popen\nimport webbrowser\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\n#get selenium driver\ndef get_driver():\n\tPopen('taskkill /im chromedriver.exe /f')\n\tPopen('taskkill /im chrome.exe /f')\n\ttime.sleep(3)\n\toptions = webdriver.ChromeOptions() \n\toptions.add_argument(\"user-data-dir=C:\\Users\\gsiders\\AppData\\Local\\Google\\Chrome\\User Data\") \n\toptions.add_argument(\"--start-maximized\") \n\tdriver = webdriver.Chrome(\"c:\\dev\\chromedriver.exe\",chrome_options=options)\n\treturn driver\ndef first_run(driver): \n\tr = req.get(\"https://hotelcast.gsiders.app/command/\")\n\turl = r.text\n\tlocal_url = open(\"command.txt\", \"w\")\n\tlocal_url.write(url)\n\tlocal_url.close()\n\ndef second_run(driver):\n\tr = req.get(\"https://hotelcast.gsiders.app/command/\")\n\turl = r.text\n\tcommand_file = open(\"command.txt\", \"r\")\n\tlocal_command = command_file.read()\n\tcommand_file.close()\n\turl = url.strip(\"\\n\")\n\tif(url == local_command): \n\t\tprint \"matches local url...will continue listeing\"\n\t\ttime.sleep(3)\n\t\treturn second_run(driver)\n\tif(url == \"pause\"): \n\t\tel = driver.find_element_by_class_name(\"nf-player-container\")\n\t\tel.send_keys(Keys.RETURN)\n\t\tnew_url = open(\"command.txt\", \"w\")\n\t\tnew_url.write(url)\n\t\tnew_url.close()\n\t\treturn second_run(driver)\n\tif(url == \"play\"): \n\t\tel = driver.find_element_by_class_name(\"nf-player-container\")\n\t\tel.send_keys(Keys.RETURN)\n\t\tnew_url = open(\"command.txt\", \"w\")\n\t\tnew_url.write(url)\n\t\tnew_url.close()\n\t\treturn second_run(driver)\n\tif(url != local_command):\n\t\tprint url\n\t\tprint \"doesn't match local url...opening url\"\n\t\tdriver.get(url)\n\t\tdriver.fullscreen_window()\n\t\tnew_url = open(\"command.txt\", \"w\")\n\t\tnew_url.write(url)\n\t\tnew_url.close()\n\t\tsecond_run(driver)\ndriver = get_driver()\nfirst_run(driver)\nsecond_run(driver)" }, { "alpha_fraction": 0.6847935318946838, "alphanum_fraction": 0.6921786069869995, "avg_line_length": 28.939699172973633, "blob_id": "91407c6d3e5ddf9678d0b20bf62b7ba5a110fc61", "content_id": "c9a72e1b141fab5b56ff61e84beabac68d352735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5958, "license_type": "no_license", "max_line_length": 163, "num_lines": 199, "path": "/django_webapp/castapp/views.py", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n#django imports \nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import resolve\nimport os\n#guidebox library\nimport guidebox\n#setup guidebox\n\n#So my api key is not on github...\nif os.name == \"nt\":\n\tKeyDoc = open(\"../../guidebox_api_key.txt\", 'r')\nelse:\n\tKeyDoc = open(\"/home/guidebox_api_key.txt\", 'r') \napi_key = KeyDoc.read()\napi_key = api_key.strip('\\n')\nguidebox.api_key = api_key\n\n#Classes\nclass 
SearchResults:\n\tdef __init__(self,id):\n\t\tself.id = id\n\t\tself.title = None\n\t\tself.img = None\n\tdef add_data(self,id,title,img,bio):\n\t\tself.id = id\n\t\tself.title = title\n\t\tself.img = img \n\t\tself.bio = bio\nclass ShowObject:\n\tdef __init__(self,id):\n\t\tself.id = id\n\t\tself.title = None\n\t\tself.img = None\n\tdef add_data(self,id,title,img_lg,bio):\n\t\tself.id = id\n\t\tself.title = title\n\t\tself.img_lrg = img_lg\n\t\tself.bio = bio\nclass MovieObject:\n\tdef __init__(self,id):\n\t\tself.id = id\n\t\tself.title = None\n\t\tself.img = None\n\tdef add_data(self,id,title,img_lg,bio,netflix,hulu,amazon):\n\t\tself.id = id\n\t\tself.title = title\n\t\tself.img_lrg = img_lg\n\t\tself.bio = bio\n\t\tself.netflix = netflix\n\t\tself.hulu = hulu\n\t\tself.amazon = amazon\nclass SeasonObject:\n\tdef __init__(self,id):\n\t\tself.id = id\n\t\tself.title = None\n\t\tself.img = None\n\tdef add_data(self,id,ses_num,title,img,bio,url):\n\t\tself.id = id\n\t\tself.title = title\n\t\tself.img = img\n\t\tself.bio = bio\t\t\n\t\tself.url = url\n\t\tself.season_number = ses_num\n\t\t\n#views \t\n#demo mode toggle\ndemo_mode = False\n\ndef index(request):\n\t\n\tif(demo_mode == True):\n\t\treturn render(request, 'demo_index.html', {\"title\": \"Hotel Cast\", \"demo_mode\":demo_mode} )\n\telse:\n\t\treturn render(request, 'index.html', {\"title\": \"HotelCast\", \"demo_mode\":demo_mode} )\n\ndef search_show(request):\n\tquery = request.GET.get('query')\n\tresults = guidebox.Search.shows(feild='title',query=query)\n\tresults = results.results\n\tresult_dict = dict()\n\tfor result in results:\n\t\tid = result.id\n\t\ttitle = result.title \n\t\timg = result.artwork_208x117\n\t\tshow_bio = guidebox.Show.retrieve(id=id)\n\t\tbio = show_bio.overview\n\t\tresult_dict[id] = SearchResults(id)\n\t\tresult_dict[id].add_data(id,title,img,bio)\n\tsearch_type = \"show\"\n\treturn render(request, 'search.html', {'search_type':search_type, 'results':result_dict, \"demo_mode\":demo_mode}) \n\ndef search_movie(request):\n\tquery = request.GET.get('query')\n\tresults = guidebox.Search.movies(feild='title',query=query)\n\tresults = results.results\n\tresult_dict = dict()\n\tfor result in results:\n\t\tid = result.id\n\t\ttitle = result.title \n\t\timg = result.poster_240x342\n\t\tshow_bio = guidebox.Movie.retrieve(id=id)\n\t\tbio = show_bio.overview\n\t\tresult_dict[id] = SearchResults(id)\n\t\tresult_dict[id].add_data(id,title,img,bio)\n\tsearch_type = \"movie\"\n\treturn render(request, 'search.html', {'search_type':search_type, 'results':result_dict, \"demo_mode\":demo_mode}) \t\ndef movie_info(request):\n\tmovie_id = request.GET.get('id')\n\tmovie_info = guidebox.Movie.retrieve(id=movie_id,include_links=True)\n\tmovie_dict = dict()\n\timg_lrg = movie_info.poster_400x570\n\tmovie_title = movie_info.title\n\tmovie_bio = movie_info.overview\n\t#prefill links to none\n\tnetflix = None\n\tamazon = None\n\thulu = None\n\tfor obj in movie_info.subscription_web_sources:\n\t\tif(obj.source == \"netflix\"):\n\t\t\turl = obj.link\n\t\t\turl = url.replace('Movie','watch')\n\t\t\turl = url.replace('movies.','')\n\t\t\tnetflix = url\n\t\tif(obj.source == \"amazon_prime\"):\n\t\t\tamazon = obj.link\n\t\tif(obj.source == \"hulu_plus\"):\n\t\t\thulu = obj.link\n\tmovie_dict[movie_id] = MovieObject(movie_id)\n\tmovie_dict[movie_id].add_data(movie_id,movie_title,img_lrg,movie_bio,netflix,hulu,amazon)\n\treturn render(request,'movie.html', {'title': movie_title, 'movie_info':movie_dict, 
\"demo_mode\":demo_mode})\t\n\t\n\t\n\ndef show_info(request):\n\tshow_id = request.GET.get('id')\n\tshow_info = guidebox.Show.retrieve(id=show_id)\n\tshow_dict = dict()\n\timg_lrg = show_info.artwork_608x342\n\tshow_title = show_info.title\n\tbio = show_info.overview\n\tshow_dict[show_id] = ShowObject(show_id)\n\tshow_dict[show_id].add_data(show_id,show_title,img_lrg,bio)\n\t#episode info\n\tepisodes = guidebox.Show.episodes(id=show_id,season=1,reverse_ordering=True,include_links=True)\n\tresults = episodes.results\n\tresult_dict = OrderedDict()\n\tfor result in results:\n\t\tid = result.id\n\t\ttitle = result.original_title\n\t\timg = result.thumbnail_400x225\n\t\tbio = bio\n\t\tses_num = result['season_number']\n\t\turl = result['subscription_web_sources']\n\t\tnetflix_status = \"no\"\n\t\tfor obj in url:\n\t\t\tif obj['source'] == \"netflix\":\n\t\t\t\tnetflix_status = \"yes\"\n\t\t\t\turl = obj['link']\n\t\t\t\turl = url.replace('Movie','watch')\n\t\t\t\turl = url.replace('movies.','')\n\t\t\tif obj['source'] == \"hulu_plus\":\n\t\t\t\turl = obj['link']\n\t\tresult_dict[id] = SeasonObject(id)\n\t\tresult_dict[id].add_data(id,ses_num,title,img,bio,url)\n\treturn render(request,'show.html', {'title': show_title, 'show_info':show_dict, 'episodes': result_dict, 'netflix_status': netflix_status, \"demo_mode\":demo_mode})\ndef write_command(request):\n\tcommand = request.GET.get('command');\n\timport os\n\tcommand_file = open(os.path.join(os.path.dirname(__file__), 'command.txt'), 'w')\n\tcommand_file.write(command)\n\tcommand_file.close()\n\tdata = {\n 'success_message': \"true\"\n\t}\n\treturn JsonResponse(data)\ndef get_command(request):\n\tcontent = open(os.path.join(os.path.dirname(__file__), 'command.txt'), 'r')\n\tcontent = content.read()\n\treturn HttpResponse(content, content_type='text/plain')\ndef remote(request):\n\treturn render(request, 'remote.html',)\n\t\n#error views\ndef not_found(request):\n return render(request, 'errors/error_404.html')\ndef server_error(request):\n return render(request, 'errors/error_500.html')\n \ndef permission_denied(request):\n return render(request, 'errors/error_403.html')\n \ndef bad_request(request):\n return render(request, 'errors/error_400.html')\n" }, { "alpha_fraction": 0.7426160573959351, "alphanum_fraction": 0.746835470199585, "avg_line_length": 45.326087951660156, "blob_id": "0dd3af49bfe2ca8d822f0efc02add3de411b66fb", "content_id": "1b1e3748022d181c9ab4cd4675cf2d08a85efb05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2133, "license_type": "no_license", "max_line_length": 335, "num_lines": 46, "path": "/Readme.md", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "# Hotel Cast\nA Chromecast alternative that works on hotel WiFi. \n[Live Demo](https://gsiders.app/demo)\n## Why?? \nSince I travel for work I never can use my chromecast since most hotel networks isolate clients or require a sign in page. Both of these cause the chromecast not to work. So I took it into my own hands to write and application to fix this. Yes I could have just bought a travel router or a Roku, but I like a good challenge!\n\nI normally just plug my laptop into the TV and watch whatever I need. That is fine and all but playing, pausing, changing videos requires me to get up and interact with my computer. I tried using TeamViewer on my phone to control it but it just didn't work well. 
So I took things into my own hands and wrote an app I call HotelCast!\n\n\n## How it works: \nIt consists of three parts:\n\n#### CLIENT:\n\n- Client uses a front-facing web app that serves as the control for the local server.\n- Client selects the show/movie/YouTube video they want to watch; the link is written to the command file \n\n#### CLOUD SERVER: \n- Serves frontend\n- Serves and hosts the command file of what to \"cast\" (play) on the local computer\n- Django webapp\n\n#### Local Server: \n\n- Local server pulls from the cloud server the commands to run via a python script. \n- The script is constantly querying the cloud server for changes to the command file.\n### EXAMPLE:\n\n#### Client:\n- Client selects an episode of a show on Netflix (Parks and Rec, season 1, episode 7)\n- WebApp writes to the cloud server file (command.txt) the direct URL to play (the Netflix video)\n#### Cloud Server:\n- Serves the command file containing the Netflix watch URL (command.txt)\n#### Local server:\n- Using python I query the cloud server for the command file (command.txt)\n- I then use python to play the URL (doesn't matter if it's Netflix, Hulu, YouTube, etc.) in Chrome (Selenium ChromeDriver) in full screen\n## Requirements\n- Python 2.7 \n- Django 1.11.16\n- [Guidebox](http://guidebox.com)\n- Python Requests \n- Selenium\n\n## About\nIdea and code were written by \n[Grant Siders](http://grantsiders.com). Feel free to contribute by forking the repo and opening a pull request! \n\n" }, { "alpha_fraction": 0.5104926824569702, "alphanum_fraction": 0.5323905348777771, "avg_line_length": 32.72307586669922, "blob_id": "94ffd89a399de1d8b860923051eaaf53bf01391f", "content_id": "19c39afe2b7da172c4e27e45f781625ddfaba388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2192, "license_type": "no_license", "max_line_length": 194, "num_lines": 65, "path": "/django_webapp/castapp/static/js/guidebox_load.js", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "\nfunction getUrlVars()\n{\n    var vars = [], hash;\n    var hashes = window.location.href.slice(window.location.href.indexOf('?') + 1).split('&');\n    for(var i = 0; i < hashes.length; i++)\n    {\n        hash = hashes[i].split('=');\n        vars.push(hash[0]);\n        vars[hash[0]] = hash[1];\n    }\n    return vars;\n}\n\n\n$(function(){\n\tvar show_id = getUrlVars()[\"id\"];\n\tvar api_key = \"b8164d1dad070aa1840c4670c661ed63c0ee0bc1\"\t\n\t$.getJSON(\"https://api-public.guidebox.com/v2/shows/\" + show_id + \"/episodes?api_key=\" + api_key, function(json1){\n\t\t\tnum_season = json1.results[0].season_number;\n\t\t\tfor (i = 2; i <= num_season; i = i + 1) {\n\t\t\t\ts_num = i;\n\t\t\t\t$(\"#season_dropdown\").append('<option value=\"' + i + '\">Season' + i + '</option>');\n\t\t};\n\t});\n\t\n\t$(\"#season_dropdown\").change(function(){\n\t\t\tvar season = this.value;\n\t\t\t$(\"#season_row\").empty()\n\t\t\t$.getJSON(\"https://api-public.guidebox.com/v2/shows/\" + show_id + \"/episodes?api_key=\" + api_key + \"&include_links=true&season=\" + season , function(json){\n\t\t\t\t\t$.each(json.results, function(d, item){\n\t\t\t\t\t\ttitle = item.original_title;\n\t\t\t\t\t\timg = item.thumbnail_400x225;\n\t\t\t\t\t\turls = item.subscription_web_sources;\n\t\t\t\t\t\tnetflix_status = $(\"#netflix_status\").attr(\"data\");\n\t\t\t\t\t\tif(netflix_status == \"true\"){\n\t\t\t\t\t\t\t$.each(urls, function(i, item){\n\t\t\t\t\t\t\t\tsource = item.source;\n\t\t\t\t\t\t\t\tvar netflix = \"netflix\";\n\t\t\t\t\t\t\t\tif(source == 
netflix){\n\t\t\t\t\t\t\t\t\turl = item.link;\n\t\t\t\t\t\t\t\t\turl = url.replace(\"movies.\", \"\");\n\t\t\t\t\t\t\t\t\turl = url.replace(\"Movie\",\"watch\");\n\t\t\t\t\t\t\t\t\t$(\"#season_row\").append('<div class=\"col-md-3 col-sm-6 mb-4\"><span id=\"actionLink\" data=\"' + url + '\"><img class=\"img-fluid\" src=\"' + img +'\" alt=\"\"></span><p>' + title + '</p></div>');\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t }\n\t\t\t\t\t else{\n\t\t\t\t\t\t$.each(urls, function(i, item){\n\t\t\t\t\t\t\tsource = item.source;\n\t\t\t\t\t\t\tconsole.log(source);\n\t\t\t\t\t\t\tvar hulu_plus = \"hulu_plus\";\n\t\t\t\t\t\t\tif(source == hulu_plus){\n\t\t\t\t\t\t\t\turl = item.link;\n\t\t\t\t\t\t\t\t$(\"#season_row\").append('<div class=\"col-md-3 col-sm-6 mb-4\"><span id=\"actionLink\" data=\"' + url + '\"><img class=\"img-fluid\" src=\"' + img +'\" alt=\"\"><p>' + title + '</p></div>');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t }\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\t\t});\n\t\t\t\t\n\t\t\t});\n\t});\n\t\n});" }, { "alpha_fraction": 0.6408730149269104, "alphanum_fraction": 0.6408730149269104, "avg_line_length": 30.5, "blob_id": "d02731cd841dadb07173c11b167f3e245c3b9477", "content_id": "ca2988092924e78df3673e319abeb8b990181079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/django_webapp/castapp/urls.py", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\n\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='home'),\n\turl(r'^search_show/$', views.search_show, name='search'), \n\turl(r'^search_movie/$', views.search_movie, name='search'), \n\turl(r'^show/?', views.show_info, name='show '), \n\turl(r'^movie/?', views.movie_info, name='movie'), \n\turl(r'^execute/$', views.write_command, name=\"execute\"),\n\turl(r'^command/$', views.get_command, name=\"command\"),\n\turl(r'^remote/?', views.remote, name=\"remote\"),\n]\n" }, { "alpha_fraction": 0.6033057570457458, "alphanum_fraction": 0.6033057570457458, "avg_line_length": 21.75, "blob_id": "1656a564def9887369012004af885a8823259a23", "content_id": "b13f187a6afa84cad019f5e767286c088a9000ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 363, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/django_webapp/castapp/templates/errors/error_404.html", "repo_name": "rmartinsfer/HotelCast", "src_encoding": "UTF-8", "text": "{% extends 'errors/error_base.html' %}\n{% block title %} Page Not Found! {% endblock %}\n{% block content %}\n<div class=\"card-header\">\n <center>\n Sorry that page was not found! \n </center>\n</div>\n<div class=\"card-block\">\n <center>\n <p class=\"lead\">\n Ope! Your requested page was not found! Go <a href=\"/\">Home</a>.\n </p>\n <center>\n</div>\n{% endblock %}" } ]
8
brotherJ4mes/bash_scripts
https://github.com/brotherJ4mes/bash_scripts
82da909df979bf3465c6bf1c70aa0e600bfd243a
9d6124cb4d35b78de00a4b0fcb861a5c9b5ef87b
4216efbd009eda1a57292693efd65e193272ff6b
refs/heads/master
2023-06-08T03:00:59.169818
2023-05-25T16:26:50
2023-05-25T16:26:50
166,281,964
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7597402334213257, "alphanum_fraction": 0.7597402334213257, "avg_line_length": 50.33333206176758, "blob_id": "bd0a7755529585d1430ab1a3a6ca9c278d9ffc61", "content_id": "4c6e2fc0129ade5379b09ce8a134cb1248139835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 154, "license_type": "no_license", "max_line_length": 83, "num_lines": 3, "path": "/old/meet_mute.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "tmp=$(xdotool getactivewindow); \n/usr/bin/xdotool windowactivate --sync $(/usr/bin/xdotool search meet) key ctrl+d; \n/usr/bin/xdotool windowactivate $tmp\n" }, { "alpha_fraction": 0.7654321193695068, "alphanum_fraction": 0.7654321193695068, "avg_line_length": 12.333333015441895, "blob_id": "7dd9f27f575fb86a7a770180c000bef1fe661781", "content_id": "74cb820b900a2fa66b959487090fa018a13de554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 81, "license_type": "no_license", "max_line_length": 17, "num_lines": 6, "path": "/old/reset_keys.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource ~/.bashrc\nsetxkbmap -option\nkillall xbindkeys\nkillall xcape\n\n" }, { "alpha_fraction": 0.6328571438789368, "alphanum_fraction": 0.6442857384681702, "avg_line_length": 23.13793182373047, "blob_id": "105ecfe919fe22928333f799199b08f72c0017d2", "content_id": "dfda0b8a0b482fd3a5c4b96c8dd2d88305b85cba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 700, "license_type": "no_license", "max_line_length": 94, "num_lines": 29, "path": "/old/swopera.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsource ~/.bashrc\nzenity --notification --text='SWOPERA STARTED!'\nrm ~/.kill_swopera 2> /dev/null # remove backdoor\n\nop_active=0 # assume opera not active to start\n\nwhile true; do\n\tsleep .05\n\t\n\t# store old state\n\told=$op_active\n\t\n\t# set op_active if neccessary\n\txdotool getactivewindow getwindowname | grep -Eiq 'opera|calc' && op_active=1 || op_active=0\t\n\n\t# check if op_active has changed\n\tif [ \"$op_active\" -ne \"$old\" ]; then\n\t\t[[ \"$op_active\" -eq 1 ]] && opera_mac && echo 'OPERA or CALC!'\n\t\t[[ \"$op_active\" -eq 0 ]] && mac && echo 'ELSE!'\n\tfi\n\t\n\t# backdoor to exit loop (touch ~/.kill_swopera) \n\t[[ -f ~/.kill_swopera ]] && break\n\ndone\n\n\nzenity --notification --text='Swopera ENDED!'\n" }, { "alpha_fraction": 0.37837839126586914, "alphanum_fraction": 0.38223937153816223, "avg_line_length": 36, "blob_id": "28252603cd8b95c015c831e60ac4014d95e32800", "content_id": "2063cb784f218af71f9f8678dec8e76c66e91aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 259, "license_type": "no_license", "max_line_length": 87, "num_lines": 7, "path": "/tex.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npdflatex -interaction nonstopmode $1 | grep '\\!' 
&& $HOME/my_scripts/toast.sh skull.png\npkill -HUP mupdf\necho '======================================='\necho '============ DONE ====================='\necho '======================================='\n" }, { "alpha_fraction": 0.6008174419403076, "alphanum_fraction": 0.6389645934104919, "avg_line_length": 47.93333435058594, "blob_id": "ee70ceb1c076d0139ddd8ba2ba4e06f6d5cb60ff", "content_id": "dfbf2a84001cb716cbbd7f079ead48b3e9f4083e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 734, "license_type": "no_license", "max_line_length": 153, "num_lines": 15, "path": "/dl_open.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nwhile [ -f /home/kessler/Downloads/*opdownload ]; do sleep .1; done\n\n# check for new pdfs and open with mupdf\nfind /home/kessler/Downloads/ -cmin -.1 -type f -iname '*.pdf' | xargs -r -n1 -P10 -iz /usr/local/bin/mupdf-x11 -C FFFFCC \"z\"\n\n# check for new image files and open with feh\nfind /home/kessler/Downloads/ -cmin -.1 -type f -regextype egrep -regex '.*.png|.*.jpg|.*.gif' | xargs -r -iz /usr/local/bin/feh -Z -x -g +1000+500 \"z\"\n\n\n# old method (not even sure if it worked)\n#source ~/.scriptsrc\n#[[ -z `find ~/Downloads/ -mmin -.01 -iname '*.pdf' -type f` ]] && exit 0\n#ls -1rt ~/Downloads/*.pdf | tail -n1 | xargs -iz mupdf-gl \"z\"\n#ls -1rt ~/Downloads/*.pdf | tail -n1 | xargs -iz mupdf-x11 -r 120 -C FFFDD0 \"z\"\n" }, { "alpha_fraction": 0.6052104234695435, "alphanum_fraction": 0.7204408645629883, "avg_line_length": 28.294116973876953, "blob_id": "db0fb2ae0b14d11d33639a309e914d2e28362c91", "content_id": "69339ee347ffda2c9b7b139628bdab945c56d18c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 998, "license_type": "no_license", "max_line_length": 86, "num_lines": 34, "path": "/fnt", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\necho dont use this it messes up horz spacing\n#fontstr=\"Menlo $1\"\n#dconf write /org/gnome/desktop/interface/monospace-font-name \"'Menlo $1'\"\n\n\n\n# the below is all redundant (instead use system wide monospace setting above)\n\n## local\n#key='/org/gnome/terminal/legacy/profiles:/:425cff8d-2fe7-49e0-850a-2d5953a7164a/font'\n#dconf write $key \"'Menlo $1'\"\n#\n### rhino\n#key='/org/gnome/terminal/legacy/profiles:/:0b06c44c-ba09-451d-95d5-405fbbac85d1/font'\n#dconf write $key \"'Menlo $1'\"\n#\n## bear\n#key='/org/gnome/terminal/legacy/profiles:/:b9124069-4e11-41f6-ba67-fa25ddffd6d7/font'\n#dconf write $key \"'Menlo $1'\"\n#\n## turquoise (cheyenne?)\n#key='/org/gnome/terminal/legacy/profiles:/:5594c966-cace-47bc-89a0-680b53a9a013/font'\n#dconf write $key \"'Menlo $1'\"\n#\n## latenight\n#key='/org/gnome/terminal/legacy/profiles:/:20998e02-8c01-4670-aab8-2ef16bb557cf/font'\n#dconf write $key \"'Menlo $1'\"\n#\n##flux\n#key='/org/gnome/terminal/legacy/profiles:/:5d667012-532b-46e5-9927-ff5111f95d91/font'\n#dconf write $key \"'Menlo $1'\"\n\n\n" }, { "alpha_fraction": 0.6057142615318298, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 33.79999923706055, "blob_id": "5bb043a5cb11546fbc68e9e4e3a20a84ec893e34", "content_id": "29e7ef29d49560dbddba9c8c2922a67506f4ebe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 525, "license_type": "no_license", "max_line_length": 95, "num_lines": 15, "path": "/unlock.sh", "repo_name": "brotherJ4mes/bash_scripts", 
"src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource /home/kessler/.scriptsrc\n\n#[[ $1 == \"-f\" ]] && rm /home/kessler/.kb\n\n#(if monitor connected with 2K res, assume docked)\n[[ `xrandr | grep 2560` ]] && /home/kessler/my_scripts/set_kb.sh m\n[[ `xrandr | grep 2560` ]] || /home/kessler/my_scripts/set_kb.sh w\n\n#/home/kessler/my_scripts/set_kb.sh m \n# run any other settings that dont seem to stick\n#for id in $(xinput | grep -iE 'mouse|logitech' | grep -Eo 'id=[0-9]+' | grep -Eo '[0-9]+'); do\n#\txinput set-prop $id \"Evdev Scrolling Distance\" -1 1 1;\n#done\n\n\n\n" }, { "alpha_fraction": 0.6333907246589661, "alphanum_fraction": 0.6368330717086792, "avg_line_length": 17.677419662475586, "blob_id": "4445d72fdebf209d2f35ff2bf8a715282ecccef2", "content_id": "560c1a0382840f9c6f93c4f39b2774dbf326d363", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 581, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/xdo_slides.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# takes argument in and calls xdotool to navigate the menu bar with alt\n\nsleep .5\n\n\ncase $1 in\n\t\tnewslide)\n\t\t\t/usr/bin/xdotool key --clearmodifiers alt+s n\n\t\t\t;;\n\t\tsuper)\n\t\t\t/usr/bin/xdotool key --clearmodifiers alt+o s s\n\t\t\t;;\n\t\tsub)\n\t\t\t/usr/bin/xdotool key --clearmodifiers alt+o s r\n\t\t\t;;\n\t\tfnt+) # not working \n\t\t\t/usr/bin/xdotool key --clearmodifiers shift+ctrl+greater\n\n\t\t\t;;\n\t\tfnt-) # not working \n\t\t\t/usr/bin/xdotool key --clearmodifiers shift+ctrl+less\n\t\t\t;;\n\t\trepimg)\n\t\t\t/usr/bin/xdotool key --clearmodifiers alt+o M Z U\n\t\t\t;;\n\t\t*)\n\t\t\techo else\n\t\t\t;;\n\nesac\n\n\n" }, { "alpha_fraction": 0.5964630246162415, "alphanum_fraction": 0.6318327784538269, "avg_line_length": 21.925926208496094, "blob_id": "c49d780deeef8027a6d5dc168eb1cf4ea2de067c", "content_id": "57fe2c437bc1c08b459e8f630e78374bb5c7b119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 622, "license_type": "no_license", "max_line_length": 64, "num_lines": 27, "path": "/old/new_timer", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsleep .1\nxdotool key super+Up # detach window if necessary\n#xdotool key control+l # clear screen doesn't work\nsleep .5\nxdotool key --clearmodifiers shift+F10 r 7\nsleep .5\nwmctrl -r :ACTIVE: -e 0,10000,0,300,100 # move to top right\nwmctrl -r :ACTIVE: -N TIMER # rename window\n\n\n\n\n\n\nend=$(date -d \"+ $1 minutes\" +%s) #define date in epoch seconds\ndiff=$(( $end - $(date +%s) )) #define difference \n\nwhile [[ $diff -gt 0 ]]; do\n\techo -ne \"$(date -u -d @$diff +%H:%M:%S )\\n\"\n\tdiff=$(( $end - `date +%s` )) #define difference \n\tsleep 1;\ndone\n\n# times up!\nfeh -x -. $HOME/pics/icons/sands-of-time.png\nexit\n\t\n\n" }, { "alpha_fraction": 0.6260504126548767, "alphanum_fraction": 0.6281512379646301, "avg_line_length": 19.65217399597168, "blob_id": "ca9189ac3d48f7f39923b67b1176dec1dfb3a651", "content_id": "724a59e33f5b3eb7272f68f1f2065d88c1ff2196", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 476, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/opera_go.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n. 
~/.scriptsrc\n\n# get highlighted string from clip board\nhl_str=$(xclip -o) \n\n# determine if string is a url or search phrase\necho $hl_str | grep -Ei 'http|\\.gov|\\.com|\\.org|\\.net' \n\nif [ $? -eq 1 ]; then\n\techo google searching...\n\tsrch=$(echo $hl_str | sed -r 's/\\s+/\\+/g')\n\techo $srch\n\tvivaldi duckduckgo.com/?q=$srch\nelse\n\techo going directly to url...\n\tvivaldi $hl_str\nfi\n\n\n# old working\n#str=$(xclip -o | sed -r 's/\\s+/\\+/g')\n#opera google.com/search?q=$str\n\n" }, { "alpha_fraction": 0.6870748400688171, "alphanum_fraction": 0.7006802558898926, "avg_line_length": 15.333333015441895, "blob_id": "0882c893b66fa7d98eba31a0eb6a862a1d79d918", "content_id": "4b1382aa84e6f8074d882343ccaaf67bb7182cc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 147, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/old/reset_kb.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource ~/.scriptsrc # load paths\n\ntoast.sh reset.png .75\npkill autokey \npkill xbindkeys\n#/bin/autokey-gtk & #> /dev/null & \nxbindkeys\n" }, { "alpha_fraction": 0.5411764979362488, "alphanum_fraction": 0.7470588088035583, "avg_line_length": 20, "blob_id": "ee3948328d8f82c21cf06abbe32105fa70ec7712", "content_id": "71d6dfc0b14c0284bffae3b64ebbf04e7fbafeef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 170, "license_type": "no_license", "max_line_length": 100, "num_lines": 8, "path": "/old/calendar_create.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "\nname=$1\nloc=$2\nt0=$3\ntf=$4\n\n#20201008T040000Z%2F20201008T050000Z\"\n\nopera \"https://www.google.com/calendar/render?action=TEMPLATE&text=$name&location=$loc&dates=%2F$tf\"\n\n" }, { "alpha_fraction": 0.620309054851532, "alphanum_fraction": 0.6379690766334534, "avg_line_length": 28.19354820251465, "blob_id": "6664c87865d3d131054c633189b5a2c772ec7728", "content_id": "18697db450a685c307bc1f873fb7d3e37e5ef484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 906, "license_type": "no_license", "max_line_length": 100, "num_lines": 31, "path": "/old/from_mac/break_timer.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# begin timer script\nshort_break=20 #minutes\n#short_break=.5 #minutes\nsnooze=1 #minutes\n\nresponse=$(osascript -e 'display dialog \"break timer is activated!\"')\nif [ -z \"$response\" ]; then\n\tosascript -e 'display dialog \"break timer has been cancelled for this session\" buttons {\"got it!\"}'\n\texit 1\nfi\n\nsleep $(echo ${short_break}*60 | bc)\n\nwhile true; do\n\tif pgrep -q ScreenSaverEngine; then # check if screen saver is running\n\t\twhile true; do\n\t\t\tif ! 
pgrep -q ScreenSaverEngine; then #if SS is done, break out of loop\n\t\t\t\tosascript -e 'display dialog \"break timer RESUMED!\"'\n\t\t\t\tsleep $(echo ${short_break}*60 | bc)\n\t\t\t\tbreak \n\t\t\tfi\n\t\t\tsleep 10\n\t\tdone\n\tfi\t\n\n\tresponse=$(osascript -e 'display dialog \"take a break, James!\"' 2> /dev/null)\n\t[[ -z $response ]] && sleep $(echo ${snooze}*60 | bc)\n\t[[ -n $response ]] && sleep $(echo ${short_break}*60 | bc)\ndone\n\n" }, { "alpha_fraction": 0.6580086350440979, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 56.75, "blob_id": "9818491ca6ee1ac9a556f1465f5a13d36f629374", "content_id": "3ef8f8d70ac513acceb0c60ca3978e8b35e38b10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 231, "license_type": "no_license", "max_line_length": 146, "num_lines": 4, "path": "/remind_backup.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# this SHOULD have been able to be a single command in the cron...grrr\n[ $(/usr/bin/date +%d) -le 7 ] && /usr/bin/zenity --warning --text='First Monday! \\nBack things up \\n update Venmo balance' --no-wrap 2> /dev/null\n" }, { "alpha_fraction": 0.6515151262283325, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21, "blob_id": "672b519d0195961a824ad75d9c0b9204a81facdc", "content_id": "ecc5e2d98d86313085adb8755c9c7faa9b99bb16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/old/from_mac/fsync.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwatch -n3 rsync -a rhino:/home/kessler/figs/ ~/figs/\n" }, { "alpha_fraction": 0.6356589198112488, "alphanum_fraction": 0.6589147448539734, "avg_line_length": 15.125, "blob_id": "998bda127a7b09dc15d9f8871a26a9dbea30448c", "content_id": "607036cd43cc9fa77dc3cd39c41f061e00594092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/unpack_ext.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nunzip $1 metadata.json\nuuid=$(jq -r '.uuid' metadata.json && rm metadata.json)\nmkdir $uuid\nunzip $1 -d $uuid\n\nrm $1\n" }, { "alpha_fraction": 0.7191011309623718, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 28.44444465637207, "blob_id": "b165a342aaa62481c4fe96c90a4d5c3d7467baeb", "content_id": "68040009e690a1905268b9cb3bf59a2db275b476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 267, "license_type": "no_license", "max_line_length": 68, "num_lines": 9, "path": "/toggle_contrast.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncurrent=$(dconf read /org/gnome/desktop/interface/gtk-theme)\n\nif [ \"$current\" == \"'Adwaita-dark'\" ]; then\n\tdconf write /org/gnome/desktop/interface/gtk-theme \"'HighContrast'\"\nelse\n\tdconf write /org/gnome/desktop/interface/gtk-theme \"'Adwaita-dark'\"\nfi\n\t\n" }, { "alpha_fraction": 0.6245733499526978, "alphanum_fraction": 0.6407849788665771, "avg_line_length": 21.538461685180664, "blob_id": "c1813128d488d727ee2f7c045427c95f56dcbbcf", "content_id": "241b29894478c094005b484563a5f8134959beae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1174, "license_type": "no_license", "max_line_length": 82, "num_lines": 52, "path": "/old/highlight.py", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/home/kessler/.linuxbrew/bin/python\n\nimport sys\nimport fitz\nimport os\nimport numpy as np\n\nfname = sys.argv[1] # filename\npgnum = int(sys.argv[2]) # page number to annotate\ncolstr = sys.argv[3] # color to use for highlight\ntext = sys.argv[4] # search string\n\n\n# fix color string to color tuple\ncol = [ float(f) for f in colstr.split(',') ]\ncol = tuple(col)\n\nprint(fname, pgnum, col, text)\n#text = \"ϕm and ϕh\" # in detail and includes ample data for the very stable case.\"\n\ndoc = fitz.open(fname)\n# save current state of file for undo\ndoc.save('/home/kessler/docs/papers/.undo/'+fname)\n\nfound=False\n\n#for page in doc:\n#\trects = page.searchFor(text) #, hit_max=1)\n#\tfor r in rects:\n#\t\tpage.addHighlightAnnot(r)\n#\t\tfound=True\n#\n\npage=doc[pgnum-1]\nrects = page.searchFor(text) #, hit_max=1)\nprint(len(rects))\n#for r in rects:\nif rects:\n\tr=np.asarray(rects)\n\tx0=min(r[:,0])\n\tx1=max(r[:,2])\n\ty0=min(r[:,1])\n\ty1=max(r[:,3])\n\n\tmyannot=page.drawRect((x0,y0,x1,y1) , color=col, fill=col,overlay=False)\n\tfound=True\n\nif not found:\n\tprint('could not find string!')\n\ndoc.saveIncr()\nos.system('pkill -HUP mupdf')\n" }, { "alpha_fraction": 0.5840708017349243, "alphanum_fraction": 0.5929203629493713, "avg_line_length": 23.565217971801758, "blob_id": "09da2980f9223b16c835b6d609ee2f3fcf58a01d", "content_id": "40d49c50269bb60cb17278843358d1baacd6962c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 565, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/launch_term.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npid=$(pgrep terminal | tail -n 1)\necho $pid\n\n\nif [[ -z $pid ]]; then # if terminal is not running open it\n\t/bin/gnome-terminal\nelse # else give it focus and open a new tab\n\t\n\t# option a: give focus to local term (if none, then open one)\n\twmctrl -a local \n\t[[ $? == 1 ]] && /bin/gnome-terminal --tab\t\n\t\n\n\t# option b: \n\t#########wmctrl -i -R `wmctrl -lp | grep $pid | cut -d' ' -f1`\n\n\t# using hangups with terminal\n\t#wmctrl -i -R `wmctrl -lp | grep $pid | grep -v hangups | cut -d' ' -f1`\n\t#[[ $? 
> 0 ]] && /bin/gnome-terminal\n\t#xdotool key Super+t\nfi\n" }, { "alpha_fraction": 0.6902050375938416, "alphanum_fraction": 0.6970387101173401, "avg_line_length": 30.071428298950195, "blob_id": "c2e6ed1e26c19f6a13caf5651815889b5e2d0421", "content_id": "c4cb08d52a810ea4050a62a05b9fff65872bffd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 439, "license_type": "no_license", "max_line_length": 100, "num_lines": 14, "path": "/daemon_unlock.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#/home/kessler/.linuxbrew/bin/gdbus monitor -y -d org.freedesktop.login1 | \\\n\n/bin/gdbus monitor -y -d org.freedesktop.login1 | \\\nwhile read line; do \n\tif echo $line | grep -q Unlock; then \n\t\tsleep 1\n\t\tnotify-send 'Unlocked!'; # remove this later\n\t\t/home/kessler/my_scripts/unlock.sh\n\tfi; \n\n\t# backdoor for runaways\n\tif [ -e /home/kessler/.backdoors/monitor_lock ]; then break; notify-send 'exiting monitor_lock'; fi\ndone\n\n\n\n\n" }, { "alpha_fraction": 0.5958333611488342, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 23, "blob_id": "2b4c21b31aa9ad6d6f8af975eec989d902e97e71", "content_id": "75827d153a1ad2bf0a9cb97473b7be18e1218775", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 240, "license_type": "no_license", "max_line_length": 95, "num_lines": 10, "path": "/toast.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nicon=\"/home/kessler/images/icons/$1\"\n#timer=${2:-1}\ntimer=${2:-.5}\n\n\ntimeout $timer /usr/local/bin/feh -. -x -g 150x150-1 $icon # & echo $! > ~/my_scripts/.toastpid\n\n#sleep .1\n#/home/kessler/.linuxbrew/bin/xdotool key Super+Tab\n" }, { "alpha_fraction": 0.6779661178588867, "alphanum_fraction": 0.6817325949668884, "avg_line_length": 32.1875, "blob_id": "35cc37ad95cd7e143e7579bfd6970ed510b521dd", "content_id": "db1f22862ed1f7ba92f4abaae09e11a62247afd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 531, "license_type": "no_license", "max_line_length": 107, "num_lines": 16, "path": "/show_meet.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# show or hide \n\nstr='^meet'\n#str='Pithos'\n\n#nwind=$(xdotool search --onlyvisible --name $str | wc -l)\n#if [ $nwind -gt 1 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\n#if [ $nwind -eq 0 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\n\nsource ~/.scriptsrc\t\nif [[ -n $(xprop -id $(xdotool search --onlyvisible --name $str) | grep -i 'window state: Iconic') ]]; then\n\txdotool search --onlyvisible --name $str windowactivate\nelse\n\txdotool search --onlyvisible --name $str windowminimize\nfi\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7355371713638306, "avg_line_length": 23.100000381469727, "blob_id": "2a569c1380b1829b63654266ac854238752192af", "content_id": "bd38fc5dc77e1ca9eb673f079cd6c19012d47e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 242, "license_type": "no_license", "max_line_length": 105, "num_lines": 10, "path": "/reload_opera.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "tmp=$(xdotool getactivewindow); \nwmctrl -R Opera\nsleep .2\n/usr/bin/xdotool key ctrl+r\nsleep .2\n/usr/bin/xdotool windowactivate $tmp\n\n\n\n#/usr/bin/xdotool 
windowactivate --sync $(/usr/bin/xdotool search --onlyvisible --name opera) key ctrl+r; \n" }, { "alpha_fraction": 0.6498855948448181, "alphanum_fraction": 0.6567505598068237, "avg_line_length": 18.863636016845703, "blob_id": "caf2234feef4793e0162e0bf0e2b83a7dc9f682e", "content_id": "76de642552b2bb0ec6781595454513e5d97e4048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "no_license", "max_line_length": 88, "num_lines": 22, "path": "/old/from_mac/OLDdoc_2_pdf", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport infile=$1\noutfile=\"$(echo $infile | rev | cut -d/ -f1 | rev | tr -d '\\' | tr ' ' _ | cut -d. -f1)\"\noutfile=Users:kessler:Desktop:${outfile}.pdf\n\n\necho $outfile\nrm /$(echo $outfile | tr : \"/\")\n\n#set out_name to \"$outfile\"\nosascript <<EOF\nset in_name to \"$infile\"\nset out_name to \"$outfile\"\n\ntell app \"Microsoft Word\"\n\topen in_name\n\tsave as active document file name out_name file format format PDF\n\tquit\nend tell\n\nEOF\n" }, { "alpha_fraction": 0.6983606815338135, "alphanum_fraction": 0.7016393542289734, "avg_line_length": 32.88888931274414, "blob_id": "acbaf5f30007673d0b2aa1372d2d57ed1de11686", "content_id": "a3e9a1356d51267ab33688a5ebcf8b43eb2e550e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 610, "license_type": "no_license", "max_line_length": 107, "num_lines": 18, "path": "/show_music.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# show or hide sakura (spotify) depending on current state\n\nstr='^spotify$|Pithos|^ncspot$|Pandora|pianobar'\n#str='Pithos'\n\n\n\nnwind=$(xdotool search --onlyvisible --name $str | wc -l)\nif [ $nwind -gt 1 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\nif [ $nwind -eq 0 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\n\nsource ~/.scriptsrc\t\nif [[ -n $(xprop -id $(xdotool search --onlyvisible --name $str) | grep -i 'window state: Iconic') ]]; then\n\txdotool search --onlyvisible --name $str windowactivate\nelse\n\txdotool search --onlyvisible --name $str windowminimize\nfi\n" }, { "alpha_fraction": 0.694915235042572, "alphanum_fraction": 0.694915235042572, "avg_line_length": 13.25, "blob_id": "52085be87611f036a419ef4a87fbafb239ffbff1", "content_id": "527b1ec2dec6f2d621721e0e1b8d73fc0d8313c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 59, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/old/restart_autokey.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npgrep autokey | xargs kill\n/bin/autokey-gtk \n\n" }, { "alpha_fraction": 0.5633187890052795, "alphanum_fraction": 0.5977680683135986, "avg_line_length": 40.060001373291016, "blob_id": "96c3c2df5a61289230901e699423fc19a098b1b1", "content_id": "a96755a397820f9b64f87df8cdaa653775b230aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2061, "license_type": "no_license", "max_line_length": 119, "num_lines": 50, "path": "/old/bt_volume.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# mute/unmute mic: (simple way `amixer set Capture toggle`, but so slow for some reason)\nif [[ $1 == 'mic' ]]; then\n\tamixer set Capture toggle\n\tamixer get Capture | grep -q '\\[off\\]' 
&& ~/my_scripts/toast.sh mic_mute.png .5 \n\tamixer get Capture | grep -q '\\[off\\]' || ~/my_scripts/toast.sh mic_mute.png .5 \n\texit 0\nfi\n\n\n\n# control output volume\n[[ \"$1\" == \"mute\" ]] && amixer set Master toggle\n[[ \"$1\" == \"up\" ]] && amixer set Master 3%+\n[[ \"$1\" == \"down\" ]] && amixer set Master 3%-\n\nlvl=$(amixer get Master | grep -io -m1 '[0-9]\\+\\%') # get percentage \namixer get Master | grep '\\[off\\]' && ~/my_scripts/toast.sh mute.png .5 &\ntimeout .5 ~/.linuxbrew/bin/feh -. -x -g 150x150-1 ~/pics/icons/headphones.png --info \"echo ' $lvl'\" -e \"yudit/24\"\n\n\n\n\n# oldway using pactl (confusing since i had to search for the \"sink\" to control \n##killall feh 2> /dev/null\n# first try to find bluetooth sink (non-zero, possibly two digits)\n## search for the patter #NN in between the words sink and 'bluez' to determine the sink index for BT\n##idx=$(pactl list sinks | sed -nr '/^Sink/,/bluez/p; /bluez/q' | grep -Eo '#[0-9]+' | tr -d '#')\n#idx=$(pactl list sinks | grep -i \"RUNNING\" -B1 | grep -oE '[0-9]+')\n#\n## otherwise adjust global volume (idx = 0)\n#[[ -z $idx ]] && idx=1\n#\n#echo $idx\n#echo $1\n#\n#[[ \"$1\" == \"mute\" ]] && pactl set-sink-mute $idx toggle\n#[[ \"$1\" == \"up\" ]] && pactl set-sink-volume $idx +3%\n#[[ \"$1\" == \"down\" ]] && pactl set-sink-volume $idx -3%\n#\n## get stats (vol lvl and mute)\n#stats=$(pactl list sinks | sed -n \"/^Sink #$idx/,/^Sink/p\" | sed -nr '/^\\s+Volume/p;/^\\s+Mute/p')\n#echo $stats | grep -iq 'Mute: yes' && timeout .5 feh -. -x -g 100x100 ~/pics/icons/mute.png \n#echo $stats | grep -iq 'Mute: yes' && ~/my_scripts/toast.sh mute.png .25\n\n#lvl=$(echo $stats | grep -oE '[0-9]{1,3}%' | head -n1)\n#timeout .25 feh -. -x -g 100x100 ~/pics/icons/headphones.png --info \"echo ' $lvl'\" -e \"yudit/18\"\n#timeout .5 ~/.linuxbrew/bin/feh -. -x -g 150x150-1 ~/pics/icons/headphones.png --info \"echo ' $lvl'\" -e \"yudit/24\"\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6473429799079895, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 12.800000190734863, "blob_id": "ee5558833ff2f664edde6fdf660ac99048add9afa", "content_id": "951deca67ec35a51b9b873152a87488add05707d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 207, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/old/99wakeup.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# this script does nothing for now\n# found a more ideal solution for monitor unlocks using gdbus (~/my_scripts/\n\ncase \"$1\" in\n\tpre)\n\techo hi > /dev/null\n\t;;\n\n\tpost)\n\techo hi > /dev/null\n\t;;\n\nesac\n" }, { "alpha_fraction": 0.7207207083702087, "alphanum_fraction": 0.7207207083702087, "avg_line_length": 54, "blob_id": "afbbd3f2203d6a47de5dc7d128ee48ad9b280a24", "content_id": "ef0deb9ca10861e544da44ac9acc3cbf539dd405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 111, "license_type": "no_license", "max_line_length": 97, "num_lines": 2, "path": "/dl_watch.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nwhile true; do find /home/kessler/Downloads/ | entr -pd /home/kessler/my_scripts/dl_open.sh; done\n\n" }, { "alpha_fraction": 0.6054628491401672, "alphanum_fraction": 0.6327769160270691, "avg_line_length": 30.190475463867188, "blob_id": "7f2289a8ab7b5043aad994f911158dc807f232e3", "content_id": "f525e1fc56ec4e10a80865f67eafa6ad674e3599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 659, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/old/old_set_kb.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \ncase \"$1\" in \n\tw) \n\tkillall xcape\n    /bin/setxkbmap -option 'altwin:swap_alt_win, ctrl:nocaps, shift:both_capslock'\n    /home/kessler/.local/bin/xcape -e 'Control_L=Escape'\n    /bin/xmodmap ~/.Xmodmap_numpad\n\ttimeout .5 feh -. -x -g 100x100+1 ~/pics/icons/window.png\n\n\t;;\n\n\tm)\n    /bin/setxkbmap -option\n    killall xbindkeys 2> /dev/null\n    killall xcape\n    /bin/setxkbmap -option 'ctrl:nocaps, shift:both_capslock, apple:alupckeys'\n    /home/kessler/.local/bin/xcape -e 'Control_L=Escape'\n    /home/kessler/.local/bin/xbindkeys\n\ttimeout .5 feh -. -x -g 100x100+1 ~/pics/icons/shiny-apple.png\n\t;;\nesac\n\n\n\t\n" }, { "alpha_fraction": 0.5849889516830444, "alphanum_fraction": 0.6114790439605713, "avg_line_length": 25.58823585510254, "blob_id": "c424a6ba789b61b162e7df1a2403d256636303b0", "content_id": "586049167830b3491fbd2910ff8b02cb1db305df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 453, "license_type": "no_license", "max_line_length": 77, "num_lines": 17, "path": "/playctl.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport LD_LIBRARY_PATH=/usr/local/lib64; \n\nif [[ $1 == 'next' ]]; then\n\t/usr/local/bin/playerctl -a $1 || echo -n 'n' > $HOME/.config/pianobar/ctl \n\t#/usr/local/bin/playerctl -a $1;\n \t#/usr/bin/xdotool key --delay 100 --clearmodifiers ctrl+period;\nfi\n\nif [[ $1 == 'play-pause' ]]; then\n\t/usr/local/bin/playerctl -a $1 || echo -n 'p' > $HOME/.config/pianobar/ctl \nfi\n\nif [[ $1 == 'previous' ]]; then\n\t/usr/local/bin/playerctl -a $1 \nfi\n\n" }, { "alpha_fraction": 0.6962962746620178, "alphanum_fraction": 0.7148148417472839, "avg_line_length": 24.714284896850586, "blob_id": "c5e82b56f00145e2d1b65cbe742dce2aa33284d3", "content_id": "bd986856799d7b83df5b2f9b039848d11ea36549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 540, "license_type": "no_license", "max_line_length": 98, "num_lines": 21, "path": "/gslides_mode.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\ndir=/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings\n\n\nif [[ $1 == 'on' ]]; then\n\tpkill autocutsel\n\tdconf reset $dir/custom24/binding\n\tdconf reset $dir/custom15/binding\n\nelif [[ $1 == 'off' ]]; then\n\tdconf write $dir/custom24/binding \"'<Primary>period'\"\n\tdconf write $dir/custom15/binding \"'<Primary>comma'\"\n\t/usr/local/bin/autocutsel -fork & # synchronize PRIMARY (mouse highlight) and CLIPBOARD (ctrl+c) \n\t/usr/local/bin/autocutsel -selection PRIMARY -fork &\n\n\nelse \n\techo 'usage: gslides_mode.sh on|off'\nfi\n" }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.6506666541099548, "avg_line_length": 22.3125, "blob_id": "e0334fb2a1d07a4413a931bc92e8671801ee2aad", "content_id": "89e8d40a35d23722b02aa9b9005442a2da0e3009", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 375, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/tp", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# turn trackpad off\nif ! [ -f ~/.tp_off ]; then\n\txinput disable 12;\n\t#timeout .25 feh -. -x -g 100x100+1 ~/pics/icons/click_red.png\n\t~/my_scripts/toast.sh click_red.png 1 \n\ttouch ~/.tp_off\n\t\t\n# turn trackpad on\nelse\n\txinput enable 12;\n\t#timeout .25 feh -. -x -g 100x100+1 ~/pics/icons/click_green.png\n\t~/my_scripts/toast.sh click_green.png 1\n\trm ~/.tp_off\nfi\n\t\n" }, { "alpha_fraction": 0.647709310054779, "alphanum_fraction": 0.6508688926696777, "avg_line_length": 25.25, "blob_id": "3ff5ee416d69ede6e4b4e434cef33fa9d42b6bd0", "content_id": "79e0f9eee4fff5353a83177d6ed846c364703cc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 633, "license_type": "no_license", "max_line_length": 85, "num_lines": 24, "path": "/launch_nautilus.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# simple: kill the current nautilus and launch a new one everytime (can be annoying)\n#/bin/nautilus -q\n#/bin/nautilus -n ~\n\n\n\n## sophisticated: never launch a new window (can be slow)\n#pid=$(pgrep -x nautilus | tail -n 1)\n#echo $pid\n#if [[ -z $pid ]]; then # if nautilus is not running open it @ home dir\n#\t/bin/nautilus -n ~ &\n#else # else give it focus and open a new tab\n#\t# option b: \n#\twmctrl -i -R `wmctrl -lp | grep $pid | cut -d' ' -f1`\n#fi\n\n\n# best (i think)....\n# search for existing window || open a new one\n#wmctrl -xR nautilus.Nautilus || /bin/nautilus \n\nwmctrl -xR nautilus.Nautilus || /bin/nautilus \n\n\n" }, { "alpha_fraction": 0.6652542352676392, "alphanum_fraction": 0.6694915294647217, "avg_line_length": 26.11111068725586, "blob_id": "f11de35576f59f2e788b9dfbb0a13a302116e58", "content_id": "e6d705b3b6fa167f3c9a95930a1cafda05d21654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 472, "license_type": "no_license", "max_line_length": 93, "num_lines": 18, "path": "/show_voice.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# show or hide sakura (hangups) depending on current state\n\n#str=$1\nstr='^Voice - *'\n#str='^Messages$'\n\n\nnwind=$(xdotool search --onlyvisible --name \"$str\" | wc -l)\nif [ $nwind -ne 1 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\n\n\nsource ~/.scriptsrc\t\nif [[ -n $(xprop -id $(xdotool search --name $str) | grep -i 'window state: Iconic') ]]; then\n\txdotool search --name $str windowactivate\nelse\n\txdotool search --name $str windowminimize\nfi\n\n\n" }, { "alpha_fraction": 0.7412587404251099, "alphanum_fraction": 0.7622377872467041, "avg_line_length": 34.5, "blob_id": "ab8ceb944217c67d7eed3ec0ef81931dffa61303", "content_id": "f6bd94c941eea85b5749e0fed9d1247179b49889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 143, "license_type": "no_license", "max_line_length": 75, "num_lines": 4, "path": "/cp_link.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/home/kessler/.linuxbrew/bin/xdotool click 3 key Down Down Down Down Return\n/home/kessler/my_scripts/toast.sh paper-clip.png .25\n\n" }, { "alpha_fraction": 0.6566416025161743, "alphanum_fraction": 0.6654135584831238, "avg_line_length": 28.55555534362793, "blob_id": "c9ad0ab505b79b5d76f9c2b8c16745f32e72ef7c", "content_id": "4469a12ea81934bb559c4a4a6431f8c2fa426b36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 67, "num_lines": 27, "path": "/gimp/scale_layer.py", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# coding: utf-8\n\nfrom gimpfu import *\n\ndef scale_layer_to_canvas_size(img, layer):\n\tpdb.gimp_image_undo_group_start(img)\n\tfactor = min (float(img.width) / layer.width,\n\tfloat(img.height) / layer.height)\n\tlayer.scale(int(layer.width * factor), int(layer.height * factor))\n\tlayer.set_offsets((img.width - layer.width) / 2,\n\t(img.height - layer.height) / 2)\n\tpdb.gimp_image_undo_group_end(img)\n\n\nregister(\"scale-layer-to-canvas-size\",\n\t\"Scale layer to canvas size\",\n\t\"Scales the layer to canvas size, keeping the aspect ratio\",\n\t\"João S. O. Bueno\", \"Public domain\", \"2014\",\n\tN_(\"Scale layer to canvas size...\"),\n\t\"*\",\n\t[(PF_IMAGE, \"image\", \"Input image\", None),\n\t(PF_DRAWABLE, \"layer\", \"Input drawable\", None), ], [],\n\tscale_layer_to_canvas_size, menu=\"<Image>/Layer/\",\n\t)\n\nmain()\n" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 21.285715103149414, "blob_id": "768f1f8209e620ac7950daf5134d80e5bd927225", "content_id": "3582f44b78bf730c512967a9adf1b188cbcfb76b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 156, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/old/highlight.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [[ \"$1\" == \"rm\" ]]; then\n\tsleep .2; xdotool click 3 key --delay 60 Up Up Return\nelse\n\tsleep .2; xdotool key --delay 200 shift+Tab Return\nfi\n" }, { "alpha_fraction": 0.6274510025978088, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 16, "blob_id": "b7e9edfe7021d7bbc4c0973b2b498eefbb9a8e1b", "content_id": "b14a06962d595e51253b0a2782714c73c40be934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 51, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/old/bt_reset.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "echo -e 'power off\\n'\nsleep 2\necho -e 'power on\\n'\n" }, { "alpha_fraction": 0.6073170900344849, "alphanum_fraction": 0.6260162591934204, "avg_line_length": 21.77777862548828, "blob_id": "8b984ee4f1821e9c19c79a23ca3e9076c47c261f", "content_id": "611057f9785bcda32e690dec071dbb4339fc8024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1230, "license_type": "no_license", "max_line_length": 120, "num_lines": 54, "path": "/old/call_highlight.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource ~/.scriptsrc\n\nwinttl=$(xdotool getactivewindow getwindowname)\n\nfn=$(echo $winttl | cut -d\\  -f1)\npg=$(echo $winttl | cut -d\\  -f3 | cut -d/ -f1)\n\n# do checks to avoid calling this by accident\nif [[ $fn != *pdf || $pg != +([0-9]) ]]; then\n\techo fn or pg is wrong\n\t/home/kessler/my_scripts/toast.sh skull.png .5\n\texit\nfi\n\n\ncd $HOME/docs/papers/\n\nwhile getopts \"ypur\" OPTION; do\n\tcase $OPTION in\n\t\ty) echo \"yellow color set\"\n\t\tcol=(1,1,0)\n\t\t;;\n\t\t\n\t\tp) echo \"pink color set\"\n\t\tcol=(1.000,0.412,0.706)\n\t\t;;\n\n\t\tu) echo \"undoing...\"\n\t\tcp -f .undo/$fn .\n\t\tpkill -HUP mupdf\n\t\texit\n\t\t;;\n\n\t\tr) echo \"reverting...\"\n\t\tcp -f .orig/$fn .\n\t\tpkill -HUP mupdf\t\n\t\texit\n\t\t;;\n\tesac\ndone\n\n#backup if necessary\n[[ -e .orig/$fn ]] || cp $fn .orig/${fn} && echo 'backing up file'\n\n# do highlighting \n#str=$(xclip -o | tr '\\n' ' ' | sed -r 's/- /-/g') # capture highlighted text (replace \\n with space except for hyphens)\nstr=$(xclip -o | tr '\\n' ' ') # capture highlighted text (replace \\n with space except for hyphens)\n# save highlighted text to notes folder\necho $str >> notes/${fn%%.pdf}.txt\npython3 $HOME/my_scripts/highlight.py $fn $pg $col \"$str\" # use PyMuPDF to highlight\n\n/home/kessler/my_scripts/toast.sh paint-brush.png .5\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 32, "blob_id": "cd5d9a4f71bbd51eff8778c6d7d1305c9a5a4df9", "content_id": "924e1497d72e4ae1280165a4de32d90ca5476d3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 33, "license_type": "no_license", "max_line_length": 32, "num_lines": 1, "path": "/hide_else.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "wmctrl -k on; wmctrl -R :ACTIVE:\n" }, { "alpha_fraction": 0.6223153471946716, "alphanum_fraction": 0.630172848701477, "avg_line_length": 43.372093200683594, "blob_id": "f4c36be5fd5eb670f630e4d9eaa3ec3cbabab659", "content_id": "70d02ef8567a49a461b80df7d40c40b79809943d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 136, "num_lines": 43, "path": "/old/backup_set_kb.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\n\n#timer=${2:-.2}\n#timer=${2:-0}\n\nwd=/home/kessler/my_scripts\n\n#${wd}/toast.sh thumb-up.png 0\n\ncase \"$1\" in \n\tw) # settings for using in laptop mode (i.e. windows keyboard)\n\t${wd}/toast.sh dungeon-light.png 0 # show window icon during configuration\n\txset -q | grep -Eo 'Caps Lock:\\s+\\w+' | grep on && xdotool key Caps_Lock # turn off caps if it's on\n\tkillall xcape\n\twmctrl -n 2 \n    /usr/bin/gsettings set org.gnome.desktop.input-sources xkb-options \"['altwin:swap_alt_win, ctrl:nocaps, shift:both_capslock' ]\"\n    /home/kessler/.local/bin/xcape -e 'Control_L=Escape'\n\t#/bin/xmodmap -p | grep Alt_L && /bin/xmodmap /home/kessler/.Xmodmap # run xmodmap if necessary (moved to restart_ak)\n\t[[ $(nmcli radio wifi) == disabled ]] && nmcli radio wifi on\n\t#pkill -F ${wd}/.toastpid # kill icon \n\t;;\n\n\tm) # settings for docked mode (i.e. mac keyboard)\n\t${wd}/toast.sh shiny-apple.png 0 # show apple icon during configuration\n\txset -q | grep -Eo 'Caps Lock:\\s+\\w+' | grep on && xdotool key Caps_Lock\n    killall xbindkeys \n    killall xcape\n\twmctrl -n 1\n    /usr/bin/gsettings set org.gnome.desktop.input-sources xkb-options \"['ctrl:nocaps, shift:both_capslock, apple:alupckeys' ]\"\n    /home/kessler/.local/bin/xcape -e 'Control_L=Escape'\n    /home/kessler/.local/bin/xbindkeys\n\t/bin/xmodmap -e \"keycode 112=Alt_R\"\n\t#/bin/xmodmap -p | grep Alt_L && /bin/xmodmap /home/kessler/.Xmodmap # run xmodmap if necessary (moved to restart_ak)\n\t#/bin/xmodmap -p | grep Alt_L && /bin/xmodmap /home/kessler/.Xmodmap # run xmodmap if necessary (moved to restart_ak)\n\t[[ $(nmcli radio wifi) == disabled ]] || nmcli radio wifi off\n\t;;\nesac\n\t\n#${wd}/toast.sh thumb-up.png 0\n\nsleep .5 # sleep for just a second so that icon actuallly shows\npkill -F ${wd}/.toastpid # kill icon \n" }, { "alpha_fraction": 0.6518151760101318, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 29.299999237060547, "blob_id": "315756c309711557449da79f99466d066416606c", "content_id": "dde2c7cd64ae69fc054c06022ddd9342ea22b76a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 606, "license_type": "no_license", "max_line_length": 109, "num_lines": 20, "path": "/old/bt_sink.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "## first ensure that one of the known bt headphones are connected\n#pactl list | grep -qEi 'qy7|wraith' || ( zenity --notification --text='connect BT device!' && export err=1 )\n#\n#echo $err\n#[ $err ] && exit\n\n\n\n# find the id of the non-internal speakers (presumably BT headphones)\n# and make that the default output device\n# get id\nid=$(pactl list sink-inputs | grep -Eom1 '[0-9]+')\nsym=$(pactl list sinks | grep -i bluez | grep -i name | sed 's/Name://')\n\necho $id\necho $sym\n\n\npactl move-sink-input $id $sym || notify-send 'could NOT set BT!'\ntimeout .2 feh -. -x -g 100x100+1 ~/pics/icons/radio-tower.png\n" }, { "alpha_fraction": 0.49367088079452515, "alphanum_fraction": 0.5221518874168396, "avg_line_length": 30.600000381469727, "blob_id": "624332af8d7143e365951aa3b6a05c579a17c928", "content_id": "d7b11e174f8799430b26373d9b127f5605aadc98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 316, "license_type": "no_license", "max_line_length": 101, "num_lines": 10, "path": "/old/clipwrite.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntitle=$(xdotool getwindowfocus getwindowname | cut -d. -f1)\n\ntimeout .5 feh -. -x -g 100x100+1 ~/pics/icons/stabbed-note.png\n\noutfile=/home/kessler/docs/papers/notes/${title}.txt\n\nxclip -o >> $outfile\necho -e '\\n \\n ===================================================================== \\n' >> $outfile\n" }, { "alpha_fraction": 0.6728231906890869, "alphanum_fraction": 0.6754617691040039, "avg_line_length": 29.31999969482422, "blob_id": "77c5b905f17d20e112a5729502c36d62a34c361f", "content_id": "8f0a051d47278acb9964392809effeb24626de74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 758, "license_type": "no_license", "max_line_length": 101, "num_lines": 25, "path": "/show_msgs.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# show or hide sakura (hangups) depending on current state\n\n#str=$1\nstr='^Messages$|^Calls$|^Voicemail$|^Incoming\\scall:|^Call\\swith'\n#str='^Messages$'\n\nnwind=$(xdotool search --onlyvisible --name \"$str\" | wc -l)\nif [ $nwind -ne 1 ]; then /home/kessler/my_scripts/toast.sh hazard-sign.png; fi\n\nsource ~/.scriptsrc\t\nif [[ -n $(xprop -id $(xdotool search --name $str) | grep -i 'window state: Iconic') ]]; then\n\txdotool search --name $str windowactivate\nelse\n\txdotool search --name $str windowminimize\nfi\n\n\n\n#source ~/.scriptsrc\t\n#if [[ -n $(xprop -id $(xdotool search --name '^hangups$') | grep -i 'window state: Iconic') ]]; then\n#\txdotool search --name '^hangups$' windowactivate\n#else\n#\txdotool search --name '^hangups$' windowminimize\n#fi\n" }, { "alpha_fraction": 0.6189427375793457, "alphanum_fraction": 0.642070472240448, "avg_line_length": 18.319149017333984, "blob_id": "183854f5bbfbfdf95f706d802dc0d868ed367cc3", "content_id": "01d997105f697d6be71e47c657f135e5c5cc37e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "no_license", "max_line_length": 64, "num_lines": 47, "path": "/rm_highlight.py", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/home/kessler/.linuxbrew/bin/python\n\n\n\nimport sys\nimport fitz\nimport os\n\nfname = sys.argv[1]  # filename\npgnum = int(sys.argv[2]) # page number to annotate\ntext = sys.argv[3]  # search string\n\n\nprint(type(pgnum))\nprint('python...')\nprint(text)\n\n\ndoc = fitz.open(fname)\n# save current state of file for undo\ndoc.save('/home/kessler/docs/papers/.undo/'+fname)\n\nfound=False\n\n#for page in doc:\n#\trects = page.searchFor(text) #, hit_max=1)\n#\tfor r in rects:\n#\t\tpage.addHighlightAnnot(r)\n#\t\tfound=True\n#\n\n\npage=doc[pgnum-1]\nrects = page.searchFor(text) #, hit_max=1)\nfor r in rects:\n\t#myannot=page.addHighlightAnnot(r)\n\tcol=(1,1,0) # yellow\n\t#col=(1.000, 0.412, 0.706) # hotpink\n\n\tmyannot=page.drawRect(r, color=col, fill=col,overlay=False)\n\tfound=True\n\nif not found:\n\tprint('could not find string!')\n\ndoc.saveIncr()\nos.system('pkill -HUP mupdf')\n" }, { "alpha_fraction": 0.7019867300987244, "alphanum_fraction": 0.7114093899726868, "avg_line_length": 37.869564056396484, "blob_id": "87d9d04669ebe171ada91e9314267b03ea0a4c2d", "content_id": "12008518e9940ab9d74e8e4d67ea6fcb98c71aba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1788, "license_type": "no_license", "max_line_length": 175, "num_lines": 46, "path": "/set_kb.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\nsource ~/.scriptsrc\n\ncase \"$1\" in \n\tw) # settings for using in laptop mode (i.e. windows keyboard)\n\t\ttoast.sh windows-95.png .5 # show window icon during configuration\n\t\t#xset -q | grep -Eo 'Caps Lock:\\s+\\w+' | grep on && xdotool key Caps_Lock # turn off caps if it's on\n\t\t\t/usr/bin/gsettings set org.gnome.desktop.input-sources xkb-options \"['altwin:swap_alt_win, ctrl:nocaps, shift:both_capslock', 'terminate:ctrl_alt_bksp','ctrl:rctrl_ralt']\"\n\t\t[[ $(nmcli radio wifi) == disabled ]] && nmcli radio wifi on\n\t\txrandr --output eDP-1 --auto\n\t\techo win > ~/.kb\n\t;;\n\n\tm) # settings for docked mode (i.e. mac keyboard)\n\t\ttoast.sh bowen-knot.png .5 # show apple icon during configuration\n\t\t\t/usr/bin/gsettings set org.gnome.desktop.input-sources xkb-options \"['ctrl:nocaps, shift:both_capslock, apple:alupckeys', 'terminate:ctrl_alt_bksp', 'numpad:mac']\"\n\t\t[[ $(nmcli radio wifi) == disabled ]] || nmcli radio wifi off\n\t\techo mac > ~/.kb\n\t\txrandr --output eDP-1 --off\n\t\txrandr -s 2560x1440\n\t;;\nesac\n\t\n\n#sleep .5 # sleep for just a second so that icon actuallly shows before killing it\n#pkill -F ~/my_scripts/.toastpid # kill icon \n\n\n# do universal stuff\nkillall autocutsel\n/usr/local/bin/autocutsel -fork & # synchronize PRIMARY (mouse highlight) and CLIPBOARD (ctrl+c) \n/usr/local/bin/autocutsel -selection PRIMARY -fork &\n\n# restart xbindkeys/xmodmap and xcape\npkill xbindkeys; xbindkeys\n/usr/bin/xmodmap -e \"keycode 169 = dead_greek dead_greek dead_greek dead_greek\"\nkillall xcape; /usr/bin/xcape -e 'Control_L=Escape'\n\n\n# daemon section (using systemctl is a pain due to env differences)\n#pkill dl_watch.sh\n#pkill entr\n#pkill dl_open.sh\n#/usr/bin/gsettings set org.gnome.desktop.interface monospace-font-name 'Monospace 13'\n#nohup ~/my_scripts/dl_watch.sh &> /dev/null &\n" }, { "alpha_fraction": 0.6150870323181152, "alphanum_fraction": 0.6595744490623474, "avg_line_length": 50.599998474121094, "blob_id": "8d1e65d08c7ce0bdbdb80a2e882e630a93461cf8", "content_id": "5fcbb3a7caef9892d0f09a6389019f41e7b1078f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 517, "license_type": "no_license", "max_line_length": 129, "num_lines": 10, "path": "/old/from_mac/mount.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# stuff is currently mounted ? || no? ...mount it!\nps aux | grep sshfs | grep -iq rhino || /opt/local/bin/sshfs [email protected]:/home/kessler/ ~/rhino -o volname=RHINO -o local\n#ps aux | grep sshfs | grep -iq atoz || /opt/local/bin/sshfs kessler@atoz:/bigvol/home/kessler/ ~/atoz -o volname=ATOZ -o local\n/opt/local/bin/sshfs [email protected]:/mnt/ ~/mnt -o volname=MNT -o local\n\n## x11 running || run it!\n#pgrep -iq xquartz || /opt/x11/bin/xterm -e 'echo x11 opened; sleep .5'\n#wait\n\n" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 39.400001525878906, "blob_id": "c0805b442b99139da4e1b6dcfabc5a9ae590a550", "content_id": "d59279aadae30823b1c21e3b557c207cf53059d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 203, "license_type": "no_license", "max_line_length": 103, "num_lines": 5, "path": "/word-char-expres.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor prof_id in $(dconf list /org/gnome/terminal/legacy/profiles:/ | grep :); do\n\tdconf write /org/gnome/terminal/legacy/profiles:/${prof_id}word-char-exceptions '@ms \"-#%&+,./:=?@_~\"'\ndone\n\n" }, { "alpha_fraction": 0.6272189617156982, "alphanum_fraction": 0.6272189617156982, "avg_line_length": 14.272727012634277, "blob_id": "9f785c9cfa9e7c540c08883cbe9e2a478962a7ce", "content_id": "922e5da66c7776c7877ff3d24b93c873b5881d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 169, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/old/mute_mic.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#mute=$(/usr/bin/amixer set Capture toggle | grep '\\[off\\]')\n\n#mute='something'\n\nif [[ -z $mute ]]; then\n\ttoast.sh mic.png \nelse\n\ttoast.sh mic_mute.png\nfi\n\n" }, { "alpha_fraction": 0.5539568066596985, "alphanum_fraction": 0.6223021745681763, "avg_line_length": 15.29411792755127, "blob_id": "19fced751c907b3c763536a42603b900c5e4a114", "content_id": "5c534ea47b02b18b8cc04cd82bae3987883f1c99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/old/bt_reconnect.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfunction repair(){\n\tmac=\"$1\"\n\techo -e 'scan on\\n'\n\techo -e \"remove $mac\\n\"\n\tsleep 8\n\techo -e \"pair $mac\\n\"\n\tsleep 3\n\techo -e \"trust $mac\\n\"\n\tsleep 1\n\techo -e \"connect $mac\\n\"\n}\n\n\nrepair 34:88:5D:B5:EA:E3 # home office mouse\n#repair 34:88:5D:B5:B2:A7 # work mouse\n\n" }, { "alpha_fraction": 0.6850828528404236, "alphanum_fraction": 0.7292817831039429, "avg_line_length": 29.16666603088379, "blob_id": "b564c836d6d8e2cfe558f7455b294414af4564a9", "content_id": "babf8d0fc966e2526a773f0741553c3ea6ec4068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 362, "license_type": "no_license", "max_line_length": 96, "num_lines": 12, "path": "/fin", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#icon=\"/home/kessler/pics/icons/$1\"\n\n# center screen\n#/home/kessler/.linuxbrew/bin/feh -. -x -g +1000+500 /home/kessler/pics/icons/checkered-flag.png\n\n# standard toast style (but permanent)\n/home/kessler/.linuxbrew/bin/feh -. -x -g 150x150-1 /home/kessler/pics/icons/checkered-flag.png\n\n\n#sleep .1\n#/home/kessler/.linuxbrew/bin/xdotool key Super+Tab\n" }, { "alpha_fraction": 0.662162184715271, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26.25, "blob_id": "ac9b0c8d250258d0acf33d8e697e5ee09c6770bf", "content_id": "1e72ada76512938bb19a8864e19cb21184e6f813", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 222, "license_type": "no_license", "max_line_length": 85, "num_lines": 8, "path": "/old/daemon_hups.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\ndbus-monitor | \nwhile read line; \n\tdo echo $line | grep -i hangups && ~/my_scripts/toast.sh nothing-to-say.png .5; done\n\n\t# backdoor for runaways\n\tif [ -e /home/kessler/.backdoors/hups ]; then break; \ndone\n\n\n\n\n" }, { "alpha_fraction": 0.703071653842926, "alphanum_fraction": 0.7098976373672485, "avg_line_length": 21.461538314819336, "blob_id": "dce6dcf95db8c5be11eedfc914da5cd44778bcfc", "content_id": "29f434fa06deeac44501c8c9bc04fd29ddea2dc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 293, "license_type": "no_license", "max_line_length": 74, "num_lines": 13, "path": "/old/from_mac/doc2pdf.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ninput=\"$1\"\n\ntouch $input\n# convert to html (which cupsfilter can handle)\ntextutil -convert html -output \"/Users/kessler/Downloads/temp.html\" $input\n\nsleep 1 \nnew=${input%.*}.pdf # set new name for pdf\necho $new\ncupsfilter ~/Downloads/temp.html > \"$new\"\nrm ~/Downloads/temp.html\n\n" }, { "alpha_fraction": 0.6915887594223022, "alphanum_fraction": 0.6947040557861328, "avg_line_length": 23.69230842590332, "blob_id": "6226b3a25705ffd5d301719985546c931262037b", "content_id": "f3a5046c6d6c12b5f3eb7212ca5dd24eb958a53f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 321, "license_type": "no_license", "max_line_length": 72, "num_lines": 13, "path": "/pin_window.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# check if window is currently \"ABOVE\"\nxprop -id $(xdotool getactivewindow) | grep NET_WM_STATE | grep -q ABOVE\n\nif [ $? -eq 0 ]; then \n\twmctrl -r :ACTIVE: -b remove,above\n\t#/home/kessler/my_scripts/toast.sh safety-pin.png\nelse\n\twmctrl -r :ACTIVE: -b add,above\n\t/home/kessler/my_scripts/toast.sh pin.png\nfi\n" }, { "alpha_fraction": 0.6352941393852234, "alphanum_fraction": 0.6735293865203857, "avg_line_length": 25, "blob_id": "1bf5fa0cab0ec4c748fc26a395f1404d597d5914", "content_id": "26f5a5f56b0cae38058eb1364c31429fe7b7136b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 340, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/screenshot.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# read in args and take a screenshot\n\nargs=\"$@\"\nsavedir=$HOME/images/screenshots/\n\necho -e \"\\0007\"\ndate +%b%d_%H%M%S | xargs -iz gnome-screenshot $args -f ${savedir}/z.png\n#gnome-screenshot $args -f ${savedir}/z.png\n\n\n#timeout .5 feh -. -x -g 100x100+1 ~/pics/icons/photo-camera.png\n~/my_scripts/toast.sh photo-camera.png .5\n\n\n" }, { "alpha_fraction": 0.708737850189209, "alphanum_fraction": 0.7160193920135498, "avg_line_length": 18.571428298950195, "blob_id": "23aefa20bc3c2b91600ece53907c7a96909b8166", "content_id": "6feaaf903a732c564ca5db82cfcd0d3892bfb645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 412, "license_type": "no_license", "max_line_length": 62, "num_lines": 21, "path": "/startup.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "\n## try sleeping for 5 seconds\nsleep 5; \n\nnotify-send 'hello world!'\nsource ~/.scriptsrc\n\n\n# increase screen res\n#big 2> /dev/null\n\n##start xbindkeys\n#xbindkeys\n\n# call unlock script \nrm ~/.kb # keyboard has not been configured\n/home/kessler/my_scripts/unlock.sh\n\n# start lock monitor \nnohup /home/kessler/my_scripts/daemon_unlock.sh &> /dev/null &\n\n#while true; do find ~/Downloads | entr -dp open_pdf.sh; done\n" }, { "alpha_fraction": 0.7515923380851746, "alphanum_fraction": 0.7579618096351624, "avg_line_length": 51.33333206176758, "blob_id": "bed98be6de06cbad789800ee603a4e36aed54b1b", "content_id": "fd8d8a3623a77d4cded883f14841ae65120022ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 157, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/old/fsync.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "# constantly rsync rhino ~/figs to local ~/figs to see updated plots\n\nwatch -n2 rsync -a kessler@rhino:/home/kessler/figs/int_comp/ /home/kessler/local_figs\n" }, { "alpha_fraction": 0.7019867300987244, "alphanum_fraction": 0.7019867300987244, "avg_line_length": 17.5, "blob_id": "626eb98c21f1a755694369889952d50e7cd149fc", "content_id": "07a7848f7fc3bae77dc2716b8523887010ea5f98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 151, "license_type": "no_license", "max_line_length": 64, "num_lines": 8, "path": "/old/launch_pulse.sh", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\nif [[ -z $(pgrep pulseUi) ]]; then\n\t/usr/local/pulse/pulseUi\nelse\n\txdotool search --onlyvisible --classname pulseUi windowactivate\nfi\n\t\n\n" }, { "alpha_fraction": 0.3400796353816986, "alphanum_fraction": 0.5185799598693848, "avg_line_length": 26.907407760620117, "blob_id": "5ca0533196ce195c0d9e2a3f54a65a6ab475e0a6", "content_id": "e5eeae8a1a68f557361204858bcd531ee3d7021c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3017, "license_type": "no_license", "max_line_length": 77, "num_lines": 108, "path": "/old/emoticon.py", "repo_name": "brotherJ4mes/bash_scripts", "src_encoding": "UTF-8", "text": "\"\"\"Hangouts emoticon to emoji converter.\"\"\"\n\n\ndef replace_emoticons(string):\n    \"\"\"Replace emoticon words in string with corresponding emoji.\"\"\"\n    return _replace_words(HANGOUTS_EMOTICONS_TO_EMOJI, string)\n\n\ndef _replace_words(replacements, string):\n    \"\"\"Replace words with corresponding values in replacements dict.\n\n    Words must be separated by spaces or newlines.\n    \"\"\"\n    output_lines = []\n    for line in string.split('\\n'):\n        output_words = []\n        for word in line.split(' '):\n            new_word = replacements.get(word, word)\n            output_words.append(new_word)\n        output_lines.append(output_words)\n    return '\\n'.join(' '.join(output_words) for output_words in output_lines)\n\n\n# Emoticon conversions extracted from hangouts.google.com\nHANGOUTS_EMOTICONS_TO_EMOJI = {\n    '\\+1': '\\U0001f44d',\n    '\\-1': '\\U0001f44e',\n    '\\\\alien':'\\U0001F47D',\n    '\\\\baby':'\\U0001f476',\n    '\\\\bawl': '\\U0001F62D',\n    '\\\\beer': '\\U0001F37A', \n    '\\\\bomb': '\\U0001F4A3',\n    '\\\\burger': '\\U0001F354',\n    '\\cat': '\\U0001f638',\n    '<3cat': '\\U0001F63B',\n    '<3face': '\\U0001F60D',\n    '\\check': '\\U00002705',\t\t\n    '\\clock':'\\U000023F0',\n    '\\crab': '\\U0001f980',\n    '\\cry': '\\U0001f62d',\n    '\\cool': '\\U0001f60e',\n    '\\coffee': '\\U00002615',\n    '\\curl' : '\\U0001F4AA',\n    '\\devil': '\\U0001f608',\n    '\\drink': '\\U0001F378',\n    '\\down': '\\U0001F447',\n    '\\eggplant': '\\U0001F346',\n    '\\\\flex' : '\\U0001F4AA',\n    '\\\\fam' : '\\U0001F46A',\n    '\\\\facepalm' :'\\U0001F926',\n    '\\gun' : '\\U0001F52B',\n    '\\hot' : '\\U0001F525',\n    '\\cheers': '\\U0001F37B', \n    '\\hat': '\\U0001F3A9',\n    '\\halo': '\\U0001f607',\n    '\\hi5' : '\\U0001F64C',\n    '\\home': '\\U0001F3E1',\n    '\\hockey': '\\U0001F3D2',\n    '\\honey': '\\U0001f41d',\n    '\\hug': '\\U0001f917',\n    '\\kiss': '\\U0001f617',\n    '\\knucks': '\\U0001F44A',\n    '\\loud': '\\U0001F4E2',\n    '\\left': '\\U0001F448',\n    '\\lmao': '\\U0001F602',\n    '\\monkey': '\\U0001f435',\n    '\\milk' : '\\U0001F37C',\n    '\\\\no': '\\U0001f645',\n    '\\ok' : '\\U0001F44C',\n    '\\oh': '\\U0001f62e',\n    '\\pig': '\\U0001f437',\n    '\\pizza': '\\U0001F355',\n    '\\please':'\\U0001F64F',\n    '\\peace':'\\U0000270C',\n    '\\peach':'\\U0001F351',\n    '\\poo': '\\U0001f4a9',\n    '\\question': '\\U0001f64b',\n    '\\\\rockon': '\\U0001f918',\n    '\\\\right': '\\U0001F449',\n    '\\skull': '\\U0001F480',\n    '\\swim': '\\U0001F3CA',\n    '\\\\up': '\\U0000261D',\n    '\\\\updown': '\\U0001f643',\n    '\\wave': '\\U0001F44B',\n    '\\wink': '\\U0001f60f',\n    '\\wine': '\\U0001F377', \n    '\\wrench': '🔧 ',\n    '\\\\xfingers': '\\U0001F91E',\n    '!:)': '\\U0001f643',\n    '</3': '\\U0001f494',\n    '<3': '\\U00002764',\n    ':D': '\\U0001f600',\n    '^_^': '\\U0001f601',\n    '=D': '\\U0001f604',\n    ';)': '\\U0001f609',\n    ':)': '\\U0000263a',\n    '=)': '\\U0001f60a',\n    ':|': '\\U0001f610',\n    '-_-': '\\U0001f611',\n    ':/': '\\U0001f615',\n    ':p': '\\U0001f61c',\n    ':(': '\\U0001f61e',\n    ':o': '\\U0001f62e',\n    'X(': '\\U0001f635',\n    ':3': '\\U0001f638',\n    '\\o': '\\U0001f64b',\n    '\\m/': '\\U0001f918',\n}\n" } ]
60
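The volume script at the head of the file list above follows a common ALSA pattern: apply a change with amixer, then scrape the new level and mute state back out of "amixer get Master" for display. A minimal Python sketch of the same flow is given here purely as commentary on the recorded shell; the amixer subcommands and the '[0-9]+%' / '[off]' patterns mirror the script, while the function name and the icon step (a stand-in for the repo's toast.sh helper) are hypothetical.

import re
import subprocess

# CLI verbs -> amixer arguments, mirroring the recorded shell script.
ACTIONS = {
    "mute": ["amixer", "set", "Master", "toggle"],
    "up":   ["amixer", "set", "Master", "3%+"],
    "down": ["amixer", "set", "Master", "3%-"],
}

def adjust_volume(action):
    """Apply one action, then report the volume percentage or mute state."""
    subprocess.run(ACTIONS[action], check=True, capture_output=True)
    out = subprocess.run(["amixer", "get", "Master"], check=True,
                         capture_output=True, text=True).stdout
    match = re.search(r"[0-9]+%", out)   # like: grep -io -m1 '[0-9]\+\%'
    muted = "[off]" in out               # like: grep '\[off\]'
    # a toast.sh-style icon flash would go here
    return "muted" if muted else (match.group(0) if match else "?")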
Rakib-Mahmud/Python
https://github.com/Rakib-Mahmud/Python
2683b50c676a16ddb2e96f128fa2aeba0abbe832
e1c1568d2bd14a6b886d2dc9c26caa12f8c2f3b1
a25a42591ed7f73fd2a73e063b34e0fcf6667f8a
refs/heads/main
2023-03-04T05:47:40.558353
2021-02-12T05:25:43
2021-02-12T05:25:43
338,231,661
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.38893845677375793, "alphanum_fraction": 0.40231937170028687, "avg_line_length": 31.969696044921875, "blob_id": "2be99b8a465556abdd2907bb314a09f32f0e4def", "content_id": "ae2fd814c65ab17f30d34b39c20e93852456f7b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/decode.py", "repo_name": "Rakib-Mahmud/Python", "src_encoding": "UTF-8", "text": "import pandas as pd\r\n##############################################################\r\n#Read encoded files and tracked nucleotides in encode section\r\n##############################################################\r\ndata = pd.read_csv('encoded.csv')\r\ntags = pd.read_csv('tags.csv')\r\nrows, cols = tags.shape\r\n#mapper = {1:'A', 2:'C', 3:'G', 4:'T', 5:'.', 6:'GGG', 7:'AAC'}\r\n#string = [1,2,3,4,5,6,7]\r\n######################################\r\n#declare map and necessary variables\r\n######################################\r\nmapper = {}\r\nstring = []\r\n\r\n####################################\r\n#map number codes with nucleotides\r\n####################################\r\nfor i in range(0,rows):\r\n \r\n k,val = eval(tags['Tags'][i])\r\n mapper[val] = k\r\n string.append(val)\r\n###################################\r\n#replace codes with nucleotides\r\n###################################\r\nfor i in string:\r\n data['refvar'].replace(i, mapper[i], inplace=True)\r\n data['qvar'].replace(i, mapper[i], inplace=True)\r\n##########################\r\n#Save the decoded files\r\n##########################\r\ndata.to_csv('decoded.csv',index=False)\r\n" }, { "alpha_fraction": 0.4352855980396271, "alphanum_fraction": 0.44323933124542236, "avg_line_length": 32.625, "blob_id": "8083b5e4493c65f15aac85647983b8770df4a0af", "content_id": "f471840eec545dc43d4d30c0c6a8883d732fe235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 86, "num_lines": 40, "path": "/encode.py", "repo_name": "Rakib-Mahmud/Python", "src_encoding": "UTF-8", "text": "import pandas as pd\r\n###########################\r\n#Read sample data\r\n###########################\r\ndata = pd.read_csv('sample.csv')\r\nrows, cols = data.shape\r\n#mapper = {'A':1, 'C':2, 'G':3, 'T':4, '.':5, 'GGG':6, 'AAC':7}\r\n#string = ['A','C','G','T','.','GGG','AAC']\r\n######################################\r\n#declare map and necessary variables\r\n######################################\r\nmapper = {}\r\nstring = []\r\ncount = 0\r\n####################################\r\n#map nucleotides with number codes\r\n####################################\r\nfor i in range(0,rows):\r\n if data['refvar'][i] not in mapper.keys():\r\n count += 1\r\n mapper[data['refvar'][i]] = count\r\n string.append((data['refvar'][i],count))\r\n \r\n if data['qvar'][i] not in mapper.keys():\r\n count += 1\r\n mapper[data['qvar'][i]] = count\r\n string.append((data['qvar'][i],count))\r\n###################################\r\n#replace nucleotides with codes\r\n###################################\r\nfor i,j in string:\r\n data['refvar'].replace(i, mapper[i], inplace=True)\r\n data['qvar'].replace(i, mapper[i], inplace=True)\r\n##########################\r\n#Save the files\r\n##########################\r\ndata.to_csv('encoded.csv',index=False)\r\ndict = {'Tags': string} \r\ndf = pd.DataFrame(dict) \r\ndf.to_csv('tags.csv',index=False) #keep track of replaced nucleotides 
to get back them" } ]
2
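The encode.py/decode.py pair recorded above is a small dictionary coder: each nucleotide token in the refvar/qvar columns gets an integer code in order of first appearance, tags.csv stores the (token, code) pairs, and decoding inverts that map with the same Series.replace mechanism. The round trip can be sketched entirely in memory, assuming the same column names but skipping the CSV files; everything else here is illustrative, not part of the recorded repo.

import pandas as pd

df = pd.DataFrame({"refvar": ["A", "C", "GGG", "A"],
                   "qvar":   ["C", ".", "A", "T"]})

# Encode: number tokens in order of first appearance, as encode.py does.
mapper, tags = {}, []
for _, row in df.iterrows():
    for token in (row["refvar"], row["qvar"]):
        if token not in mapper:
            mapper[token] = len(mapper) + 1
            tags.append((token, mapper[token]))

encoded = df.replace(mapper)

# Decode: invert the saved (token, code) tags, as decode.py does.
decoded = encoded.replace({code: token for token, code in tags})
assert decoded.equals(df)  # the coding is lossless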
baddyn/NUMPY
https://github.com/baddyn/NUMPY
8193f935888cde3135b52b1e57419cf994ae551e
4f84dfb9c0fe1d2808602e6414aae6342ce7f770
d076f48c2bebba30dd548a4fb4bb5cdbeadd3bed
refs/heads/master
2020-07-05T23:26:54.801215
2019-08-17T00:29:52
2019-08-17T00:29:52
202,816,147
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 20, "blob_id": "0477928b6289969065145fccb018f9015d7262ac", "content_id": "b364e9e3021f04a8cc69619a60656f8d1c270620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 53, "num_lines": 3, "path": "/README.md", "repo_name": "baddyn/NUMPY", "src_encoding": "UTF-8", "text": "# NUMPY\n\nCONTAINS CODE FOR MAJORLY USED FUNCTIONS WITH NUMPY .\n" }, { "alpha_fraction": 0.5748308300971985, "alphanum_fraction": 0.6376184225082397, "avg_line_length": 9.289693832397461, "blob_id": "f420730b92f89f5c011c9c033a80d2d100886b60", "content_id": "2a7282a0b3cb56be0976371c8c7a993997d00d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7390, "license_type": "no_license", "max_line_length": 104, "num_lines": 718, "path": "/applied ai numpy.py", "repo_name": "baddyn/NUMPY", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\na=np.array([0,1,2,3])\nprint(a)\n#numpy is very close to hardware\n# designed for scientific computation\n#thats why so efficient with multi dim arrays\n#even though multi dim arrays can be built normally\n#we use numpy as very fast\nprint(np.arange(10)) #no.from 0 to n-1\n\n\n# In[2]:\n\n\n#proof that numpy is faster\n\n#python list\nl=range(1000)\nget_ipython().run_line_magic('timeit', '[i**2 for i in l] #square calcln')\n#%timeit mesures avg time for computation(mean)\n\n\n# In[4]:\n\n\n#now for numpy arrays\na=np.arange(1000)\nget_ipython().run_line_magic('timeit', 'a**2')\n\n\n# In[9]:\n\n\n#now lets explore numpy\n\na=np.array([0,1,2,3,4]) #this is array not a list\n# to check jsut type a\na\n\n\n# In[11]:\n\n\na.ndim #no. of dim\n\n\n# In[12]:\n\n\na.shape\n\n\n# In[13]:\n\n\nlen(a)\n\n\n# In[15]:\n\n\n#2-d 3-d\n\nb=np.array([[0,1,2],[1,2,3]]) #list of lists\nb\n\n\n# In[16]:\n\n\nb.ndim\n\n\n# In[17]:\n\n\nb.shape\n\n\n# In[18]:\n\n\nlen(b)\n\n\n# In[21]:\n\n\n#shape tells shape\n#len gives the size of first \n#i.e. no. 
of rows\n\n#3-d array\n\nc=np.array([[[0,1],[2,3]],[[4,5],[6,7]]])\n#3d is 2ds stacked on top of each other\nc\n\nprint(c.ndim)\nprint(c.shape)\nprint(len(c))\n\n#n dim array is called a tensor\n#thats why tensorflow\n\n\n# In[23]:\n\n\nprint(np.arange(1,11,2)) # start ,end(exclusive),step \n\n\n# In[25]:\n\n\n#linspace linear space\na=np.linspace(0,1,6) # start,end,no.of points\na\n\n\n# In[29]:\n\n\n#common arrays\n\na=np.ones((3,3))\nprint(a) # 3x3 array of 1s\n\nb=np.zeros((2,2))\nprint(b)\n\nc=np.eye(3) #identity matrices\nprint(c)\n\n#important is identity are also used for rect matrices\nnp.eye(3,2)\n\n\n# In[33]:\n\n\n#diag function\n\na=np.diag([1,2,3,4])\nprint(a)\n#cretaes array with given values as diag\n\n#to print diag elements\nprint(np.diag(a))\n\n\n# In[35]:\n\n\n#random \na=np.random.rand(4)\n#create arr of given shape an fill it with random samples from 0-1\na\n\n\n# In[36]:\n\n\na=np.arange(10)\na.dtype\n\n\n# In[39]:\n\n\na=np.arange(10,dtype='float')\na\n\n\n# In[40]:\n\n\n#important zeros and ones create float\n\na=np.zeros((3,3))\nprint(a.dtype)\n\n\n# In[41]:\n\n\n#complex values\n\nd=np.array([1+2j,2+4j])\nprint(d.dtype)\n\n\n# In[44]:\n\n\nb=np.array([True,False,True])\nprint(b.dtype)\n\n\n# In[50]:\n\n\na=np.array(['sita','gita','rita'])\na.dtype\n\n\n# In[52]:\n\n\n#indexing\n\na=np.arange(10)\nprint(a[4])\na[3]=1112\nprint(a)\n#value changed\n\n\n# In[54]:\n\n\n#slicing\n#to obtain subarrays\n\na=np.arange(10)\n\na[1:8:2] #startindex,endindex,stepindex\n\n\n# In[55]:\n\n\n#combining assignment and slicing\n\na=np.arange(10)\na[5:]=10 #means 5 to end index\n\nprint(a)\n\n\n# In[57]:\n\n\nb=np.arange(5)\na[5:]=b[::-1]#no value before first colon means start at 0 ,end si also empty so endindex and step by -1\nprint(a)\n\n\n# In[60]:\n\n\n#copies and views\na=np.arange(10)\nb=a[::2]\nb\n\n#note slicing doesnt create copy \n#it points to some locations inside the same array\n#using view it points a subarray ,no copy created\n#to check this\n\nnp.shares_memory(a,b)\n\n\n# In[61]:\n\n\n#now if i change a also will change\nb[0]=10\nprint(b)\nprint(a)\n#both changed\n\n\n# In[62]:\n\n\n#to enforce copy use copy fucntion\n\na=np.arange(10)\nc=a[::2].copy()\n\nnp.shares_memory(a,c)\n# now diff location\n#changing c wont change a\n\n\n# In[66]:\n\n\n#fancy indexing\n#another for of creating random numbers \n\na=np.random.randint(0,20,15)\nmask=(a%2==0)\n#only even numbers\nextract_from_a=a[mask]\n\nextract_from_a\n#masks create copy and not views\n\n\n# In[68]:\n\n\na[mask]=-1\na\n#all ven changed to -1\n\n\n# In[71]:\n\n\n#indexing with arr of integers\n\na=np.arange(0,100,10)\na[[2,3,2,4,2]] #these indices will be displayed\n\n\n# In[73]:\n\n\na[[9,7]]=-200\na\n\n\n# In[75]:\n\n\n#LECTURE 2 NUMPY\n\n\n#NUMERICAL OPERATIONS ON NUMPY\n\na=np.array([1,2,3,4])\na+1\n#added to every element\n\n\n# In[76]:\n\n\na**2\n\n\n# In[78]:\n\n\nb=np.ones(4)+1\na-b\n\n\n# In[79]:\n\n\na*b #element wise multiplication\n\n\n# In[80]:\n\n\nc=np.diag([1,2,3,4])\nprint(c*c) #normal matrix multn\nprint(\"********************\")\nprint(c.dot(c)) #dot product \n#that si here dimensiosn are reverse of each other\n\n\n# In[82]:\n\n\na=np.array([1,2,3,4])\nb=np.array([4,2,3,2])\n#array wise compn\nprint(a==b)\nprint(a>b)\n\n\n# In[85]:\n\n\n#elemnet wise comparison\nc=np.array([1,2,3,4])\nprint(np.array_equal(a,b))\nnp.array_equal(a,c)\n\n\n# In[86]:\n\n\na=np.array([1,0,1,0],dtype=bool)\nb=np.array([1,1,1,0],dtype=bool)\n\nnp.logical_or(a,b) #and also there\n\n\n# In[87]:\n\n\n#mathm 
fnc\na=np.arange(5)\n\nnp.sin(a) #log,cos etc all there\n\n\n# In[89]:\n\n\n#important shape mismatch\n\na=np.arange(4)\na+np.array(1,2)\n\n\n# In[90]:\n\n\n#reductions \n\nx=np.array([1,2,3,4])\nnp.sum(a)\n\n\n# In[91]:\n\n\nx=np.array([[1,1],[2,2]])\nx\n\n\n# In[92]:\n\n\nx.sum(axis=0)#column wise sum\n\n\n# In[93]:\n\n\nx.sum(axis=1) #row wise sum\n\n\n# In[95]:\n\n\nx=np.array([1,3,2])\nx.min() #max also\n\n\n# In[96]:\n\n\nprint(x.argmin()) # gives index of min element\nprint(x.argmax())\n\n\n# In[97]:\n\n\nnp.all([True,True])\n\n\n# In[98]:\n\n\nnp.any([True,False])\n\n\n# In[99]:\n\n\n#any can be used for array comparsions\na=np.zeros((50,50))\nnp.any(a!=0)\n\n\n# In[100]:\n\n\nnp.all(a==a)\n\n\n# In[101]:\n\n\n#very important\n#so easy\na=np.array([1,2,3,2])\nb=np.array([2,2,3,2])\nc=np.array([6,4,4,5])\n\n((a<=b)& (b<=c)).all()\n\n\n# In[103]:\n\n\nx=np.array([1,2,3,1])\nprint(x.mean())\nprint(np.median(x))\nprint(np.std(x))\n\n#can also do for matrices alog axises suing axis values\n\n\n# In[104]:\n\n\ndata=np.loadtxt('populations.txt')\ndata\n\n\n# In[105]:\n\n\nyear,hares,lynxes,carrots=data.T #colums to variables\nprint(year)\n\n\n# In[106]:\n\n\npopulations=data[:,1:] #all rows but colum from 1 onwards\npopulations \n\n\n# In[107]:\n\n\npopulations.std(axis=0) #column wise\n\n\n# In[108]:\n\n\n#whcih species has max population each year \nnp.argmax(populations,axis=1) \n\n\n# In[112]:\n\n\n#broadcasting\n#very important\n\n#to add or subtract two arrays we generally do element wise \n#operation that is element wise operation\n\n#but for numpy array we can do for non similar sizes\na=np.tile(np.arange(0,40,10),(3,1))\n#here arange creates a 1d array \n#tile replicates this into 3 times into rows and 1 times to column\nprint(a)\n\nprint('\\n\\n')\na=a.T\nprint(a) #transpose of a\n\n\n# In[113]:\n\n\nb=np.array([0,1,2])\nb\n\n\n# In[114]:\n\n\na+b\n\n\n# In[142]:\n\n\na=np.arange(0,40,10)\nb=np.array([0,1,2])\n#this also doesnt work\n\n\n# In[143]:\n\n\nb=b[:,np.newaxis]\nb.shape #convert any one to matrix either a or b\n\n\n# In[144]:\n\n\na+b\n\n\n# In[146]:\n\n\n#array shape manipulation\na=np.array([[1,2,3],[4,5,6]]) #2d array\n#flattens or converts array into 1d\na.ravel() #a doesnt change\n\n\n# In[148]:\n\n\na.T #a doesnt change\n\n\n# In[149]:\n\n\na.T.ravel()\n\n\n# In[151]:\n\n\nb=a.ravel()\nb\n\n\n# In[152]:\n\n\nb=b.reshape((2,3))\nb\n\n\n# In[159]:\n\n\nb[0,0]=100\na\n#a also changes\n# ravel always works on memory so change ill be reflected in both\n#reshape can sometimes return copy depending on case\n#example\n\na=np.zeros((3,2))\nb=a.T.reshape((3,2))\nb[0]=50\na\n#if directly use a and not a.T change will be reflected\n\n\n# In[160]:\n\n\n#adding a dimension\n\nz=np.array([1,2,3])\nz[:,np.newaxis]\n\n#reshaping\na=np.arange(4*3*2).reshape(4,3,2)\na\n#think of 3d arrray as 4 arrays of size 3*2\n\n\n# In[161]:\n\n\n#resizing\n\na=np.arange(4)\na.resize((8,))\na\n#concatenates with zero for remanining values\n\n\n# In[162]:\n\n\n#problem with resizing\n\nb=a\na.resize((4,))\n#because referenced cant be resized\n\n\n# In[163]:\n\n\n#sorting \n\na=np.array([[5,4,6],[2,3,2]])\nb=np.sort(a,axis=1)\nb #rowwise\n\n\n# In[164]:\n\n\n#sorting original array a\n#inplace sorting algo\na.sort(axis=0)\na\n\n\n# In[166]:\n\n\n#fancy indexing\na=np.array([4,3,1,2])\nj=np.argsort(a)\n#tells the index accd to old array in the sorted array\nj\n\n\n# In[167]:\n\n\na[j] #gives sorted array values\n\n" } ]
2
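Two ideas the notebook dump above keeps returning to are worth a compact, runnable recap: basic slicing yields a view into the same buffer (writes propagate, np.shares_memory is True) while mask indexing yields a copy, and a (4,) + (3,) addition only broadcasts once one operand gains an axis via np.newaxis. This is a standalone sketch of those recorded cells, assuming nothing beyond NumPy itself.

import numpy as np

a = np.arange(10)
view = a[::2]            # basic slicing -> view (shared memory)
copy = a[a % 2 == 0]     # boolean mask  -> copy (own memory)
assert np.shares_memory(a, view) and not np.shares_memory(a, copy)

view[0] = 99             # visible through `a`, since the buffer is shared
assert a[0] == 99

# Broadcasting: (4,) + (3,) fails, but (4,1) + (3,) -> (4,3).
rows = np.arange(0, 40, 10)          # shape (4,)
cols = np.array([0, 1, 2])           # shape (3,)
grid = rows[:, np.newaxis] + cols    # shape (4, 3)
assert grid.shape == (4, 3)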
domingoesteban/robolearn
https://github.com/domingoesteban/robolearn
bc58278fe38894f4ca9ec9e657ee13a479a368b7
0d20125425c352b80ef2eeed1c0b11ab6497b11a
88be4d5657d19462eb1d74d2d4d98180b423a889
refs/heads/master
2020-04-15T22:38:25.343229
2019-01-29T17:01:42
2019-01-29T17:01:42
165,080,647
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5708354711532593, "alphanum_fraction": 0.598728597164154, "avg_line_length": 39.14583206176758, "blob_id": "980203991971357b024b56c8a7f70d426e2dd30d", "content_id": "9e2a3bbd66c145fd57c5b57160ea3751e2fe5664", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7708, "license_type": "permissive", "max_line_length": 162, "num_lines": 192, "path": "/scenarios/tests/load_plot_good_bad_errors.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import MaxNLocator\nimport pickle\nimport math\nimport os, sys\nfrom robolearn.old_utils.plot_utils import plot_sample_list, plot_sample_list_distribution, lqr_forward, plot_3d_gaussian\nfrom robolearn.old_algos.gps.gps_utils import IterationData\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.traj_opt.traj_opt_utils import traj_distr_kl, traj_distr_kl_alt\nimport scipy.stats\n\ngps_directory_name = 'GPS_2017-09-10_15:30:24' # Normal Sunday 10/09 | new init_pos\n#gps_directory_name = 'GPS_2017-09-10_19:10:07' # G/B Sunday 10/09 | new init_pos\n\ngps_directory_names = ['GPS_2017-09-12_07:01:16', 'GPS_2017-09-11_15:25:19', 'GPS_2017-09-13_07:24:42']\ngps_models_labels = ['MDGPS', 'B-MDGPS', 'D-MDGPS']\ngps_models_line_styles = [':', '--', '-']\n\ninit_itr = 0\nfinal_itr = 2\n#final_itr = 30\nsamples_idx = None # List of samples / None: all samples\nmax_traj_plots = None # None, plot all\nlast_n_iters = None # None, plot all iterations\nsensed_joints = 'RA'\nmethod = 'MDGPS_MDREPS'\n\niteration_data_options = {\n 'plot_errors': True,\n}\n\neta_color = 'black'\ncs_color = 'red'\nstep_mult_color = 'red'\nsample_list_cols = 3\nplot_sample_list_max_min = False\nplot_joint_limits = True\ngps_num = 0\n\nload_iteration_data = True\n\n#iteration_data_options = [value for key, value in options.items() if key not in duality_data_options+policy_different_options]\n\ngps_path = '/home/desteban/workspace/robolearn/scenarios/robolearn_log/' + gps_directory_name\n\niteration_data_list = list()\ngood_duality_info_list = list()\ngood_trajectories_info_list = list()\nbad_duality_info_list = list()\nbad_trajectories_info_list = list()\niteration_ids = list()\npol_sample_lists_costs = list()\npol_sample_lists_cost_compositions = list()\n\nmax_available_itr = None\nfor pp in range(init_itr, final_itr):\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n max_available_itr = pp\n\nif max_available_itr is not None:\n print(\"Max available iterations: %d\" % max_available_itr)\n\n if last_n_iters is not None:\n init_itr = max(max_available_itr - last_n_iters + 1, 0)\n\n if max_traj_plots is not None:\n if max_available_itr > max_traj_plots:\n itr_to_load = np.linspace(init_itr, max_available_itr, max_traj_plots, dtype=np.uint8)\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n print(\"Iterations to load: %s\" % itr_to_load)\n for pp in itr_to_load:\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading GPS iteration_data from 
iteration %d' % pp)\n iteration_data_list.append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() +'_iteration_data_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n iteration_ids.append(pp)\n\nif load_iteration_data:\n data_list_with_data = iteration_data_list\n if not data_list_with_data:\n raise AttributeError(\"No data has been loaded. Check that files exist\")\n T = iteration_data_list[-1][-1].sample_list.get_actions(samples_idx).shape[1]\nelse:\n raise ValueError(\"NO data has been loaded!\")\n\n# total_cond = len(pol_sample_lists_costs[0])\ntotal_itr = len(data_list_with_data)\ntotal_cond = len(data_list_with_data[0])\ncolormap = plt.cm.rainbow # nipy_spectral, Set1, Paired, winter\n\njoint_limits = [bigman_params['joints_limits'][ii] for ii in bigman_params['joint_ids'][sensed_joints]]\n\nif False:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_actions(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].sample_list.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif True:\n error_x = 0.05\n error_y = 0.05\n error_z = 0.05\n error_R = 0.05\n error_P = 0.05\n error_Y = 0.05\n max_error_drill = np.array([0.05, 0.05, 0.05, 0.05, 0.05, 0.05])\n\n indeces_drill = np.array([27, 28, 29, 30, 31, 32])\n\n for cond in range(total_cond):\n N = iteration_data_list[0][cond].sample_list.get_states(samples_idx).shape[-3]\n dData = iteration_data_list[0][cond].sample_list.get_states(samples_idx).shape[-1]\n fig, axs = plt.subplots(1, 1,)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('States | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n #for ii in range(axs.size):\n # ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n # ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n\n errors = np.zeros(total_itr)\n\n for itr in range(total_itr):\n states = iteration_data_list[itr][cond].sample_list.get_states(samples_idx)\n all_zs = states[:, :, indeces_drill[-1]]\n print(all_zs.shape)\n error_count = 0\n for nn in range(N):\n print(N)\n print(nn)\n if np.any(all_zs[nn, :] > 
max_error_drill[-1]):\n error_count += 1\n errors[itr] = error_count*100./N\n\n axs.plot(errors)\n\n ## One legend for all figures\n #legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n #legend.get_frame().set_alpha(0.4)\n\nplt.show(block=False)\n\nraw_input('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.5500432848930359, "alphanum_fraction": 0.5559698939323425, "avg_line_length": 38.9388313293457, "blob_id": "6af2af686e14f54d66c4ac64600b993b6dddf083", "content_id": "bd6e0db7923e617fd887a75afe274ac7b4a4a224", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15017, "license_type": "permissive", "max_line_length": 92, "num_lines": 376, "path": "/robolearn/torch/algorithms/rl_algos/sql/multisql_prev.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Haarnoja's TensorFlow SQL implementation\n\nhttps://github.com/haarnoja/softqlearning\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n# from torch.autograd import Variable\n\nfrom collections import OrderedDict\n\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfrom robolearn.algorithms.rl_algos import IncrementalRLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils.samplers import InPlacePathSampler\nfrom robolearn.torch.sql.policies import MakeDeterministic\nfrom robolearn.torch.sql.kernel import adaptive_isotropic_gaussian_kernel\nfrom robolearn.torch.utils.ops import log_sum_exp\n\nEPS = 1e-6\n\n\ndef assert_shape(tensor, expected_shape):\n tensor_shape = list(tensor.shape)\n assert len(tensor_shape) == len(expected_shape)\n assert all([a == b for a, b in zip(tensor_shape, expected_shape)])\n\n\nclass MultiSQL(IncrementalRLAlgorithm, TorchAlgorithm):\n \"\"\"Soft Q-learning (SQL).\n\n\n Reference:\n [1] Tuomas Haarnoja, Haoran Tang, Pieter Abbeel, and Sergey Levine,\n \"Reinforcement Learning with Deep Energy-Based Policies,\" International\n Conference on Machine Learning, 2017. https://arxiv.org/abs/1702.08165\n \"\"\"\n def __init__(self,\n env,\n qfs,\n policies,\n exploration_pol_id=0,\n evaluation_pol_id=0,\n\n qf_lr=1e-3,\n policy_lr=1e-3,\n optimizer_class=optim.Adam,\n use_hard_updates=False,\n hard_update_period=1000,\n soft_target_tau=0.001,\n\n value_n_particles=16,\n kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=16,\n kernel_update_ratio=0.5,\n plotter=None,\n eval_deterministic=True,\n **kwargs):\n \"\"\"\n\n Args:\n env:\n qf (`robolearn.PyTorchModule`): Q-function approximator.\n policy (`robolearn.PyTorchModule`):\n qf_lr (`float`): Learning rate used for the Q-function approximator.\n use_hard_updates (`bool`): Use a hard rather than soft update.\n hard_update_period (`int`): How many gradient steps before copying\n the parameters over. 
Used if `use_hard_updates` is True.\n soft_target_tau (`float`): Soft target tau to update target QF.\n Used if `use_hard_updates` is False.\n value_n_particles (`int`): The number of action samples used for\n estimating the value of next state.\n kernel_fn (function object): A function object that represents\n a kernel function.\n kernel_n_particles (`int`): Total number of particles per state\n used in SVGD updates.\n plotter (`MultiQFPolicyPlotter`): Plotter instance to be used for\n visualizing Q-function during training.\n eval_deterministic: Evaluate with deterministic version of current\n _i_policy.\n **kwargs:\n \"\"\"\n self._n_demons = len(qfs)\n if eval_deterministic:\n eval_policies = [MakeDeterministic(policy) for policy in policies]\n else:\n eval_policies = policies\n super().__init__(\n env=env,\n exploration_policy=policies[exploration_pol_id],\n eval_policy=eval_policies[evaluation_pol_id],\n **kwargs\n )\n self.policies = policies\n self.qfs = qfs\n self.target_qfs = [qf.copy() for qf in self.qfs]\n self.plotter = plotter\n\n # Env data\n self._action_dim = self.explo_env.action_space.low.size\n self._obs_dim = self.explo_env.observation_space.low.size\n\n # Optimize Q-fcn\n self.qf_optimizers = [optimizer_class(qf.parameters(), lr=qf_lr,)\n for qf in self.qfs]\n self._value_n_particles = value_n_particles\n\n # Optimize Policy\n self.policy_optimizers = [optimizer_class(policy.parameters(),\n lr=policy_lr,)\n for policy in self.policies]\n\n self._kernel_n_particles = kernel_n_particles\n self._kernel_update_ratio = kernel_update_ratio\n self._kernel_fn = kernel_fn\n\n # Optimize target Q-fcn\n self.use_hard_updates = use_hard_updates\n self.hard_update_period = hard_update_period\n self.soft_target_tau = soft_target_tau\n\n\n # MULTI - specific\n self.eval_samplers = [\n InPlacePathSampler(env=env, policy=eval_policy,\n total_samples=self.num_steps_per_eval + self.max_path_length,\n max_path_length=self.max_path_length, )\n for eval_policy in self.policies\n ]\n\n def pretrain(self):\n # Math target Qfcn with current one\n for demon in range(self._n_demons):\n self._update_target_q_fcn(demon)\n\n def _do_training(self):\n batch = self.get_batch()\n\n # Update Networks\n\n # print('n_step', self._n_total_train_steps)\n for demon in range(self._n_demons):\n bellman_residual = self._update_q_fcn(batch, demon)\n surrogate_cost = self._update_policy(batch, demon)\n self._update_target_q_fcn(demon)\n\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n self.eval_statistics['[%d] Bellman Residual (QFcn)' % demon] = \\\n np.mean(ptu.get_numpy(bellman_residual))\n self.eval_statistics['[%d] Surrogate Reward (Policy)' % demon] = \\\n np.mean(ptu.get_numpy(surrogate_cost))\n\n def _update_q_fcn(self, batch, demon):\n \"\"\"\n Q-fcn update\n Args:\n batch:\n\n Returns:\n\n \"\"\"\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n rewards = batch['rewards']\n # THE REWARD FOR THIS DEMON IS THE CORRESPONDING REWARD VECTOR\n rewards = batch['reward_vectors'][:, demon].unsqueeze(-1)\n terminals = batch['terminals']\n n_batch = obs.shape[0]\n\n # The value of the next state is approximated with uniform samples.\n uniform_dist = torch.distributions.Uniform(ptu.FloatTensor([-1.0]),\n ptu.FloatTensor([1.0]))\n target_actions = uniform_dist.sample((self._value_n_particles,\n self._action_dim)).squeeze()\n # 
target_actions = (-1 - 1) * torch.tensor(torch.rand(self._value_n_particles,\n # self._action_dim)) \\\n # + 1\n\n q_value_targets = \\\n self.target_qfs[demon](\n next_obs.unsqueeze(1).expand(n_batch,\n self._value_n_particles,\n self._obs_dim),\n target_actions.unsqueeze(0).expand(n_batch,\n self._value_n_particles,\n self._action_dim)\n ).squeeze()\n assert_shape(q_value_targets, [n_batch, self._value_n_particles])\n\n q_values = self.qfs[demon](obs, actions).squeeze()\n assert_shape(q_values, [n_batch])\n\n # Equation 10: 'Empirical' Vsoft\n next_value = log_sum_exp(q_value_targets.squeeze(), dim=1)\n assert_shape(next_value, [n_batch])\n\n # Importance _weights add just a constant to the value.\n next_value -= torch.log(ptu.FloatTensor([self._value_n_particles]))\n next_value += self._action_dim * np.log(2)\n\n # \\hat Q in Equation 11\n ys = (self.reward_scale * rewards.squeeze() + # Current reward\n # ys = (rewards.squeeze() + # IT IS NOT NECESSARY TO SCALE REWARDS (ALREADY DONE)\n (1 - terminals.squeeze()) * self.discount * next_value # Future return\n ).detach() # TODO: CHECK IF I AM DETACHING GRADIENT!!!\n assert_shape(ys, [n_batch])\n\n # Equation 11:\n bellman_residual = 0.5 * torch.mean((ys - q_values) ** 2)\n\n # Gradient descent on _i_policy parameters\n self.qf_optimizers[demon].zero_grad() # Zero all model var grads\n bellman_residual.backward() # Compute gradient of surrogate_loss\n self.qf_optimizers[demon].step() # Update model vars\n\n return bellman_residual\n\n def _update_policy(self, batch, demon):\n \"\"\"\n Policy update: SVGD\n Returns:\n\n \"\"\"\n obs = batch['observations']\n next_obs = batch['next_observations']\n n_batch = obs.shape[0]\n\n actions = self.policies[demon](\n obs.unsqueeze(1).expand(n_batch,\n self._kernel_n_particles,\n self._obs_dim)\n )\n # actions = actions[0] # For policies that return tuple\n assert_shape(actions,\n [n_batch, self._kernel_n_particles, self._action_dim])\n\n # SVGD requires computing two empirical expectations over actions\n # (see Appendix C1.1.). To that end, we first sample a single set of\n # actions, and later split them into two sets: `fixed_actions` are used\n # to evaluate the expectation indexed by `j` and `updated_actions`\n # the expectation indexed by `i`.\n n_updated_actions = \\\n int(self._kernel_n_particles*self._kernel_update_ratio)\n n_fixed_actions = self._kernel_n_particles - n_updated_actions\n\n fixed_actions, updated_actions \\\n = torch.split(actions, [n_fixed_actions, n_updated_actions], dim=1)\n # Equiv: fixed_actions = tf.stop_gradient(fixed_actions)\n fixed_actions = torch.tensor(fixed_actions.detach(), requires_grad=True)\n assert_shape(fixed_actions,\n [n_batch, n_fixed_actions, self._action_dim])\n assert_shape(updated_actions,\n [n_batch, n_updated_actions, self._action_dim])\n\n svgd_target_values = \\\n (self.qfs[demon](next_obs.unsqueeze(1).expand(n_batch,\n n_fixed_actions,\n self._obs_dim),\n fixed_actions)).squeeze()\n\n # Target log-density. Q_soft in Equation 13:\n squash_correction = torch.sum(torch.log(1 - fixed_actions**2 + EPS),\n dim=-1)\n log_p = svgd_target_values + squash_correction\n\n # Backward log_p\n grad_log_p = torch.autograd.grad(log_p,\n fixed_actions,\n grad_outputs=torch.ones_like(log_p),\n create_graph=False)[0]\n grad_log_p = torch.unsqueeze(grad_log_p, dim=2)\n assert_shape(grad_log_p,\n [n_batch, n_fixed_actions, 1, self._action_dim])\n\n kernel_dict = self._kernel_fn(xs=fixed_actions,\n ys=updated_actions)\n\n # Kernel function in Eq. 
13:\n kappa = torch.unsqueeze(kernel_dict['output'], dim=3)\n assert_shape(kappa,\n [n_batch, n_fixed_actions, n_updated_actions, 1])\n\n # Stein Variational Gradient in Eq. 13:\n action_gradients = \\\n torch.mean(kappa * grad_log_p + kernel_dict['gradient'], dim=1)\n assert_shape(action_gradients,\n [n_batch, n_updated_actions, self._action_dim])\n\n # Propagate the gradient through the _i_policy network (Equation 14).\n gradients = torch.autograd.grad(updated_actions,\n self.policies[demon].parameters(),\n grad_outputs=action_gradients,\n create_graph=False)\n\n # TODO: Check a better way to do this\n for pp, (w, g) in enumerate(zip(self.policies[demon].parameters(),\n gradients)):\n if pp == 0:\n surrogate_loss = torch.sum(w*g)\n else:\n surrogate_loss += torch.sum(w*g)\n\n # Gradient descent on _i_policy parameters\n self.policy_optimizers[demon].zero_grad() # Zero all model var grads\n (-surrogate_loss).backward() # Compute gradient of surrogate_loss\n self.policy_optimizers[demon].step() # Update model vars\n\n return -surrogate_loss\n\n def _update_target_q_fcn(self, demon):\n if self.use_hard_updates:\n # print(self._n_total_train_steps, self.hard_update_period)\n if self._n_total_train_steps % self.hard_update_period == 0:\n ptu.copy_model_params_from_to(self.qfs[demon],\n self.target_qfs[demon])\n else:\n ptu.soft_update_from_to(self.qfs[demon], self.target_qfs[demon],\n self.soft_target_tau)\n\n @property\n def torch_models(self):\n return self.policies + self.qfs + self.target_qfs\n\n def get_epoch_snapshot(self, epoch):\n if self.plotter is not None:\n self.plotter.draw()\n self.plotter.save_figure(epoch)\n\n snapshot = super().get_epoch_snapshot(epoch)\n snapshot.update(\n qfs=self.qfs,\n policy=self.eval_policy,\n trained_policies=self.policies,\n target_qfs=self.target_qfs\n )\n return snapshot\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n test_paths = [None for _ in range(self._n_demons)]\n for demon in range(self._n_demons):\n logger.log(\"[%02d] Collecting samples for evaluation\" % demon)\n test_paths[demon] = self.eval_sampler.obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths[demon], stat_prefix=\"[%02d] Test\" % demon,\n ))\n\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n if hasattr(self.explo_env, \"log_diagnostics\"):\n print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n self.explo_env.log_diagnostics(test_paths[demon])\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n if self.plotter is not None:\n self.plotter.draw()\n" }, { "alpha_fraction": 0.5475404858589172, "alphanum_fraction": 0.5571271181106567, "avg_line_length": 35.78034591674805, "blob_id": "b2913597e0e37ca5ccc32340e1e53ab429d4ceba", "content_id": "9ec6892503c8fc370bf7a29990cb46862ba9dc05", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6363, "license_type": "permissive", "max_line_length": 99, "num_lines": 173, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/policies/lin_gauss_init.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy as sp\nfrom robolearn.algorithms.rl_algos import LinearGaussianPolicy\n\n\ndef init_pd(dU, dX, T, x0,\n state_idx=None,\n dstate_idx=None,\n pos_gains=0.001,\n Kp=1,\n Kv=0.001,\n 
init_var=0.01,\n ):\n \"\"\"\n This function initializes the linear-Gaussian controller as a\n proportional-derivative (PD) controller with Gaussian noise. The\n position gains are set by the variable pos_gains and scaled by Kp;\n the velocity gains are scaled by Kv.\n \"\"\"\n\n if not issubclass(type(pos_gains), list) \\\n and not issubclass(type(pos_gains), np.ndarray):\n pos_gains = np.tile(pos_gains, dU)\n elif len(pos_gains) != dU:\n raise TypeError(\"pos_gains size (%d) does not match dU (%d)\"\n % (len(pos_gains), dU))\n\n # Choose initialization mode.\n Jac = np.zeros((dU, dX))\n\n Jac[:, state_idx] = np.eye(dU)*Kp\n if dstate_idx is not None:\n Jac[:, dstate_idx] = np.eye(dU)*Kv\n\n K = -np.diag(pos_gains).dot(Jac)\n K = np.tile(K, [T, 1, 1])\n\n # if state_to_pd == 'distance':\n # k = np.tile(-K[0, :, :].dot(x0), [T, 1])\n # else:\n k = np.tile(2*K[0, :, :].dot(x0), [T, 1])\n # k = np.tile(np.zeros(dU), [T, 1])\n\n #k = np.tile(K[0, :, :].dot(x0), [T, 1])\n PSig = init_var * np.tile(np.eye(dU), [T, 1, 1])\n cholPSig = np.sqrt(init_var) * np.tile(np.eye(dU), [T, 1, 1])\n invPSig = (1.0 / init_var) * np.tile(np.eye(dU), [T, 1, 1])\n\n max_std = np.sqrt(init_var)\n\n return LinearGaussianPolicy(K, k, PSig, cholPSig, invPSig, max_std=max_std)\n\n\n# Original code\ndef init_lqr(dU, dX, T, dt, x0, stiffness=1.0, stiffness_vel=0.5,\n init_var=0.01, final_weight=1.0, init_acc=None, init_gains=None):\n \"\"\"\n Return initial gains for a time-varying linear Gaussian controller that\n tries to hold the initial position.\n \"\"\"\n #TODO: Use packing instead of assuming which indices are the joint angles.\n\n # Notation notes:\n # L = loss, Q = q-function (dX+dU dimensional),\n # V = value function (dX dimensional), F = dynamics\n # Vectors are lower-case, matrices are upper case.\n # Derivatives: x = state, u = action, t = state+action (trajectory).\n # The time index is denoted by _t after the above.\n # Ex. Ltt_t = Loss, 2nd derivative (w.r.t. trajectory), indexed by time t.\n\n # Constants.\n idx_x = slice(dX) # Slices out state.\n idx_u = slice(dX, dX+dU) # Slices out actions.\n\n if init_acc is None:\n init_acc = np.zeros(dU)\n\n if init_gains is None:\n init_gains = np.ones(dU)\n\n # Set up simple linear dynamics model.\n Fd, fc = guess_dynamics(init_gains, init_acc, dX, dU, dt)\n\n # Setup a cost function based on stiffness.\n # Ltt = (dX+dU) by (dX+dU) - Hessian of loss with respect to trajectory at\n # a single timestep.\n Ltt = np.diag(np.hstack([stiffness * np.ones(dU),\n stiffness * stiffness_vel * np.ones(dU),\n np.zeros(dX - dU*2),\n np.ones(dU)\n ]))\n Ltt = Ltt / init_var # Cost function - quadratic term.\n lt = -Ltt.dot(np.r_[x0, np.zeros(dU)]) # Cost function - linear term.\n\n # Perform dynamic programming.\n K = np.zeros((T, dU, dX)) # Controller gains matrix.\n k = np.zeros((T, dU)) # Controller bias term.\n PSig = np.zeros((T, dU, dU)) # Covariance of noise.\n cholPSig = np.zeros((T, dU, dU)) # Cholesky decomposition.\n invPSig = np.zeros((T, dU, dU)) # Inverse of covariance.\n vx_t = np.zeros(dX) # Vx = dV/dX.
Derivative of value function wrt to X at time t.\n Vxx_t = np.zeros((dX, dX)) # Vxx = ddV/dXdX at time t.\n\n # LQR backward pass.\n for t in range(T - 1, -1, -1):\n # Compute Q function at this step.\n if t == (T - 1):\n Ltt_t = final_weight * Ltt\n lt_t = final_weight * lt\n else:\n Ltt_t = Ltt\n lt_t = lt\n # Qtt = (dX+dU) by (dX+dU) 2nd Derivative of Q-function with respect to trajectory (dX+dU).\n Qtt_t = Ltt_t + Fd.T.dot(Vxx_t).dot(Fd)\n # Qt = (dX+dU) 1st Derivative of Q-function with respect to trajectory (dX+dU).\n qt_t = lt_t + Fd.T.dot(vx_t + Vxx_t.dot(fc))\n\n # Compute preceding value function.\n U = sp.linalg.cholesky(Qtt_t[idx_u, idx_u])\n L = U.T\n\n invPSig[t, :, :] = Qtt_t[idx_u, idx_u]\n PSig[t, :, :] = sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, np.eye(dU), lower=True)\n )\n cholPSig[t, :, :] = sp.linalg.cholesky(PSig[t, :, :])\n K[t, :, :] = -sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, Qtt_t[idx_u, idx_x], lower=True)\n )\n k[t, :] = -sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, qt_t[idx_u], lower=True)\n )\n Vxx_t = Qtt_t[idx_x, idx_x] + Qtt_t[idx_x, idx_u].dot(K[t, :, :])\n vx_t = qt_t[idx_x] + Qtt_t[idx_x, idx_u].dot(k[t, :])\n Vxx_t = 0.5 * (Vxx_t + Vxx_t.T)\n\n max_std = np.sqrt(init_var)\n\n return LinearGaussianPolicy(K, k, PSig, cholPSig, invPSig, max_std=max_std)\n\n\ndef guess_dynamics(gains, acc, dX, dU, dt):\n \"\"\"\n Initial guess at the model using position-velocity assumption.\n Note: This code assumes joint positions occupy the first dU state\n indices and joint velocities occupy the next dU.\n Args:\n gains: dU dimensional joint gains.\n acc: dU dimensional joint acceleration.\n dX: Dimensionality of the state.\n dU: Dimensionality of the action.\n dt: Length of a time step.\n Returns:\n Fd: A dX by dX+dU transition matrix.\n fc: A dX bias vector.\n \"\"\"\n #TODO: Use packing instead of assuming which indices are the joint\n # angles.\n Fd = np.vstack([\n np.hstack([\n np.eye(dU), dt * np.eye(dU), np.zeros((dU, dX - dU*2)),\n dt ** 2 * np.diag(gains)\n ]),\n np.hstack([\n np.zeros((dU, dU)), np.eye(dU), np.zeros((dU, dX - dU*2)),\n dt * np.diag(gains)\n ]),\n np.zeros((dX - dU*2, dX+dU))\n ])\n fc = np.hstack([acc * dt ** 2, acc * dt, np.zeros((dX - dU*2))])\n return Fd, fc\n" }, { "alpha_fraction": 0.8700000047683716, "alphanum_fraction": 0.8700000047683716, "avg_line_length": 48.9375, "blob_id": "b20149e3b593d0ebe1ce99512153b792c52d9f2e", "content_id": "10941ec4a4a99c836f51ab15e8f5b5d21ac7b453", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "permissive", "max_line_length": 80, "num_lines": 16, "path": "/robolearn/torch/policies/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .mlp_policy import MlpPolicy\nfrom .tanh_mlp_policy import TanhMlpPolicy\nfrom .tanh_gaussian_policy import TanhGaussianPolicy\n\nfrom .tanh_gaussian_multi_policy import TanhGaussianMultiPolicy\nfrom .tanh_gaussian_mixture_multi_policy import TanhGaussianMixtureMultiPolicy\nfrom .tanh_gaussian_weighted_multi_policy import TanhGaussianWeightedMultiPolicy\nfrom .tanh_gaussian_composed_multi_policy import TanhGaussianComposedMultiPolicy\nfrom .tanh_gaussian_promp_multi_policy import TanhGaussianPrompMultiPolicy\nfrom .tanh_weighted_multi_policy import TanhWeightedMultiPolicy\n\nfrom .multi_policy_selector import MultiPolicySelector\nfrom .weighted_multi_policy_selector import 
WeightedMultiPolicySelector\nfrom .sampling_policy import SamplingPolicy\n\nfrom .lin_gauss_policy import LinearGaussianPolicy\n\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 31, "blob_id": "ba6509062a9cbe70af71666efaeaf304a08ce8a8", "content_id": "153f00d8a6cd42a27de8ab9c6737baedef0a9d89", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "permissive", "max_line_length": 31, "num_lines": 1, "path": "/robolearn/envs/simple_envs/cliff/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .cliff_env import CliffEnv\n" }, { "alpha_fraction": 0.5155635476112366, "alphanum_fraction": 0.5525065660476685, "avg_line_length": 29.3681583404541, "blob_id": "25ff8bbe2efeb4154f34aa6e6a172d04367ec72d", "content_id": "942e35188d00b12313b929a49185e72a860ee8ef", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12208, "license_type": "permissive", "max_line_length": 102, "num_lines": 402, "path": "/scripts/plot_multiple_reacher.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import plot_multiple_process_iu_returns\nfrom robolearn.utils.plots import plot_process_iu_policies\nfrom robolearn.utils.plots import plot_process_iu_values_errors\nfrom robolearn.utils.plots import plot_process_general_data\nfrom robolearn.utils.plots.learning_process_plots import plot_process_haarnoja\nimport json\n\n# SEEDS = [610, 710, 810, 1010]\nSEEDS = [610]#, 1010]#, 710, 1010]\nMAX_ITER = 190\n# STEPS_PER_ITER = 3e3\nSTEPS_PER_ITER = None\nLOG_PREFIX = '/home/desteban/logs/objective_test/reacher'\n\nfig_name_prefix = 'Reacher_'\n\n# 1: Irew=5e-1, Urew=5e-1\n# 2: Irew=5e-1, Urew=5e-1\n# 3: Irew=1e+0, Urew=5e-1 ????\n# anh: Irew=1e+0, Urew=1e+0\n# anh2: Irew=1e-1, Urew=1e-1\n# anh2: Irew=1e+1, Urew=1e-1\n# compo: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=1e0\n# compo2: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=5e-0\n# compoX3: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=5e-0\n\n\nhiu_performance_dict = dict()\n\"\"\"\n# Subtask 01\nhiu_performance_dict['Subtask 01'] = dict()\n# hiu_performance_dict['Subtask 01']['SAC'] = dict(\n# dir='sub0',\n# prefix='sacD_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\nhiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sac_newE_5_', # i:0, u:1\n ius=[0],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Subtask 01']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_prompE_5_', # i:0, u:1\n ius=[0],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Subtask 01']['HIU-SAC-M'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1\n ius=[0],\n r_scales=[1.0e-0],\n)\n# hiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new5_5_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Subtask 01']['HIU-SAC-M'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture5_5_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Subtask 01']['HIU-SAC-E'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp5_5_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n\n\n# Subtask 02\nhiu_performance_dict['Subtask 02'] = dict()\n# hiu_performance_dict['Subtask 02']['SAC'] = dict(\n# dir='sub1',\n# prefix='sacD_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# 
)\nhiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sac_newE_5_', # i:0, u:1\n ius=[1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Subtask 02']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_prompE_5_', # i:0, u:1\n ius=[1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Subtask 02']['HIU-SAC-M'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1\n ius=[1],\n r_scales=[1.0e-0],\n)\n# hiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new5_5_',\n# ius=[1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Subtask 02']['HIU-SAC-M'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture5_5_',\n# ius=[1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Subtask 02']['HIU-SAC-E'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp5_5_',\n# ius=[1],\n# r_scales=[1.0e-0],\n# )\n\n\n# Maintask\nhiu_performance_dict['Main Task'] = dict()\n# hiu_performance_dict['Main Task']['SACC'] = dict(\n# dir='sub-1',\n# prefix='sacC_', # tgt:-2\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['SAC'] = dict(\n# dir='sub-1',\n# prefix='sacD_', # tgt:0\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['SACE'] = dict(\n# dir='sub-1',\n# prefix='sacE_', # tgt:1\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['SACF'] = dict(\n# dir='sub-1',\n# prefix='sacF_', # tgt:2\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# # hiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(\n# # dir='sub-1',\n# # prefix='hiu_sac_new5_5_',\n# # ius=[-1],\n# # r_scales=[1.0e-0],\n# # )\n# hiu_performance_dict['Main Task']['HIU-SAC-Wx'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new5B_5_',\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['HIU-SAC-WB'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_newB_5_', # i:2, u:2\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['HIU-SAC-WC'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_newC_5_', # i:1, u:2\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_newD_5_', # i:0, u:1\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\nhiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sac_newE_5_', # i:0, u:1\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_prompE_5_', # i:0, u:1\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task']['HIU-SAC-M'] = dict(\n dir='sub-1',\n prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1\n ius=[-1],\n r_scales=[1.0e-0],\n)\n# # # hiu_performance_dict['Main Task']['HIU-SAC-M'] = dict(\n# # # dir='sub-1',\n# # # prefix='hiu_sac_new_mixture5_5_',\n# # # ius=[-1],\n# # # r_scales=[1.0e-0],\n# # # )\n# # # hiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(\n# # # dir='sub-1',\n# # # prefix='hiu_sac_new_promp5_5_',\n# # # ius=[-1],\n# # # r_scales=[1.0e-0],\n# # # )\n# # hiu_performance_dict['Main Task']['HIU-SAC-W6'] = dict(\n# # dir='sub-1',\n# # prefix='hiu_sac_new6_5_',\n# # ius=[-1],\n# # r_scales=[1.0e-0],\n# # )\n# # # hiu_performance_dict['Main Task']['DDPG'] = dict(\n# # # dir='sub-1',\n# # # prefix='ddpg_',\n# # # ius=[-1],\n# # # r_scales=[1.0e-0],\n# # # )\n\"\"\"\n# hiu_performance_dict['Sub Task 1'] = dict()\n# hiu_performance_dict['Sub Task 1']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(\n# dir='sub0',\n# 
prefix='sac_like_spinningupM_', # tgt:-2\n# ius=[-1],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_spinningupA_5_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_spinningupA_1_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1_newpol'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_spinningupB_1_',\n# ius=[0],\n# r_scales=[1.0e-0],\n# )\n#\nhiu_performance_dict['Sub Task 2'] = dict()\nhiu_performance_dict['Sub Task 2']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(\n dir='sub1',\n prefix='sac_like_spinningupM_', # tgt:-2\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Sub Task 2']['hiu_sac_spinningupE_1_'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupE_1_', # Sin clip variance de compound\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Sub Task 2']['hiu_sac_spinningupF_1_'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupF_1_', # Usando tgt_ent\n ius=[1],\n r_scales=[1.0e-0],\n)\n\nhiu_performance_dict['Main Task 2'] = dict()\nhiu_performance_dict['Main Task 2']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(\n dir='sub-1',\n prefix='sac_like_spinningupM_', # tgt:-2\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp5'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupA_5_',\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupA_1_',\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1_newpol'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupB_1_',\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['hiu_sac_spinningupD_1_'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupD_1_', # Con clip variance de compound\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['hiu_sac_spinningupE_1_'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupE_1_', # Sin clip variance de compound\n ius=[-1],\n r_scales=[1.0e-0],\n)\nhiu_performance_dict['Main Task 2']['hiu_sac_spinningupF_1_'] = dict(\n dir='sub-1',\n prefix='hiu_sac_spinningupF_1_', # Usando tgt_ent\n ius=[-1],\n r_scales=[1.0e-0],\n)\n\n\ndef get_full_seed_paths(full_dict):\n categories = list(full_dict.keys())\n\n for cc, cate in enumerate(categories):\n expt_dict = full_dict[cate]\n expts = list(expt_dict)\n # print(expt_dict)\n expt_counter = 0\n for ee, expt in enumerate(expts):\n # print(expt['dir'])\n run_dict = expt_dict[expt]\n expt_dir = os.path.join(LOG_PREFIX, run_dict['dir'])\n if len(list_files_startswith(expt_dir, run_dict['prefix'])) > 0:\n expt_counter += 1\n dirs_and_iu = list()\n dir_prefix = os.path.join(expt_dir, run_dict['prefix'])\n # print(dir_prefix)\n for seed in SEEDS:\n full_seed_dir = dir_prefix + str(seed)\n # print('- ', full_seed_dir)\n if os.path.exists(full_seed_dir):\n # print('YES DATA IN: %s' % full_seed_dir)\n dirs_and_iu.append((\n full_seed_dir,\n run_dict['ius'],\n run_dict['r_scales'],\n ))\n full_dict[cate][expt] = dirs_and_iu\n if expt_counter == 0:\n full_dict.pop(cate)\n return full_dict\n\n\ndef list_files_startswith(directory, prefix):\n 
return list(f for f in os.listdir(directory) if f.startswith(prefix))\n\n\ndef list_files_endswith(directory, suffix):\n return list(f for f in os.listdir(directory) if f.endswith(suffix))\n\n\ndef main(args):\n\n directories_dict = get_full_seed_paths(hiu_performance_dict)\n\n # directories_dict = get_subtask_and_seed_idxs()\n\n plot_multiple_process_iu_returns(\n directories_dict,\n max_iter=MAX_ITER,\n steps_per_iter=STEPS_PER_ITER,\n fig_name_prefix=fig_name_prefix,\n )\n\n # # Plot according to RL algorithm\n # if algo_name in ['HIUSAC', 'SAC', 'HIUSACEpisodic']:\n # # plot_process_iu_values_errors(csv_file=args.file, n_unintentional=args.un,\n # # block=False)\n # # plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,\n # # block=False, plot_intentional=args.no_in,\n # # deterministic=False)\n # plot_multiple_process_iu_returns(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n #\n # elif algo_name in ['IUWeightedMultiDDPG']:\n # # plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,\n # # block=False, plot_intentional=args.no_in,\n # # deterministic=True)\n # plot_multiple_process_iu_returns(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n # else:\n # plot_process_general_data(csv_file=args.file, block=False)\n\n # plot_process_haarnoja(csv_file=args.file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # parser.add_argument('file', type=str, default='./progress.csv',\n # help='path to the progress.csv file')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n parser.add_argument('--no_in', action='store_false')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.49393928050994873, "alphanum_fraction": 0.500567615032196, "avg_line_length": 32.30121994018555, "blob_id": "d42f1330c7f1f4e87888f28f7e95d70e438876a5", "content_id": "3eaee6796f9a95b588c33e7e73d0c07e192e1e4c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27307, "license_type": "permissive", "max_line_length": 80, "num_lines": 820, "path": "/robolearn/torch/algorithms/rl_algos/sac/sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis has been adapted from Vitchyr Pong's SAC implementation.\nhttps://github.com/vitchyr/rlkit\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom collections import OrderedDict\nfrom itertools import chain\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils import eval_util\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.utils.data_management.normalizer import RunningNormalizer\n\nimport tensorboardX\n\n# MAX_LOG_ALPHA = 9.21034037 # Alpha=10000 Before 01/07\nMAX_LOG_ALPHA = 6.2146080984 # Alpha=500 From 09/07\n\n\nclass SAC(RLAlgorithm, TorchAlgorithm):\n \"\"\"\n Soft Actor Critic (SAC)\n \"\"\"\n def __init__(\n self,\n explo_env,\n policy,\n qf,\n\n replay_buffer,\n batch_size=1024,\n normalize_obs=False,\n eval_env=None,\n\n vf=None,\n qf2=None,\n action_prior='uniform',\n\n entropy_scale=1.,\n auto_alpha=True,\n tgt_entro=None,\n\n policy_lr=3e-4,\n qf_lr=3e-4,\n\n policy_mean_regu_weight=1e-3,\n policy_std_regu_weight=1e-3,\n 
policy_pre_activation_weight=0.,\n\n policy_weight_decay=0.,\n q_weight_decay=0.,\n\n optimizer='adam',\n # optimizer='rmsprop',\n # optimizer='sgd',\n optimizer_kwargs=None,\n\n soft_target_tau=5e-3,\n target_update_interval=1,\n\n reward_scale=1.,\n\n save_replay_buffer=False,\n eval_deterministic=True,\n log_tensorboard=False,\n **kwargs\n ):\n\n # ###### #\n # Models #\n # ###### #\n\n # Exploration Policy\n self._policy = policy\n\n # Evaluation Policy\n if eval_deterministic:\n eval_policy = MakeDeterministic(self._policy)\n else:\n eval_policy = self._policy\n\n # Observation Normalizer\n if normalize_obs:\n self._obs_normalizer = RunningNormalizer(shape=explo_env.obs_dim)\n else:\n self._obs_normalizer = None\n\n RLAlgorithm.__init__(\n self,\n explo_env=explo_env,\n explo_policy=self._policy,\n eval_env=eval_env,\n eval_policy=eval_policy,\n obs_normalizer=self._obs_normalizer,\n **kwargs\n )\n\n # Q-function(s) and V-function\n self._qf = qf\n self._qf2 = qf2\n\n if vf is None:\n self._vf = None\n self._target_vf = None\n self._target_qf1 = qf.copy()\n self._target_qf2 = None if qf2 is None else qf2.copy()\n else:\n self._vf = vf\n self._target_vf = vf.copy()\n self._target_qf1 = None\n self._target_qf2 = None\n\n # Replay Buffer\n self.replay_buffer = replay_buffer\n self.batch_size = batch_size\n self.save_replay_buffer = save_replay_buffer\n\n # Soft-update rate for target V-function\n self._soft_target_tau = soft_target_tau\n self._target_update_interval = target_update_interval\n\n # Important algorithm hyperparameters\n self._action_prior = action_prior\n self._entropy_scale = entropy_scale\n\n # Desired Alpha\n self._auto_alpha = auto_alpha\n if tgt_entro is None:\n tgt_entro = -explo_env.action_dim\n self._tgt_entro = torch.tensor([float(tgt_entro)], device=ptu.device)\n self._log_alpha = torch.zeros(1, device=ptu.device, requires_grad=True)\n\n # Reward Scale\n self.reward_scale = reward_scale\n\n # ########## #\n # Optimizers #\n # ########## #\n if optimizer.lower() == 'adam':\n optimizer_class = optim.Adam\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n amsgrad=True,\n # amsgrad=False,\n )\n elif optimizer.lower() == 'rmsprop':\n optimizer_class = optim.RMSprop\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n\n )\n else:\n raise ValueError('Wrong optimizer')\n self.qf_lr = qf_lr\n self.policy_lr = policy_lr\n\n # Q-function(s) optimizer(s)\n self._qf1_optimizer = optimizer_class(\n self._qf.parameters(),\n lr=qf_lr,\n weight_decay=q_weight_decay,\n **optimizer_kwargs\n )\n values_parameters = self._qf.parameters()\n if self._qf2 is None:\n self._qf2_optimizer = None\n else:\n self._qf2_optimizer = optimizer_class(\n self._qf2.parameters(),\n lr=qf_lr,\n weight_decay=q_weight_decay,\n **optimizer_kwargs\n )\n values_parameters = chain(values_parameters, self._qf2.parameters())\n\n # V-function optimizer\n if self._vf is None:\n self._vf_optimizer = None\n else:\n self._vf_optimizer = optimizer_class(\n self._vf.parameters(),\n lr=qf_lr,\n weight_decay=q_weight_decay,\n **optimizer_kwargs\n )\n values_parameters = chain(values_parameters, self._vf.parameters())\n self._values_optimizer = optimizer_class(\n values_parameters,\n lr=qf_lr,\n weight_decay=q_weight_decay,\n **optimizer_kwargs\n )\n\n # Policy optimizer\n self._policy_optimizer = optimizer_class(\n self._policy.parameters(),\n lr=policy_lr,\n weight_decay=policy_weight_decay,\n **optimizer_kwargs\n )\n\n # Alpha optimizer\n self._alpha_optimizer = optimizer_class(\n [self._log_alpha],\n 
lr=policy_lr,\n **optimizer_kwargs\n )\n\n # Weights for policy regularization coefficients\n self.pol_mean_regu_weight = policy_mean_regu_weight\n self.pol_std_regu_weight = policy_std_regu_weight\n self.pol_pre_activation_weight = policy_pre_activation_weight\n\n # Useful Variables for logging\n self.log_data = dict()\n self.log_data['Pol KL Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Qf Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Qf2 Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Vf Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Rewards'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Pol Entropy'] = np.zeros(\n self.num_train_steps_per_epoch\n )\n self.log_data['Pol Log Std'] = np.zeros((\n self.num_train_steps_per_epoch,\n self.explo_env.action_dim,\n ))\n self.log_data['Policy Mean'] = np.zeros((\n self.num_train_steps_per_epoch,\n self.explo_env.action_dim,\n ))\n self.log_data['Alphas'] = np.zeros(self.num_train_steps_per_epoch)\n\n # Tensorboard-like Logging\n self._log_tensorboard = log_tensorboard\n if log_tensorboard:\n self._summary_writer = \\\n tensorboardX.SummaryWriter(log_dir=logger.get_snapshot_dir())\n else:\n self._summary_writer = None\n\n def pretrain(self, n_pretrain_samples):\n # We do not require any pretrain (I think...)\n observation = self.explo_env.reset()\n for ii in range(n_pretrain_samples):\n action = self.explo_env.action_space.sample()\n # Interact with environment\n next_ob, reward, terminal, env_info = (\n self.explo_env.step(action)\n )\n agent_info = None\n\n # Increase counter\n self._n_env_steps_total += 1\n # Create np.array of obtained terminal and reward\n terminal = np.array([terminal])\n reward = np.array([reward])\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_ob,\n agent_info=agent_info,\n env_info=env_info,\n )\n observation = next_ob\n\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n if terminal:\n self.explo_env.reset()\n\n def _do_training(self):\n # Get batch of samples\n batch = self.get_batch()\n\n # Get data from batch\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n # Alpha (Entropy weight in Maximum Entropy objective)\n alpha = self._entropy_scale*torch.clamp(self._log_alpha,\n max=MAX_LOG_ALPHA).exp()\n\n # ############ #\n # Critics Step #\n # ############ #\n rewards = batch['rewards'] * self.reward_scale\n terminals = batch['terminals']\n\n if self._target_vf is None:\n # Get next actions\n next_actions, next_policy_info = self._policy(\n next_obs, return_log_prob=True\n )\n # Get next log pol\n next_log_pi = next_policy_info['log_prob']\n # Intentional Q1(s', a')\n next_q1 = self._target_qf1(next_obs, next_actions)[0]\n if self._qf2 is not None:\n # Intentional Q2(s', a')\n next_q2 = self._target_qf2(next_obs, next_actions)[0]\n # Minimum Unintentional Double-Q\n next_q = torch.min(next_q1, next_q2)\n else:\n next_q = next_q1\n # Vtarget(s')\n v_value_next = next_q - alpha*next_log_pi\n else:\n # Vtarget(s')\n v_value_next = self._target_vf(next_obs)[0]\n\n # Calculate Bellman Backup for Q-value\n q_backup = rewards + (1. 
- terminals) * self.discount * v_value_next\n q_backup = q_backup.detach() # Detach gradient computations\n\n # Q1(s, a)\n q1_pred = self._qf(obs, actions)[0]\n\n # QF1 Loss: Mean Squared Bellman Equation (MSBE)\n qf1_loss = 0.5 * torch.mean((q_backup - q1_pred)**2)\n\n # # Update Q1-value function\n # self._qf1_optimizer.zero_grad()\n # qf1_loss.backward()\n # self._qf1_optimizer.step()\n\n if self._qf2 is not None:\n # Q2(s, a)\n q2_pred = self._qf2(obs, actions)[0]\n\n # QF2 Loss: Mean Squared Bellman Equation (MSBE)\n qf2_loss = 0.5 * torch.mean((q_backup - q2_pred)**2)\n\n # # Update Q2-value function\n # self._qf2_optimizer.zero_grad()\n # qf2_loss.backward()\n # self._qf2_optimizer.step()\n else:\n qf2_loss = 0\n\n # ########## #\n # Actor Step #\n # ########## #\n # Calculate Policy Loss\n new_actions, policy_info = self._policy(\n obs, return_log_prob=True\n )\n log_pi = policy_info['log_prob']\n policy_mean = policy_info['mean']\n policy_log_std = policy_info['log_std']\n pre_tanh_value = policy_info['pre_tanh_value']\n\n if self._action_prior == 'normal':\n raise NotImplementedError\n else:\n policy_prior_log_probs = 0.0\n\n # Q1(s, a)\n q1_new_actions = self._qf(obs, new_actions)[0]\n\n if self._qf2 is not None:\n # Q2(s, a)\n q2_new_actions = self._qf2(obs, new_actions)[0]\n # Minimum Double-Q\n q_new_actions = torch.min(q1_new_actions, q2_new_actions)\n else:\n q_new_actions = q1_new_actions\n\n # Policy KL loss\n # policy_kl_losses = (\n # alpha*log_pi - q_new_actions - policy_prior_log_probs\n # )\n policy_kl_losses = -(\n q_new_actions - alpha*log_pi + policy_prior_log_probs\n )\n policy_kl_loss = torch.mean(policy_kl_losses)\n\n # Policy regularization loss\n mean_reg_loss = self.pol_mean_regu_weight * (policy_mean ** 2).mean()\n std_reg_loss = self.pol_std_regu_weight * (policy_log_std ** 2).mean()\n pre_activ_reg_loss = self.pol_pre_activation_weight * (\n (pre_tanh_value**2).sum(dim=1).mean()\n )\n policy_regu_loss = mean_reg_loss + std_reg_loss + pre_activ_reg_loss\n\n policy_loss = policy_kl_loss + policy_regu_loss\n\n # Update Policy\n self._policy_optimizer.zero_grad()\n policy_loss.backward()\n self._policy_optimizer.step()\n\n # ############### #\n # V-function Step #\n # ############### #\n # V(s)\n if self._vf is None:\n v_pred = q_new_actions - alpha*log_pi\n vf_loss = 0\n else:\n v_pred = self._vf(obs)[0]\n # Calculate Bellman Backup for V-value\n v_backup = q_new_actions - alpha*log_pi + policy_prior_log_probs\n v_backup = v_backup.detach()\n # Calculate Intentional Vf Loss\n vf_loss = 0.5*torch.mean((v_backup - v_pred)**2)\n\n # # Update V-value function\n # self._vf_optimizer.zero_grad()\n # vf_loss.backward()\n # self._vf_optimizer.step()\n\n # TODO: New all values optimizer\n values_loss = qf1_loss + qf2_loss + vf_loss\n self._values_optimizer.zero_grad()\n values_loss.backward()\n self._values_optimizer.step()\n\n # ###################### #\n # Update Target Networks #\n # ###################### #\n if self._n_total_train_steps % self._target_update_interval == 0:\n if self._target_vf is None:\n # Update Q-value Target Network(s)\n ptu.soft_update_from_to(\n self._qf,\n self._target_qf1,\n self._soft_target_tau\n )\n if self._target_qf2 is not None:\n ptu.soft_update_from_to(\n self._qf2,\n self._target_qf2,\n self._soft_target_tau\n )\n else:\n # Update V-value Target Network\n ptu.soft_update_from_to(\n self._vf,\n self._target_vf,\n self._soft_target_tau\n )\n\n advantages_new_actions = q_new_actions - v_pred.detach()\n\n # ##### #\n # Alpha #\n # 
##### #\n if self._auto_alpha:\n log_alpha = self._log_alpha.clamp(max=MAX_LOG_ALPHA)\n alpha_loss = -torch.mean(log_alpha *\n (log_pi + self._tgt_entro).detach()\n )\n self._alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self._alpha_optimizer.step()\n\n # ############### #\n # LOG Useful Data #\n # ############### #\n step_idx = self._n_epoch_train_steps\n self.log_data['Pol Entropy'][step_idx] = \\\n ptu.get_numpy(-log_pi.mean(dim=0))\n self.log_data['Pol Log Std'][step_idx] = \\\n ptu.get_numpy(policy_log_std.mean())\n self.log_data['Policy Mean'][step_idx] = \\\n ptu.get_numpy(policy_mean.mean())\n self.log_data['Pol KL Loss'][step_idx] = \\\n ptu.get_numpy(policy_kl_loss)\n self.log_data['Qf Loss'][step_idx] = ptu.get_numpy(qf1_loss)\n if self._qf2 is not None:\n self.log_data['Qf2 Loss'][step_idx] = ptu.get_numpy(qf2_loss)\n self.log_data['Vf Loss'][step_idx] = ptu.get_numpy(vf_loss)\n self.log_data['Rewards'][step_idx] = ptu.get_numpy(rewards.mean(dim=0))\n self.log_data['Alphas'][step_idx] = ptu.get_numpy(alpha)\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'Training/qf_loss',\n ptu.get_numpy(qf1_loss),\n self._n_env_steps_total\n )\n if self._qf2 is not None:\n self._summary_writer.add_scalar(\n 'Training/qf2_loss',\n ptu.get_numpy(qf2_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/vf_loss',\n ptu.get_numpy(vf_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/avg_reward',\n ptu.get_numpy(rewards.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/avg_advantage',\n ptu.get_numpy(advantages_new_actions.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/policy_loss',\n ptu.get_numpy(policy_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/policy_entropy',\n ptu.get_numpy(-log_pi.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/policy_mean',\n ptu.get_numpy(policy_mean.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/policy_std',\n ptu.get_numpy(policy_log_std.mean().exp()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'Training/q_vals',\n ptu.get_numpy(q_new_actions.mean()),\n self._n_env_steps_total\n )\n\n if self._n_env_steps_total % 500 == 0:\n for name, param in self._policy.named_parameters():\n self._summary_writer.add_histogram(\n 'policy/'+name,\n param.data.cpu().numpy(),\n self._n_env_steps_total\n )\n self._summary_writer.add_histogram(\n 'policy_grad/'+name,\n param.grad.data.cpu().numpy(),\n self._n_env_steps_total\n )\n\n for name, param in self._qf.named_parameters():\n self._summary_writer.add_histogram(\n 'qf/'+name,\n param.data.cpu().numpy(),\n self._n_env_steps_total\n )\n self._summary_writer.add_histogram(\n 'qf_grad/'+name,\n param.grad.data.cpu().numpy(),\n self._n_env_steps_total\n )\n if self._qf2 is not None:\n for name, param in self._qf2.named_parameters():\n self._summary_writer.add_histogram(\n 'qf2/'+name,\n param.data.cpu().numpy(),\n self._n_env_steps_total\n )\n self._summary_writer.add_histogram(\n 'qf2_grad/'+name,\n param.grad.data.cpu().numpy(),\n self._n_env_steps_total\n )\n\n for name, param in self._vf.named_parameters():\n self._summary_writer.add_histogram(\n 'vf/'+name,\n param.data.cpu().numpy(),\n self._n_env_steps_total\n )\n self._summary_writer.add_histogram(\n 'vf_grad/'+name,\n param.grad.data.cpu().numpy(),\n self._n_env_steps_total\n 
)\n\n for name, param in self._target_vf.named_parameters():\n self._summary_writer.add_histogram(\n 'vf_target/'+name,\n param.cpu().data.numpy(),\n self._n_env_steps_total\n )\n\n def _not_do_training(self):\n return\n\n @property\n def torch_models(self):\n networks_list = [\n self._policy,\n self._qf,\n ]\n if self._qf2 is not None:\n networks_list.append(self._qf2)\n\n if self._vf is not None:\n networks_list.append(self._vf)\n networks_list.append(self._target_vf)\n else:\n networks_list.append(self._target_qf1)\n if self._qf2 is not None:\n networks_list.append(self._target_qf2)\n\n return networks_list\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n snapshot = RLAlgorithm.get_epoch_snapshot(self, epoch)\n\n snapshot.update(\n policy=self._policy,\n qf=self._qf,\n qf2=self._qf2, # It could be None\n vf=self._vf,\n target_vf=self._target_vf,\n target_qf1=self._target_qf1,\n target_qf2=self._target_qf2,\n )\n\n if self.explo_env.online_normalization or self.explo_env.normalize_obs:\n snapshot.update(\n obs_mean=self.explo_env.obs_mean,\n obs_var=self.explo_env.obs_var,\n )\n\n # Observation Normalizer\n snapshot.update(\n obs_normalizer=self._obs_normalizer,\n )\n\n # Replay Buffer\n if self.save_replay_buffer:\n snapshot.update(\n replay_buffer=self.replay_buffer,\n )\n\n return snapshot\n\n def _update_logging_data(self):\n max_step = max(self._n_epoch_train_steps, 1)\n\n if self.eval_statistics is None:\n self.eval_statistics = OrderedDict()\n\n # Intentional info\n self.eval_statistics['[I] Policy Entropy'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol Entropy'][:max_step]\n ))\n self.eval_statistics['[I] Qf Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step]\n ))\n if self._qf2 is not None:\n self.eval_statistics['[I] Qf2 Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf2 Loss'][:max_step]\n ))\n self.eval_statistics['[I] Vf Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Vf Loss'][:max_step]\n ))\n self.eval_statistics['[I] Pol KL Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol KL Loss'][:max_step]\n ))\n self.eval_statistics['[I] Rewards'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Rewards'][:max_step]\n ))\n self.eval_statistics['[I] Policy Std'] = \\\n np.nan_to_num(np.mean(\n np.exp(self.log_data['Pol Log Std'][:max_step])\n ))\n self.eval_statistics['[I] Policy Mean'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Mean'][:max_step]\n ))\n self.eval_statistics['[I] Alphas'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Alphas'][:max_step]\n ))\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n self._update_logging_data()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"[I] Test\",\n ))\n\n if self._exploration_paths:\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n else:\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Exploration\",\n ))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'Evaluation/avg_return',\n statistics['[I] Test Returns Mean'],\n self._n_epochs\n )\n\n 
self._summary_writer.add_scalar(\n 'Evaluation/avg_reward',\n statistics['[I] Test Rewards Mean'],\n self._n_epochs\n )\n\n if hasattr(self.explo_env, \"log_diagnostics\"):\n pass\n # # TODO: CHECK ENV LOG_DIAGNOSTICS\n # print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n # Epoch Plotter\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n\n # Reset log_data\n for key in self.log_data.keys():\n self.log_data[key].fill(0)\n\n def get_batch(self):\n batch = self.replay_buffer.random_batch(self.batch_size)\n\n if self._obs_normalizer is not None:\n batch['observations'] = \\\n self._obs_normalizer.normalize(batch['observations'])\n batch['next_observations'] = \\\n self._obs_normalizer.normalize(batch['next_observations'])\n\n return batch\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n # Update observation normalizer (if applicable)\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n RLAlgorithm._handle_step(\n self,\n observation=observation,\n action=action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _end_rollout(self):\n \"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n\n self.replay_buffer.terminate_episode()\n\n RLAlgorithm._end_rollout(self)\n" }, { "alpha_fraction": 0.583776593208313, "alphanum_fraction": 0.5984042286872864, "avg_line_length": 19.324323654174805, "blob_id": "1395055d1fda1f80b7ea0f8657b5646dca401272", "content_id": "e3a6ad8a679ccaa8e322cb5f8c7e270c052e091a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "permissive", "max_line_length": 50, "num_lines": 37, "path": "/examples/simple_envs/test_cliff.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from builtins import input\nfrom robolearn.envs.simple_envs import CliffEnv\n\nenv_params = dict(\n desc=None,\n map_name='4x12',\n is_slippery=True,\n reward_dict=None,\n nA=4,\n)\nenv = CliffEnv(**env_params)\n\n# for ii in range(400):\n# env.reset()\n# env.render()\n\nenv.reset()\nenv.render()\n# input('Press a key to start...')\n\nfor ii in range(50):\n action = env.action_space.sample()\n obs, reward, done, env_info = env.step(action)\n print('---'*3, ii, '---'*3)\n print('action -->', action)\n print('obs -->', obs)\n print('reward -->', reward)\n print('done -->', done)\n print('info -->', env_info)\n env.render()\n\ninput('Press a key to reset...')\n\nenv.reset()\nenv.render()\n\ninput('Press a key to close the script')\n" }, { "alpha_fraction": 0.5641564726829529, "alphanum_fraction": 0.5675221085548401, "avg_line_length": 31.561643600463867, "blob_id": "e73bd74cc328f8b791fbd66d6eaa3396f780d1ba", "content_id": "a6846f8636bc003a637f9feea1aea701f9e0d54e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2377, "license_type": "permissive", "max_line_length": 
78, "num_lines": 73, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/policies/lin_gauss_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.models.policies import Policy\n\n\nclass LinearGaussianPolicy(Policy):\n \"\"\"\n Time-varying linear Gaussian policy.\n U = K*x + k + noise, where noise ~ N(0, chol_pol_covar)\n \"\"\"\n def __init__(self, K, k, pol_covar, chol_pol_covar, inv_pol_covar,\n max_std=0.1):\n action_dim = K.shape[0]\n Policy.__init__(self, action_dim=action_dim)\n\n self.T = K.shape[0]\n self.dU = K.shape[1]\n self.dX = K.shape[2]\n\n self.max_std = max_std\n self.max_var = max_std**2\n\n check_shape(k, (self.T, self.dU))\n check_shape(pol_covar, (self.T, self.dU, self.dU))\n check_shape(chol_pol_covar, (self.T, self.dU, self.dU))\n check_shape(inv_pol_covar, (self.T, self.dU, self.dU))\n\n self.K = K\n self.k = k\n self.pol_covar = pol_covar\n self.chol_pol_covar = chol_pol_covar\n self.inv_pol_covar = inv_pol_covar\n\n def get_action(self, state, t, noise):\n x = state\n u = self.K[t].dot(x) + self.k[t]\n\n # u += self.chol_pol_covar[t].T.dot(noise)\n u += (np.clip(self.chol_pol_covar[t],\n -self.max_std, self.max_std)).T.dot(noise)\n\n return u, dict()\n\n def nans_like(self):\n \"\"\"\n Returns:\n A new linear Gaussian policy object with the same dimensions\n but all values filled with NaNs.\n \"\"\"\n policy = LinearGaussianPolicy(\n np.zeros_like(self.K), np.zeros_like(self.k),\n np.zeros_like(self.pol_covar), np.zeros_like(self.chol_pol_covar),\n np.zeros_like(self.inv_pol_covar)\n )\n policy.K.fill(np.nan)\n policy.k.fill(np.nan)\n policy.pol_covar.fill(np.nan)\n policy.chol_pol_covar.fill(np.nan)\n policy.inv_pol_covar.fill(np.nan)\n return policy\n\n\ndef check_shape(value, expected_shape, name=''):\n \"\"\"\n Throws a ValueError if value.shape != expected_shape.\n Args:\n value: Matrix to shape check.\n expected_shape: A tuple or list of integers.\n name: An optional name to add to the exception message.\n AUTHOR: github.com:cbfinn/gps.git\n \"\"\"\n if value.shape != tuple(expected_shape):\n raise ValueError('Shape mismatch %s: Expected %s, got %s' %\n (name, str(expected_shape), str(value.shape)))\n" }, { "alpha_fraction": 0.6207920908927917, "alphanum_fraction": 0.6340594291687012, "avg_line_length": 26.150537490844727, "blob_id": "9c3f311c243577a03a41d7166f0575dbe427c9f9", "content_id": "b0cf1e7c747d9f3eee766b15591b8216c0c53f0e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5050, "license_type": "permissive", "max_line_length": 68, "num_lines": 186, "path": "/examples/miscellaneous/test_weighted_multipol.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.policies import TanhGaussianWeightedMultiPolicy\n\nbatch_size = 5\nobs_dim = 4\naction_dim = 3\nn_policies = 2\n\nnn_pol = TanhGaussianWeightedMultiPolicy(\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=[42],\n unshared_hidden_sizes=[42, 42],\n unshared_mix_hidden_sizes=[42, 42, 42],\n shared_layer_norm=True,\n policies_layer_norm=True,\n mixture_layer_norm=True,\n mixing_temperature=1,\n)\n\nprint('##'*10)\nprint(nn_pol)\nprint('##'*10)\nprint('MODULE PARAMETERS:')\nfor name, p in nn_pol.named_parameters():\n print(name, p.shape)\nprint('##'*10)\nprint('SHARED PARAMETERS:')\nfor name, p in nn_pol.named_shared_parameters():\n print(name, p.shape)\n print(p.data)\n 
print('.')\nprint('##'*10)\nprint('MIXING PARAMETERS:')\nfor name, p in nn_pol.named_mixing_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\nprint('##'*10)\nprint('ALL POLICIES PARAMETERS:')\nfor name, p in nn_pol.named_policies_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\nprint('##'*10)\nprint('SPECIFIC POLICY PARAMETERS:')\nfor pol_idx in range(nn_pol.n_heads):\n print('--- POLICY ', pol_idx, ' ---')\n for name, p in nn_pol.named_policies_parameters(idx=pol_idx):\n print(name, p.shape)\n print(p.data)\n print('.')\nprint('##\\n'*5)\nfor param in nn_pol.parameters():\n print(param.shape)\n\nprint('##\\n'*5)\ninput(\"Press a key to start training...\")\n\n\nobs = torch.rand((batch_size, obs_dim))\n\nact_des = torch.rand((batch_size, action_dim))\n# act_des = torch.tensor([[0.1],\n# [0.1],\n# [0.3]])\nact_des = act_des.uniform_(-1, 1)\n\n# o = nn_pol(a, _val_idxs=[0], deterministic=True)\n# error = torch.sum(b0 - o[0][0])\n\nloss_fn = torch.nn.MSELoss(size_average=False)\nlearning_rate = 1e-2\noptimizer_pol = torch.optim.Adam([\n {'params': nn_pol.mixing_parameters(),\n 'lr': learning_rate},\n {'params': nn_pol.policies_parameters(),\n 'lr': learning_rate},\n {'params': nn_pol.shared_parameters(),\n 'lr': learning_rate},\n])\n\nprint('obs shape:', obs.shape)\nprint('action shape:', act_des.shape)\n\n\nshared_params_initial = list()\nfor param in nn_pol.shared_parameters():\n shared_params_initial.append(param.data.clone())\npolicies_params_initial = list()\nfor param in nn_pol.policies_parameters():\n policies_params_initial.append(param.data.clone())\nmixing_params_initial = list()\nfor param in nn_pol.mixing_parameters():\n mixing_params_initial.append(param.data.clone())\n\noutput_initial = nn_pol(obs, deterministic=True)\n\nfor tt in range(1000):\n act_pred, policy_info = nn_pol(obs, deterministic=False,\n optimize_policies=False,\n return_log_prob=True)\n\n log_pi = policy_info['log_prob']\n policy_mean = policy_info['mean']\n policy_log_std = policy_info['log_std']\n pre_tanh_value = policy_info['pre_tanh_value']\n print(log_pi)\n\n # loss = loss_fn(act_pred, act_des)\n loss = loss_fn(log_pi, act_des[:, 0].unsqueeze(dim=-1))\n # loss = loss_fn(policy_mean, act_des)\n # loss = loss_fn(policy_log_std, act_des)\n # loss = loss_fn(pre_tanh_value, act_des)\n\n print('t=', tt, '| loss=', loss.item())\n\n optimizer_pol.zero_grad()\n loss.backward()\n\n if tt == 0:\n print('Showing the gradients')\n for name, param in nn_pol.named_parameters():\n print('----')\n print(name, '\\n', param.grad)\n input('Press a key to continue training...')\n\n optimizer_pol.step()\n\n# error.backward()\n\nprint('='*10)\nprint('='*10)\noutput = nn_pol(obs, deterministic=True)\nprint('Initial output')\nfor key, val in output_initial[1].items():\n print(key, '\\n', val)\nprint('==')\nprint('Final output')\nfor key, val in output[1].items():\n print(key, '\\n', val)\nprint('action_des', act_des)\nprint('action_pred_initial', output_initial[0])\nprint('action_pred', output[0])\nprint('action_one_by_one')\nfor ii in range(batch_size):\n print(ii, '-->', nn_pol(obs[ii], deterministic=True)[0])\n\nprint('_______ DEBUG___')\nnn_pol(obs, deterministic=True, print_debug=True)\n\nprint('_______ DEBUG___POL_IDX-0')\nnn_pol(obs, deterministic=True, print_debug=True, pol_idx=0)\n\nprint('_______ DEBUG___POL_IDX-1')\nnn_pol(obs, deterministic=True, print_debug=True, pol_idx=1)\n\n\ninput('Show parameters...')\n\nprint('##\\n'*2)\n\nshared_params_final = list()\nfor param in 
nn_pol.shared_parameters():\n shared_params_final.append(param.data.clone())\npolicies_params_final = list()\nfor param in nn_pol.policies_parameters():\n policies_params_final.append(param.data.clone())\nmixing_params_final = list()\nfor param in nn_pol.mixing_parameters():\n mixing_params_final.append(param.data.clone())\n\nprint('##\\n'*2)\nprint('LOSS', loss)\nfor name, param in nn_pol.named_parameters():\n print('--')\n print('NAME', name)\n print('DATA', param.data)\n print('GRAD', param.grad)\n\nprint('init_shared')\nprint(shared_params_initial)\nprint('final_shared')\nprint(shared_params_final)\ninput('wuuu')\n" }, { "alpha_fraction": 0.5270439982414246, "alphanum_fraction": 0.5345911979675293, "avg_line_length": 29.576923370361328, "blob_id": "7854626212c6b37687588259e4f894b86f3fcc96", "content_id": "b918b5aee1c73feb9dff50dcb33c500bde824d90", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "permissive", "max_line_length": 80, "num_lines": 26, "path": "/robolearn/utils/stdout/progress_bar.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\n\n\nclass ProgressBar(object):\n def __init__(self, max_val, total_lines=50, bar_title=None, bar_symbol='#'):\n self.max_val = max_val\n self.total_lines = total_lines\n self.bar_symbol = bar_symbol\n\n if bar_title is None:\n bar_title = ''\n\n sys.stdout.write(bar_title + \": [\" + \"-\" * (self.total_lines-1) + \"]\" +\n chr(8) * self.total_lines)\n sys.stdout.flush()\n self.progress = 0\n\n def update(self, i):\n x = int(i * self.total_lines // self.max_val)\n sys.stdout.write(self.bar_symbol * (x - self.progress))\n sys.stdout.flush()\n self.progress = x\n\n def end(self):\n sys.stdout.write(\"#\" * (self.total_lines - self.progress - 1) + \"]\\n\")\n sys.stdout.flush()\n" }, { "alpha_fraction": 0.5476374626159668, "alphanum_fraction": 0.5639039278030396, "avg_line_length": 22.053571701049805, "blob_id": "aadd61de80de88a766d1c505a2eb93506fdbd7f1", "content_id": "81c707f3b4fe228815d882152a285aa1aa4a7ff0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "permissive", "max_line_length": 93, "num_lines": 56, "path": "/scripts/plot_multigoal_multiq_fcn.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\nimport joblib\n# import IPython\n\nfrom robolearn.envs.simple_envs.multigoal_deprecated.multigoal_q_plot_ import QFPolicyPlotter\n\n\ndef main(args):\n data = joblib.load(args.file)\n qfs = data['qfs']\n\n if args.deterministic:\n print('Using the deterministic version of the _i_policy.')\n policies = [data['_i_policy'] for _ in range(len(qfs))]\n else:\n print('Using the stochastic _i_policy.')\n policies = data['trained_policies']\n\n # q_fcn_positions = [\n # (-2.5, 0.0),\n # (0.0, 0.0),\n # (2.5, 2.5)\n # ]\n q_fcn_positions = [\n (5, 5),\n (0, 0),\n (-5, 5)\n ]\n\n # QF Plot\n plotter = QFPolicyPlotter(\n qf=qfs,\n policy=policies,\n obs_lst=q_fcn_positions,\n default_action=[np.nan, np.nan],\n n_samples=100,\n render=True,\n )\n\n plotter.draw()\n\n # IPython.embed()\n return plotter\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str,\n help='path to the snapshot file')\n parser.add_argument('--deterministic', action=\"store_true\")\n\n args = parser.parse_args()\n plotter = main(args)\n\n 
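    # A minimal usage sketch for the ProgressBar utility from the
    # progress_bar.py record above (illustrative only; the import path is
    # inferred from that record's "path" field): update() advances the bar
    # and end() completes it.
    from robolearn.utils.stdout.progress_bar import ProgressBar
    bar = ProgressBar(100, bar_title='Sampling')
    for i in range(100):
        bar.update(i)
    bar.end()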
input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.5293024182319641, "alphanum_fraction": 0.5781868696212769, "avg_line_length": 47.041748046875, "blob_id": "53eb6b7aeef539c1877564e5c8e9ffe09bd23534", "content_id": "f3403be679edd8230f870e02062c30fe3c62c54c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49484, "license_type": "permissive", "max_line_length": 174, "num_lines": 1030, "path": "/scenarios/multi-bigman-reach-drill.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\n\nimport numpy as np\nfrom robolearn.old_utils.sampler import Sampler\n\nfrom robolearn.old_agents import GPSAgent\nfrom robolearn.old_algos.gps.multi_gps import MULTIGPS\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\nfrom robolearn.old_envs import BigmanEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_dual_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior  # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.robot_model import RobotModel\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import Reset_condition_bigman_drill_gazebo\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_bigman_drill_condition\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_drill_relative_pose\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_hand_relative_pose\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import spawn_drill_gazebo\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import task_space_torque_control_dual_demos, \\\n    load_task_space_torque_control_dual_demos\nfrom robolearn.old_utils.traj_opt.traj_opt_mdreps import TrajOptMDREPS\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\n\n\ndef kill_everything(_signal=None, _frame=None):\n    print(\"\\n\\033[1;31mThe script has been killed by the user!!\")\n    os._exit(1)\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\nlearning_algorithm = 'MDREPS'\n# Task parameters\nTs = 0.02  # 0.01\nTinterm = 0#2\nTreach = 5#3.5\nTlift = 0.0  # 3.8\nTinter = 0 #0.5\nTend = 0  # 0.7\n# EndTime = 4  # Using final time to define the horizon\nEndTime = Tinterm + Treach + Tinter + Tlift + Tend  # Using final time to define the horizon\ninit_with_demos = False\ngenerate_dual_sets = False\ndemos_dir = None  # 'TASKSPACE_TORQUE_CTRL_DEMO_2017-07-21_16:32:39'\ndual_dir = None  # 'DUAL_DEMOS_2017-09-07_16:10:49' #None #'DUAL_DEMOS_2017-09-07_14:20:59'\n#seed = 6 previous 04/09/17 17:30 pm\nseed = 0\n\nrandom.seed(seed)\nnp.random.seed(seed)\n\n# 
BOX\ndrill_x = 0.70\ndrill_y = 0.00\ndrill_z = -0.1327\ndrill_yaw = 0 # Degrees\ndrill_size = [0.1, 0.1, 0.3]\n#drill_size = [0.11, 0.11, 0.3] # Beer\nfinal_drill_height = 0.0\ndrill_relative_pose = create_drill_relative_pose(drill_x=drill_x, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw)\n\n# Robot Model (It is used to calculate the IK cost)\n#robot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf_file)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\n\ntouching_drill_config = np.array([0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.,\n 0., 0., 0.,\n 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633,\n 0., 0.,\n 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\nchange_print_color.change('BLUE')\nprint(\"\\nCreating Bigman environment...\")\n\n# Robot configuration\ninterface = 'ros'\nbody_part_active = 'RA'\nbody_part_sensed = 'RA'\ncommand_type = 'effort'\n\nif body_part_active == 'RA':\n hand_y = -drill_size[1]/2-0.02\n hand_z = drill_size[2]/2+0.02\n hand_name = RH_name\n hand_offset = r_soft_hand_offset\nelse:\n hand_y = drill_size[1]/2+0.02\n hand_z = drill_size[2]/2+0.02\n hand_name = LH_name\n hand_offset = l_soft_hand_offset\n\nhand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=hand_y, hand_z=hand_z, hand_yaw=0)\n\n\nobject_name = 'drill'\nobject_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=hand_y, hand_z=hand_z, hand_yaw=0)\n\n\nreset_condition_bigman_drill_gazebo_fcn = Reset_condition_bigman_drill_gazebo()\n\n# Target object pose\ndrill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z, drill_yaw=drill_yaw)\n\nobservation_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'ros_topic': '/xbotcore/bigman/joint_states',\n # 'fields': ['link_position', 'link_velocity', 'effort'],\n 'fields': ['link_position', 'link_velocity'],\n # 'joints': bigman_params['joint_ids']['UB']},\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n # {'name': 'ft_right_arm',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/r_arm_ft',\n # 'fields': ['force', 'torque']},\n\n {'name': 'distance_hand',\n 'type': 'fk_pose',\n 'body_name': hand_name,\n 'body_offset': hand_offset,\n 'target_offset': hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n {'name': 'distance_object',\n 'type': 'object_pose',\n 'body_name': object_name,\n #'target_rel_pose': drill_relative_pose,\n 'target_rel_pose': drill_pose3,\n 'fields': ['orientation', 'position']},\n ]\n\nstate_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'fields': ['link_position', 'link_velocity'],\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n {'name': 'distance_hand',\n 'type': 'fk_pose',\n 'body_name': hand_name,\n 'body_offset': hand_offset,\n 'target_offset': hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n {'name': 'distance_object',\n 'type': 'object_pose',\n 'body_name': object_name,\n #'target_rel_pose': 
drill_relative_pose,\n 'target_rel_pose': drill_pose3,\n 'fields': ['orientation', 'position']},\n ]\n\noptional_env_params = {\n 'temp_object_name': 'drill'\n}\n\n# Spawn Box first because it is simulation\nspawn_drill_gazebo(drill_relative_pose, drill_size=drill_size)\n\n\n# Create a BIGMAN ROS EnvInterface\nbigman_env = BigmanEnv(interface=interface, mode='simulation',\n body_part_active=body_part_active, command_type=command_type,\n observation_active=observation_active,\n state_active=state_active,\n cmd_freq=int(1/Ts),\n robot_dyn_model=robot_model,\n optional_env_params=optional_env_params,\n reset_simulation_fcn=reset_condition_bigman_drill_gazebo_fcn)\n # reset_simulation_fcn=reset_condition_bigman_drill_gazebo)\n\naction_dim = bigman_env.action_dim\nstate_dim = bigman_env.state_dim\nobservation_dim = bigman_env.obs_dim\n\nprint(\"Bigman Environment OK. body_part_active:%s (action_dim=%d). Command_type:%s\" % (body_part_active, action_dim,\n command_type))\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nchange_print_color.change('CYAN')\nprint(\"\\nCreating Bigman Agent...\")\n\npolicy_params = [\n {\n 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'network_params': {\n 'n_layers': 2, # Hidden layers??\n 'dim_hidden': [40, 40], # List of size per n_layers\n 'obs_names': bigman_env.get_obs_info()['names'],\n 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n },\n # Initialization.\n 'init_var': 0.1, # Initial policy variance.\n 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # Solver hyperparameters.\n 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n 'batch_size': 15,\n 'lr': 0.001, # Base learning rate (by default it's fixed).\n 'lr_policy': 'fixed', # Learning rate policy.\n 'momentum': 0.9, # Momentum.\n 'weight_decay': 0.005, # Weight decay.\n 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # set gpu usage.\n 'use_gpu': 1, # Whether or not to use the GPU for training.\n 'gpu_id': 0,\n 'random_seed': 1,\n 'fc_only_iterations': 0, # TODO: Only forwardcontrol? if it is CNN??\n 'gpu_mem_percentage': 0.2,\n # 'weights_file_prefix': EXP_DIR + 'policy',\n },\n # {\n # 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n # 'network_params': {\n # 'n_layers': 1, # Hidden layers??\n # 'dim_hidden': [40], # List of size per n_layers\n # 'obs_names': bigman_env.get_obs_info()['names'],\n # 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n # },\n # # Initialization.\n # 'init_var': 0.1, # Initial policy variance.\n # 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # # Solver hyperparameters.\n # 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n # 'batch_size': 15,\n # 'lr': 0.001, # Base learning rate (by default it's fixed).\n # 'lr_policy': 'fixed', # Learning rate policy.\n # 'momentum': 0.9, # Momentum.\n # 'weight_decay': 0.005, # Weight decay.\n # 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # # set gpu usage.\n # 'use_gpu': 1, # Whether or not to use the GPU for training.\n # 'gpu_id': 0,\n # 'random_seed': 1,\n # 'fc_only_iterations': 0, # TODO: Only forwardcontrol? 
if it is CNN??\n # 'gpu_mem_percentage': 0.2,\n # # 'weights_file_prefix': EXP_DIR + 'policy',\n # },\n # {\n # 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n # 'network_params': {\n # 'n_layers': 1, # Hidden layers??\n # 'dim_hidden': [40], # List of size per n_layers\n # 'obs_names': bigman_env.get_obs_info()['names'],\n # 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n # },\n # # Initialization.\n # 'init_var': 0.1, # Initial policy variance.\n # 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # # Solver hyperparameters.\n # 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n # 'batch_size': 15,\n # 'lr': 0.001, # Base learning rate (by default it's fixed).\n # 'lr_policy': 'fixed', # Learning rate policy.\n # 'momentum': 0.9, # Momentum.\n # 'weight_decay': 0.005, # Weight decay.\n # 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # # set gpu usage.\n # 'use_gpu': 1, # Whether or not to use the GPU for training.\n # 'gpu_id': 0,\n # 'random_seed': 1,\n # 'fc_only_iterations': 0, # TODO: Only forwardcontrol? if it is CNN??\n # 'gpu_mem_percentage': 0.2,\n # # 'weights_file_prefix': EXP_DIR + 'policy',\n # },\n ]\nbigman_agents = list()\nfor pp, pol_param in enumerate(policy_params):\n policy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': pol_param\n }\n\n bigman_agents.append(GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt,\n agent_name=\"bigman_agent\"+str(pp)))\n print(\"Bigman Agent:%s OK\\n\" % type(bigman_agents[-1]))\nprint(\"TOTAL BIGMAN AGENTS: %d\" % len(bigman_agents))\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n# Action Cost\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\ntarget_distance_hand = np.zeros(6)\n# target_distance_hand[-2] = -0.02 # Yoffset\n# target_distance_hand[-1] = 0.1 # Zoffset\n\ntarget_distance_object = np.zeros(6)\nstate_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 0.0, # Weight for l2 norm\n 'alpha': 1e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'distance_object': {\n # 'wp': np.ones_like(target_state), # State weights - must be set.\n 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': bigman_env.get_state_info(name='distance_object')['idx']\n },\n },\n}\nstate_final_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 0.0, # Weight for l2 norm\n 'alpha': 1e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10.0, # Weight multiplier on final time step.\n 'data_types': {\n 'distance_object': {\n # 'wp': np.ones_like(target_state), # State weights - must be set.\n 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': bigman_env.get_state_info(name='distance_object')['idx']\n },\n },\n}\n\nfk_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_l1_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_l2_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n #'wp': np.array([1.0, 1.0, 1.0, 10.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\nfk_l1_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\nfk_l2_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\ncost_sum = {\n 'type': CostSum,\n # 'costs': [act_cost, state_cost_distance],\n # 'weights': [1.0e-2, 1.0e-0],\n # 'costs': [act_cost, LAfk_cost, RAfk_cost, state_cost],\n # 'weights': [1.0e-2, 1.0e-0, 1.0e-0, 5.0e-1],\n #'costs': [act_cost, LAfk_cost, LAfk_final_cost],\n #'weights': [1.0e-1, 1.0e-0, 1.0e-0],\n 'costs': [act_cost, fk_l1_cost, fk_l2_cost, fk_l1_final_cost, fk_l2_final_cost, state_cost_distance, state_final_cost_distance],\n 'weights': [1.0e-1, 1.5e-1, 1.0e-0, 1.5e-1, 1.0e-0, 5.0e+0, 1.0e+1],\n # 'costs': [act_cost, state_cost],#, LAfk_cost, RAfk_cost],\n # 'weights': [0.1, 5.0],\n}\n\n\n# ########## #\n# ########## #\n# Conditions #\n# ########## #\n# ########## #\ndrill_relative_poses = [] # Used only in dual demos\n\n# q0 = np.zeros(31)\n# q0[15] = np.deg2rad(25)\n# q0[16] = np.deg2rad(40)\n# q0[18] = np.deg2rad(-75)\n# #q0[15:15+7] = [0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633]\n# q0[24] = np.deg2rad(25)\n# q0[25] = np.deg2rad(-40)\n# q0[27] = np.deg2rad(-75)\n# #q0[24:24+7] = [0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633]\n# drill_pose0 = drill_relative_pose.copy()\n# condition0 = create_bigman_drill_condition(q0, drill_pose0, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition0)\n# 
reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose0)\n# drill_relative_poses.append(drill_pose0)\n\n# # q1 = q0.copy()\n# q1 = np.zeros(31)\n# q1[15] = np.deg2rad(25)\n# q1[16] = np.deg2rad(40)\n# q1[18] = np.deg2rad(-45)\n# q1[20] = np.deg2rad(-5)\n# q1[24] = np.deg2rad(25)\n# q1[25] = np.deg2rad(-40)\n# q1[27] = np.deg2rad(-45)\n# q1[29] = np.deg2rad(-5)\n# drill_pose1 = create_drill_relative_pose(drill_x=drill_x+0.02, drill_y=drill_y+0.02, drill_z=drill_z, drill_yaw=drill_yaw+5)\n# condition1 = create_bigman_drill_condition(q1, drill_pose1, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition1)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose1)\n# drill_relative_poses.append(drill_pose1)\n\n# # q2 = q0.copy()\n# q2 = np.zeros(31)\n# q2[15] = np.deg2rad(25)\n# q2[16] = np.deg2rad(30)\n# q2[18] = np.deg2rad(-50)\n# q2[21] = np.deg2rad(-45)\n# q2[24] = np.deg2rad(25)\n# q2[25] = np.deg2rad(-30)\n# q2[27] = np.deg2rad(-50)\n# q2[30] = np.deg2rad(-45)\n# drill_pose2 = create_drill_relative_pose(drill_x=drill_x-0.02, drill_y=drill_y-0.02, drill_z=drill_z, drill_yaw=drill_yaw-5)\n# condition2 = create_bigman_drill_condition(q2, drill_pose2, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition2)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose2)\n# drill_relative_poses.append(drill_pose2)\n\n# q3 = q0.copy()\nq3 = np.zeros(31)\nq3[15] = np.deg2rad(10)\nq3[16] = np.deg2rad(10)\nq3[18] = np.deg2rad(-35)\n# q3[24] = np.deg2rad(10)\n# q3[25] = np.deg2rad(-10)\n# q3[27] = np.deg2rad(-35)\n# q3[24] = np.deg2rad(-10)\n# #q3[25] = np.deg2rad(-20)\n# #q3[25] = np.deg2rad(-10)\n# q3[25] = np.deg2rad(-30)\n# q3[26] = np.deg2rad(0)\n# q3[27] = np.deg2rad(-85)\n# q3[28] = np.deg2rad(0)\n# q3[29] = np.deg2rad(0)\n# q3[30] = np.deg2rad(0)\n\n# PUSH\nq3[24] = np.deg2rad(20)\nq3[25] = np.deg2rad(-55)\nq3[26] = np.deg2rad(0)\nq3[27] = np.deg2rad(-95)\nq3[28] = np.deg2rad(0)\nq3[29] = np.deg2rad(0)\nq3[30] = np.deg2rad(0)\n\n# REACH FROM TOP +0.3\n#q3[24] = np.deg2rad(-30)\n#q3[25] = np.deg2rad(-65)\n#q3[26] = np.deg2rad(20)\n#q3[27] = np.deg2rad(-95)\n#q3[28] = np.deg2rad(20)\n#q3[29] = np.deg2rad(0)\n#q3[30] = np.deg2rad(0)\n\n# REACH FROM TOP +0.2\nq3[24] = np.deg2rad(-31.8328)\nq3[25] = np.deg2rad(-39.7085)\nq3[26] = np.deg2rad(11.934)\nq3[27] = np.deg2rad(-81.7872)\nq3[28] = np.deg2rad(43.8094)\nq3[29] = np.deg2rad(-7.5974)\nq3[30] = np.deg2rad(4.1521)\n\n## DESIRED POSE\n#q3[24] = np.deg2rad(-16.4598)\n#q3[25] = np.deg2rad(-20.2305)\n#q3[26] = np.deg2rad(-7.2484)\n#q3[27] = np.deg2rad(-68.7167)\n#q3[28] = np.deg2rad(-19.2153)\n#q3[29] = np.deg2rad(-5.3767)\n#q3[30] = np.deg2rad(-17.1152)\n\n\n#drill_pose3 = create_drill_relative_pose(drill_x=drill_x-0.06, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw+10)\n\n#drill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.05, drill_y=drill_y-0.3, drill_z=drill_z, drill_yaw=drill_yaw+10)\ndrill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z+0.17, drill_yaw=drill_yaw) # TODO: CHECK IF IT IS OK +0.17 WITH DRILL\n#drill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z, drill_yaw=drill_yaw)\ncondition3 = create_bigman_drill_condition(q3, drill_pose3, bigman_env.get_state_info(),\n 
joint_idxs=bigman_params['joint_ids'][body_part_sensed])\nbigman_env.add_condition(condition3)\nreset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose3)\ndrill_relative_poses.append(drill_pose3)\n\n\n# # q4 = q0.copy()\n# q4 = np.zeros(31)\n# drill_pose4 = create_drill_relative_pose(drill_x=drill_x, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw-5)\n# condition4 = create_bigman_drill_condition(q4, drill_pose4, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition4)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose4)\n# drill_relative_poses.append(drill_pose4)\n\n\n\n\n# #################### #\n# #################### #\n# ## DEMONSTRATIONS ## #\n# #################### #\n# #################### #\n\ndemos_samples = None\n\nif generate_dual_sets is True:\n print(\"\")\n change_print_color.change('GREEN')\n if dual_dir is None:\n task_space_torque_control_dual_params = {\n '_active_joints': 'RA',\n 'n_good_samples': 3,\n 'n_bad_samples': 3,\n 'conditions_to_sample': range(len(bigman_env.get_conditions())),\n 'Tinterm': Tinterm,\n 'Treach': Treach,\n 'Tlift': Tlift,\n 'Tinter': Tinter,\n 'Tend': Tend,\n 'Ts': Ts,\n 'noisy': True,\n 'noise_hyperparams': {\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n },\n 'bigman_env': bigman_env,\n 'drill_relative_poses': drill_relative_poses, # THIS\n 'drill_relative_pose_cond_id': range(-8, -1), # OR THIS\n 'drill_size': drill_size,\n 'final_drill_height': final_drill_height,\n # offsets [roll, pitch, yaw, x, y, z]\n 'good_interms': [[50, 0, 0, 0, -0.13, 0.10],\n [45, 0, 0, 0, -0.10, 0.10],\n [50, 0, 0, 0, -0.13, 0.10],\n ],\n 'bad_interms': [[90, 0, 0, 0, -0.00, 0.10],\n [90, 0, 0, 0, -0.00, 0.10],\n [90, 0, 0, 0, -0.00, 0.10],\n ],\n\n 'good_offsets': [[-5, 0, 0, 0., -0.04, -0.08],\n [0, 0, 0, 0., -0.04, -0.08],\n [0, 0, 0, 0, -0.13, -0.08],\n ],\n 'bad_offsets': [[0, 0, 0, 0., -0.04, 0.01],\n [10, 0, 0, 0., -0.04, 0.01],\n [0, 0, 0, 0., -0.04, 0.01],\n ],\n }\n\n good_trajs, bad_trajs = task_space_torque_control_dual_demos(**task_space_torque_control_dual_params)\n bigman_env.reset(time=2, cond=0)\n\n else:\n good_trajs, bad_trajs = load_task_space_torque_control_dual_demos(dual_dir)\n print('Good/bad dual samples has been obtained from directory %s' % dual_dir)\n\nelse:\n good_trajs = None\n bad_trajs = None\n\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\nchange_print_color.change('YELLOW')\nprint(\"\\nConfiguring learning algorithm...\\n\")\n\n# Learning params\nresume_training_itr = None # Resume from previous training iteration\n# data_files_dir = 'GPS_2017-09-01_15:22:55' # None # In case we want to resume from previous training\ndata_files_dir = None # 'GPS_2017-09-05_13:07:23' # None # In case we want to resume from previous training\n\n\nif not generate_dual_sets:\n # # init_traj_distr values can be lists if they are different for each condition\n # init_traj_distr = {'type': init_lqr,\n # # Parameters to calculate initial COST function based on stiffness\n # 'init_var': 3.0e-1, # Initial Variance\n # 'stiffness': 5.0e-1, # Stiffness 
(multiplies q)\n # 'stiffness_vel': 0.01, # 0.5, # Stiffness_vel*stiffness (multiplies qdot)\n # 'final_weight': 10.0, # Multiplies cost at T\n # # Parameters for guessing dynamics\n # 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.\n # #'init_gains': 1.0*np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # #'init_gains': 1.0/np.array([5000.0, 8000.0, 5000.0, 5000.0, 300.0, 2000.0, 300.0]), # dU vector(np.array) of gains, default ones.\n # 'init_gains': np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # }\n init_traj_distr = {'type': init_pd,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active]))*0.3e-1, # Initial variance (Default:10)\n 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n #'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 2.0e-1, 2.0e-1, 2.0e-1])*1.0e+0,\n #'init_var': np.ones(7)*0.5,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Initial variance (Default:10)\n # 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,\n # 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0, # Initial variance (Default:10)\n 'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': len(bigman_params['joint_ids'][body_part_sensed]), # Total joints in state\n 'state_to_pd': 'joints', # Joints\n 'dDistance': 6,\n }\nelse:\n init_traj_distr = {'type': init_dual_demos,\n 'good_sample_list': good_trajs,\n 'bad_sample_list': bad_trajs,\n 'max_init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n }\n\n# Dynamics\nlearned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. 
number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\n# Trajectory Optimization Options\ntraj_opt_mdreps = [\n {'type': TrajOptMDREPS,\n 'good_const': True, # Use good constraints\n 'bad_const': True, # Use bad constraints\n 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'min_omega': 1e-8, # At min_omega, kl_div > kl_step\n 'max_omega': 5.0e-1, #1e16, # At max_omega, kl_div < kl_step\n 'min_nu': 1e-8, # At min_nu, kl_div > kl_step\n 'max_nu': 5.0e-1, # At max_nu, kl_div < kl_step,\n 'step_tol': 0.1,\n 'bad_tol': 0.2,\n 'good_tol': 0.6, #0.3,\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n },\n # {'type': TrajOptMDREPS,\n # 'good_const': False, # Use good constraints\n # 'bad_const': True, # Use bad constraints\n # 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n # 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n # 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n # # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n # 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n # 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n # 'min_omega': 1e-8, # At min_omega, kl_div > kl_step\n # 'max_omega': 1e16, # At max_omega, kl_div < kl_step\n # 'min_nu': 1e-8, # At min_nu, kl_div > kl_step\n # 'max_nu': 2.0e1, # At max_nu, kl_div < kl_step,\n # 'step_tol': 0.1,\n # 'bad_tol': 0.2,\n # 'good_tol': 0.3,\n # 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n # 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n # 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n # },\n # {'type': TrajOptMDREPS,\n # 'good_const': True, # Use good constraints\n # 'bad_const': True, # Use bad constraints\n # 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n # 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n # 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n # # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n # 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n # 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n # 'min_omega': 1e-8, # At min_omega, kl_div > kl_step\n # 'max_omega': 1e16, # At max_omega, kl_div < kl_step\n # 'min_nu': 1e-8, # At min_nu, kl_div > kl_step\n # 'max_nu': 2.0e1, # At max_nu, kl_div < kl_step,\n # 'step_tol': 0.1,\n # 'bad_tol': 0.2,\n # 'good_tol': 0.3,\n # 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n # 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n # 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n # },\n 
]\n\n# GPS algo hyperparameters\nmdreps_hyperparams = [\n {'inner_iterations': 1,\n 'good_samples': good_trajs,\n 'bad_samples': bad_trajs,\n 'n_bad_samples': 2, # Number of bad samples per each trajectory\n 'n_good_samples': 2, # Number of bad samples per each trajectory\n 'base_kl_bad': 4.2, #5., # 2.5, # (xi) to be used with multiplier | kl_div_b >= kl_bad\n 'base_kl_good': 1.0,#2.0, # (chi) to be used with multiplier | kl_div_g <= kl_good\n 'bad_traj_selection_type': 'only_traj', # 'always', 'only_traj'\n 'good_traj_selection_type': 'only_traj', # 'always', 'only_traj'\n 'duality_dynamics_type': 'duality', # Samples to use to update the dynamics 'duality', 'iteration'\n 'init_eta': 4.62,\n 'init_nu': 0.001,\n 'init_omega': 0.001,\n 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'max_bad_mult': 1.0, # Max possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good in LQR)\n 'max_good_mult': 1.0, # Max possible value of step multiplier (multiplies base_kl_good in LQR)\n 'min_bad_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n 'min_good_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n },\n # {'inner_iterations': 1,\n # 'good_samples': good_trajs,\n # 'bad_samples': bad_trajs,\n # 'n_bad_samples': 2, # Number of bad samples per each trajectory\n # 'n_good_samples': 2, # Number of bad samples per each trajectory\n # 'base_kl_bad': 2.5, # (chi) to be used with multiplier | kl_div_b >= kl_bad\n # 'base_kl_good': 1.0, # (xi) to be used with multiplier | kl_div_g <= kl_good\n # 'bad_traj_selection_type': 'always', # 'always', 'only_traj'\n # 'good_traj_selection_type': 'always', # 'always', 'only_traj'\n # 'duality_dynamics_type': 'duality', # Samples to use to update the dynamics 'duality', 'iteration'\n # 'init_eta': 4.62,\n # 'init_nu': 0.5,\n # 'init_omega': 1.0,\n # 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad in LQR)\n # 'max_bad_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_bad in LQR)\n # 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good in LQR)\n # 'max_good_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_good in LQR)\n # 'min_bad_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n # 'min_good_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n # 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n # 'policy_sample_mode': 'add',\n # 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n # 'policy_prior': {'type': ConstantPolicyPrior,\n # 'strength': 1e-4,\n # },\n # },\n # {'inner_iterations': 1,\n # 'good_samples': good_trajs,\n # 'bad_samples': bad_trajs,\n # 'n_bad_samples': 2, # Number of bad samples per each trajectory\n # 'n_good_samples': 2, # Number of bad samples per each trajectory\n # 'base_kl_bad': 2.5, # (chi) to be used with multiplier | kl_div_b >= kl_bad\n # 'base_kl_good': 1.0, # (xi) to be used with 
multiplier | kl_div_g <= kl_good\n # 'bad_traj_selection_type': 'always', # 'always', 'only_traj'\n # 'good_traj_selection_type': 'always', # 'always', 'only_traj'\n # 'duality_dynamics_type': 'duality', # Samples to use to update the dynamics 'duality', 'iteration'\n # 'init_eta': 4.62,\n # 'init_nu': 0.5,\n # 'init_omega': 1.0,\n # 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad in LQR)\n # 'max_bad_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_bad in LQR)\n # 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good in LQR)\n # 'max_good_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_good in LQR)\n # 'min_bad_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n # 'min_good_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n # 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n # 'policy_sample_mode': 'add',\n # 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n # 'policy_prior': {'type': ConstantPolicyPrior,\n # 'strength': 1e-4,\n # },\n # },\n]\n\ngps_algo_hyperparams = mdreps_hyperparams\ntraj_opt_method = traj_opt_mdreps\nsample_on_policy = False\nuse_global_policy = True\ntest_after_iter = True\n#use_global_policy = False\n\n\ngps_hyperparams = {\n 'T': int(EndTime/Ts), # Total points\n 'dt': Ts,\n 'iterations': 25, # 100 # 2000 # GPS episodes, \"inner iterations\" --> K iterations\n 'test_after_iter': test_after_iter, # If test the learned policy after an iteration in the RL algorithm\n 'test_samples': 2, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)\n # Samples\n 'num_samples': 6, # 20 # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'sample_on_policy': sample_on_policy, # Whether generate on-policy samples or off-policy samples\n #'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n #'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n #'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_var': 8.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. 
In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n 'cost': cost_sum,\n # Conditions\n 'conditions': len(bigman_env.get_conditions()), # Total number of initial conditions\n 'train_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for training\n 'test_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for testing\n # KL step (epsilon)\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)\n 'max_step_mult': 10.0, # Previous 23/08 -> 1.0 #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)\n # Others\n 'gps_algo_hyperparams': gps_algo_hyperparams,\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, #1e-2,# 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization # CHECK THIS VALUE!!!, I AM USING ZERO!!\n 'use_global_policy': use_global_policy,\n 'data_files_dir': data_files_dir,\n}\n\nlearn_algo = MULTIGPS(bigman_agents, bigman_env, **gps_hyperparams)\n\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# Optimize policy using learning algorithm\nraw_input(\"Press a key to start...\")\nprint(\"Running Learning Algorithm!!!\")\ntraining_successful = learn_algo.run(resume_training_itr)\nif training_successful:\n print(\"Learning Algorithm has finished SUCCESSFULLY!\")\nelse:\n print(\"Learning Algorithm has finished WITH ERRORS!\")\n\n\n# ############################## #\n# ############################## #\n# ## SAMPLE FROM FINAL POLICY ## #\n# ############################## #\n# ############################## #\nif training_successful:\n conditions_to_sample = gps_hyperparams['test_conditions']\n change_print_color.change('GREEN')\n n_samples = 1\n noisy = False\n sampler_hyperparams = {\n 'noisy': noisy,\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n 'T': int(EndTime/Ts)*1, # Total points\n 'dt': Ts\n }\n #sampler = Sampler(bigman_agent.policy, bigman_env, **sampler_hyperparams)\n sampler = Sampler(learn_algo.cur[0].traj_distr, bigman_env, **sampler_hyperparams)\n print(\"Sampling from final policy!!!\")\n sample_lists = list()\n for cond_idx in conditions_to_sample:\n raw_input(\"\\nSampling %d times from condition %d and with policy:%s (noisy:%s). 
\\n Press a key to continue...\" %\n                  (n_samples, cond_idx, type(bigman_agents[0].policy), noisy))\n        sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)\n        # costs = learn_algo._eval_conditions_sample_list_cost([sample_list])\n        # # print(costs)\n        # # raw_input('pppp')\n        # sample_lists.append(sample_list)\n\n    bigman_env.reset(time=1, cond=0)\n\n\n\n\nprint(\"The script has finished!\")\nos._exit(0)\n\n" }, { "alpha_fraction": 0.8505747318267822, "alphanum_fraction": 0.8505747318267822, "avg_line_length": 42.5, "blob_id": "611cb9007a476c8f7a0ac9ccebfbbe33d3706d5c", "content_id": "cd3255c1b10eb18715bcc3486b5be2007772c880", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "permissive", "max_line_length": 48, "num_lines": 4, "path": "/robolearn/torch/models/values/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .nn_qfunction import NNQFunction\nfrom .nn_vfunction import NNVFunction\nfrom .nn_multi_qfunction import NNMultiQFunction\nfrom .nn_multi_vfunction import NNMultiVFunction\n" }, { "alpha_fraction": 0.5565708875656128, "alphanum_fraction": 0.5700372457504272, "avg_line_length": 26.445545196533203, "blob_id": "0c478d25405d8f0124888d604e246450f346655e", "content_id": "8a9fc3edb8cfb9c80ade483b55919f943dd75c9e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8317, "license_type": "permissive", "max_line_length": 81, "num_lines": 303, "path": "/robolearn/utils/plots/rollout_plots.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import subplots\nfrom builtins import input\n\nIU_COLORS = [\n    'black',\n    'red',\n    'saddlebrown',\n    'green',\n    'magenta',\n    'orange',\n    'blue',\n    'cadetblue',\n    'mediumslateblue'\n]\n\nCOMPO_COLORS = [\n    'black',\n    'red',\n    'saddlebrown',\n    'green',\n    'magenta',\n    'orange',\n    'blue',\n    'cadetblue',\n    'mediumslateblue'\n]\n\nSTATE_COLORS = [\n    'black',\n    'red',\n    'saddlebrown',\n    'green',\n    'magenta',\n    'orange',\n    'blue',\n    'cadetblue',\n    'mediumslateblue'\n]\n\nACTION_COLORS = [\n    'black',\n    'red',\n    'saddlebrown',\n    'green',\n    'magenta',\n    'orange',\n    'blue',\n    'cadetblue',\n    'mediumslateblue'\n]\n\n\ndef plot_reward_composition(path_list, ignore_last=True, block=False):\n    n_reward_vector = len(path_list['env_infos'][-1]['reward_vector'])\n    H = len(path_list['env_infos'])\n    fig, axs = subplots(n_reward_vector+1, sharex=True)\n\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n\n    fig.subplots_adjust(hspace=0)\n    fig.suptitle('Composition of Rewards',\n                 fontweight='bold')\n\n    data = np.zeros((n_reward_vector+1, H))\n\n    ts = np.arange(H)\n    for rr in range(n_reward_vector):\n        for tt in range(H):\n            data[rr, tt] = \\\n                path_list['env_infos'][tt]['reward_vector'][rr]\n        axs[rr].plot(ts, data[rr, :], color=COMPO_COLORS[rr], linestyle=':')\n        axs[rr].set_ylabel('%02d' % rr)\n        # ax = fig.add_subplot(n_reward_vector, 1, rr+1)\n        # ax.plot(ts, data)\n    data[-1, :] = np.sum(data[:n_reward_vector, :], axis=0)\n\n    if ignore_last:\n        rewards_to_plot = n_reward_vector-1\n    else:\n        rewards_to_plot = n_reward_vector\n\n    for rr in range(rewards_to_plot):\n        axs[-1].plot(ts, data[rr, :], linestyle=':', label='%02d' % rr,\n                     color=COMPO_COLORS[rr])\n    axs[-1].plot(ts, data[-1, :], linewidth=2,\n                 color=COMPO_COLORS[n_reward_vector], label='Reward')\n 
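    # The summary (bottom) axis overlays each per-component reward (dotted)
    # on their sum (solid line), so the contribution of every reward term
    # is visible at a glance.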
axs[-1].set_ylabel('Reward')\n    axs[-1].set_xlabel('Time step')\n    axs[-1].legend()\n\n    plt.show(block=block)\n\n    return fig, axs\n\n\ndef plot_reward_composition_v010(cost_list, ignore_last=False,\n                                 plot_last=False):\n    n_reward_vector = len(cost_list)\n    H = len(cost_list[-1])\n\n    fig, axs = subplots(n_reward_vector+1, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n    fig.suptitle('Reward composition',\n                 fontweight='bold')\n\n    # fig = plt.figure()\n    # ax = fig.add_subplot(n_reward_vector, 1, 1)\n\n    data = np.zeros((n_reward_vector+1, H))\n\n    ts = np.arange(H)\n    for rr in range(n_reward_vector):\n        data[rr, :] = cost_list[rr]\n        axs[rr].plot(ts, data[rr, :], color=COMPO_COLORS[rr], linestyle=':')\n        axs[rr].set_ylabel('Reward %02d' % rr)\n        # ax = fig.add_subplot(n_reward_vector, 1, rr+1)\n        # ax.plot(ts, data)\n    data[-1, :] = np.sum(data[:n_reward_vector, :], axis=0)\n\n    if ignore_last:\n        rewards_to_plot = n_reward_vector-1\n    else:\n        rewards_to_plot = n_reward_vector\n\n    if plot_last:\n        max_t = H\n    else:\n        max_t = H - 1\n\n    for rr in range(rewards_to_plot):\n        axs[-1].plot(ts[:max_t], data[rr, :max_t], linestyle=':',\n                     label='%02d' % rr, color=COMPO_COLORS[rr])\n\n    axs[-1].plot(ts[:max_t], data[-1, :max_t], linewidth=2,\n                 color=COMPO_COLORS[n_reward_vector], label='Total Reward')\n    axs[-1].set_xlabel('Time')\n    axs[-1].legend()\n\n    plt.show(block=False)\n\n\ndef plot_reward_iu(path_list, block=False):\n\n    H = len(path_list['rewards'])\n    n_unintentional = len(path_list['env_infos'][-1]['reward_multigoal'])\n    fig, axs = subplots(n_unintentional+1, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n    fig.suptitle('Reward of Intentional and Unintentional Policies',\n                 fontweight='bold')\n\n    data = np.zeros((n_unintentional+1, H))\n\n    ts = np.arange(H)\n    for tt in range(H):\n        for uu in range(n_unintentional):\n            data[uu, tt] = \\\n                path_list['env_infos'][tt]['reward_multigoal'][uu]\n        # ax = fig.add_subplot(n_reward_vector, 1, rr+1)\n        # ax.plot(ts, data)\n    data[-1, :] = path_list['rewards'].squeeze()\n\n    for aa, ax in enumerate(axs[:-1]):\n        ax.plot(ts, data[aa, :], linestyle=':', label='U-%02d' % aa,\n                color=COMPO_COLORS[aa])\n        ax.set_ylabel('Reward U-%02d' % aa)\n\n    axs[-1].plot(ts, data[-1, :], linewidth=2,\n                 color=COMPO_COLORS[n_unintentional+1], label='I')\n    axs[-1].set_ylabel('Reward Intentional')\n\n    axs[-1].set_xlabel('Time step')\n    # axs[-1].legend()\n\n    plt.show(block=block)\n\n    return fig, axs\n\n\ndef plot_weigths_unintentionals(path_list, block=False):\n    \"\"\"Plot the weights of the set of unintentional policies.\"\"\"\n    if 'mixing_coeff' not in path_list['agent_infos'][-1]:\n        print('There is no mixing_coeff. Not plotting anything!')\n        return\n    H = len(path_list['agent_infos'])\n    act_dim = path_list['agent_infos'][-1]['mixing_coeff'].shape[0]\n    n_unintentional = path_list['agent_infos'][-1]['mixing_coeff'].shape[1]\n\n    fig, axs = subplots(act_dim, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n    fig.suptitle('Mixing weights for Unintentional Policies',\n                 fontweight='bold')\n\n    data = np.zeros((H, act_dim, n_unintentional))\n    for tt in range(H):\n        data[tt] = path_list['agent_infos'][tt]['mixing_coeff']\n        # print(tt, '|', data[tt])\n\n    ts = np.arange(H)\n    for aa in range(act_dim):\n        # axs[aa].plot(ts, data[:, aa, :], color=COMPO_COLORS[aa], linestyle=':')\n        axs[aa].plot(ts, data[:, aa, :], linestyle=':')\n        axs[aa].set_ylabel('U - %02d' % aa)\n        axs[aa].set_xlabel('Time step')\n        # axs[aa].set_ylim(-0.1, 1.1)\n\n    plt.show(block=block)\n\n    return fig, axs\n\n\ndef plot_q_vals(path_list, q_fcn, block=False):\n    obs = path_list['observations']\n    actions = path_list['actions']\n    H = obs.shape[0]\n\n    fig, axs = subplots(1, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n    fig.suptitle('Q-vals',\n                 fontweight='bold')\n\n    q_values = q_fcn.get_values(obs, actions)[0]\n    q_values = q_values.squeeze(-1)\n\n    ts = np.arange(H)\n\n    axs[-1].plot(ts, q_values)\n    axs[-1].set_ylabel('Q-Value')\n    axs[-1].set_xlabel('Time step')\n\n    plt.show(block=block)\n\n    return fig, axs\n\n\ndef plot_state_v010(state, state_name=None):\n\n    H = state.shape[0]\n    dS = state.shape[1]\n\n    fig, axs = subplots(dS, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n\n    if state_name is None:\n        state_name = 'State'\n\n    fig.suptitle('%s Trajectory' % state_name, fontweight='bold')\n\n    # fig = plt.figure()\n    # ax = fig.add_subplot(n_reward_vector, 1, 1)\n\n    ts = np.arange(H)\n    for ss in range(dS):\n        axs[ss].plot(ts, state[:, ss], color=STATE_COLORS[ss], linestyle='-')\n        axs[ss].set_ylabel('State %02d' % ss)\n        # ax = fig.add_subplot(n_reward_vector, 1, rr+1)\n        # ax.plot(ts, data)\n    axs[-1].set_xlabel('Time')\n    # axs[-1].legend()\n\n    plt.show(block=False)\n\n\ndef plot_action_v010(action, action_name=None):\n\n    H = action.shape[0]\n    dA = action.shape[1]\n\n    fig, axs = subplots(dA, sharex=True)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n    fig.subplots_adjust(hspace=0)\n\n    if action_name is None:\n        action_name = 'Action'\n\n    fig.suptitle('%s Trajectory' % action_name, fontweight='bold')\n\n    # fig = plt.figure()\n    # ax = fig.add_subplot(n_reward_vector, 1, 1)\n\n    ts = np.arange(H)\n    for aa in range(dA):\n        axs[aa].plot(ts, action[:, aa], color=ACTION_COLORS[aa], linestyle='-')\n        axs[aa].set_ylabel('%s %02d' % (action_name, aa))\n        # ax = fig.add_subplot(n_reward_vector, 1, rr+1)\n        # ax.plot(ts, data)\n    axs[-1].set_xlabel('Time')\n    # axs[-1].legend()\n\n    plt.show(block=False)\n\n" }, { "alpha_fraction": 0.515709638595581, "alphanum_fraction": 0.5182376503944397, "avg_line_length": 25.12264060974121, "blob_id": "693e77147027fafb933272360803442419fcda91", "content_id": "dcfcf37fb2b6ec65142ddf55bc699c63f32d92e4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2769, "license_type": "permissive", "max_line_length": 76, "num_lines": 106, "path": "/robolearn/torch/policies/lin_gauss_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.models.policies import Policy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom robolearn.utils.serializable import Serializable\nimport robolearn.torch.utils.pytorch_util as ptu\n\n\nclass LinearGaussianPolicy(PyTorchModule, Serializable, Policy):\n    def __init__(self,\n                 obs_dim,\n                 action_dim,\n                 T,\n                 K=None,\n                 k=None,\n                 pol_covar_diag=None,\n                 ):\n\n        Policy.__init__(self,\n                        action_dim=action_dim)\n\n        # self._serializable_initialized = False\n        # Serializable.quick_init(self, locals())\n        self.save_init_params(locals())\n        PyTorchModule.__init__(self)\n\n        self._obs_dim = obs_dim\n\n        if K is None:\n            K = torch.rand((self.action_dim, self._obs_dim)).repeat(T, 1, 1)\n        else:\n            K = ptu.from_numpy(K)\n\n        if k is None:\n            k = torch.zeros(T, self.action_dim)\n        else:\n            k = ptu.from_numpy(k)\n\n        if pol_covar_diag is None:\n            pol_covar_diag = torch.ones(self.action_dim).repeat(T, 1, 1)\n        else:\n            pol_covar_diag = ptu.from_numpy(pol_covar_diag)\n\n        self.K = nn.Parameter(data=K)\n        # self.K = K\n\n        self.k = nn.Parameter(data=k)\n        # self.k = k\n\n        self._covar_diag = nn.Parameter(pol_covar_diag)\n        # self._covar_diag = pol_covar_diag\n\n        self._T = T\n\n    # def K(self, t):\n    #     return torch.diag(self.K_params[t, :])\n\n    @property\n    def H(self):\n        return self._T\n\n    @property\n    def T(self):\n        return self._T\n\n    def get_action(self, obs_np, **kwargs):\n        values, info_dict = \\\n            self.get_actions(obs_np[None], **kwargs)\n\n        for key, val in info_dict.items():\n            if isinstance(val, np.ndarray):\n                info_dict[key] = val[0, :]\n\n        return values[0, :], info_dict\n\n    def get_actions(self, obs_np, **kwargs):\n        return self.eval_np(obs_np, **kwargs)\n\n    def forward(\n            self,\n            obs,\n            t=None,\n            noise=None,\n            return_preactivations=False,\n    ):\n        if t is None:\n            raise NotImplementedError\n        else:\n            action = F.linear(obs, self.K[t, :, :], self.k[t, :])\n            if noise is None:\n                noise = 0\n            else:\n                covar = torch.diag(self._covar_diag[t, :].squeeze())\n                noise = F.linear(noise, covar)\n            action += noise\n        info_dict = dict()\n\n        return action, info_dict\n\n    def set_K(self, K):\n        self.K.data = K\n\n    def set_k(self, k):\n        self.k.data = k\n" }, { "alpha_fraction": 0.6143791079521179, "alphanum_fraction": 0.6143791079521179, "avg_line_length": 18.95652198791504, "blob_id": "9b90a23a08dab97091bcbfe887a1f115d014ee9c", "content_id": "f38491022b95434c16529c93ba95aeea2855e128", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "permissive", "max_line_length": 53, "num_lines": 23, "path": "/robolearn/models/values/v_function.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\n\n\nclass VFunction(with_metaclass(abc.ABCMeta, object)):\n    \"\"\"\n    Base state value function (V-function) interface.\n    :math:`V(s_t)`\n    \"\"\"\n\n    def __init__(self, obs_dim):\n        self._obs_dim = obs_dim\n\n    @abc.abstractmethod\n    def get_value(self, observation):\n        pass\n\n    def get_values(self, observations):\n        pass\n\n    @property\n    def obs_dim(self):\n        return self._obs_dim\n" }, { "alpha_fraction": 0.60597825050354, "alphanum_fraction": 0.6182065010070801, "avg_line_length": 21.02857208251953, "blob_id": "7a6ba5dc4fc092908def79850c60d016f10b22ef", "content_id": "17a6c323c1eb1a45663923ca2b9a94258943a7e3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736,
"license_type": "permissive", "max_line_length": 55, "num_lines": 35, "path": "/examples/simple_envs/test_crawling_robot.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from builtins import input\nimport numpy as np\nfrom robolearn.envs.simple_envs import CrawlingRobotEnv\n\nenv_params = dict(\n horizon=np.inf,\n render=True,\n)\nenv = CrawlingRobotEnv(**env_params)\n\n# for ii in range(400):\n# env.reset()\n# env.render()\n\nenv.reset()\nenv.render()\n# input('Press a key to start...')\n\nfor ii in range(1000):\n action = env.action_space.sample()\n obs, reward, done, env_info = env.step(action)\n print('---'*3, ii, '---'*3)\n print('action -->', action)\n print('obs -->', obs)\n print('reward -->', reward)\n print('done -->', done)\n print('info -->', env_info)\n env.render()\n\ninput('Press a key to reset...')\n\nenv.reset()\nenv.render()\n\ninput('Press a key to close the script')\n" }, { "alpha_fraction": 0.6189599633216858, "alphanum_fraction": 0.6488725543022156, "avg_line_length": 24.26744270324707, "blob_id": "387a124478d7b8e6991eb7e905d7b313ad993f6d", "content_id": "a672138b59f0cb350c05e09410f9cb8c8b122ab5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2173, "license_type": "permissive", "max_line_length": 71, "num_lines": 86, "path": "/scenarios/tests/check_reachermodel.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.old_envs.pusher3dof import Pusher3DofBulletEnv\nfrom builtins import input\n\n\nprint(\"\\nCreating Environment...\")\n\n# Environment parameters\nenv_with_img = False\nrdn_tgt_pos = False\nrender = True\nobs_like_mjc = False\nntargets = 2\ntgt_positions = [[0.1, 0.2], [-0.1, -0.2]]\ntgt_types = ['CS', 'C']\nsim_timestep = 0.001\nframe_skip = int(0.01/sim_timestep)\n\nenv = Pusher3DofBulletEnv(render=render, obs_with_img=env_with_img,\n obs_mjc_gym=obs_like_mjc, ntargets=ntargets,\n rdn_tgt_pos=rdn_tgt_pos, tgt_types=tgt_types,\n sim_timestep=sim_timestep,\n frame_skip=frame_skip,\n obs_distances=True,\n half_env=True)\n\n# env.set_tgt_cost_weights(tgt_weights)\nenv.set_tgt_pos(tgt_positions)\n\n\n#\nmean_init_cond = [20, 15, 5, 0.60, -0.10] # des joint_pos, obstacle\nstd_init_cond = [30, 30, 10, 0.1, 0.1] # lim/2\ninit_joint_pos = [-90, 20, 2]\nn_init_cond = 15\ncond_mean = np.array(mean_init_cond)\ncond_std = np.array(std_init_cond)\n\ntgt_idx = [6, 7, 8]\nobst_idx = [9, 10] # Only X-Y is random\nseed = 0\n\n# Set the np seed\nnp.random.seed(seed)\n\nall_init_conds = np.zeros((int(n_init_cond), 9))\n\nall_rand_data = np.random.rand(n_init_cond, len(cond_mean))\n\n\n# ADD SPECIFIC DATA:\nidx_rdn_data = 5\n\nprint(all_rand_data.shape)\nrand_data = all_rand_data[idx_rdn_data, :]\ninit_cond = cond_std*rand_data + cond_mean\njoint_pos = np.deg2rad(init_cond[:3])\n\nenv_condition = np.zeros(env.obs_dim)\nenv_condition[:env.action_dim] = joint_pos\n# env_condition[obst_idx] = init_cond[3:]\n\n# Temporally hack for getting ee _object\nenv.add_custom_init_cond(env_condition)\nenv.reset(condition=-1)\n# obs = self.env.get_observation()\ndes_tgt = env.get_ee_pose()\nenv.clear_custom_init_cond(-1)\n\nenv_condition[:3] = np.deg2rad(init_joint_pos)\nenv_condition[tgt_idx] = des_tgt\nenv_condition[obst_idx] = init_cond[3:]\n\n# Now add the target properly\nprint('INIT COND', env_condition)\nenv.add_custom_init_cond(env_condition)\n\ninput('adsfkhk')\n\nprint(\"Environment:%s OK!.\" % 
type(env).__name__)\n\n\nenv.reset()\n\n\ninput('Press key to close')\n" }, { "alpha_fraction": 0.6360874772071838, "alphanum_fraction": 0.6427703499794006, "avg_line_length": 30.653846740722656, "blob_id": "c3c42114c04f3654feeacb2874818172eaa441d7", "content_id": "fe3f70313f14d60b416944a353c7cc66b8dafba1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1646, "license_type": "permissive", "max_line_length": 77, "num_lines": 52, "path": "/robolearn/utils/samplers/utils.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef stack_paths(paths):\n \"\"\"\n Stack multiples obs/actions/etc. from different paths\n :param paths: List of paths, where one path is something returned from\n the rollout functino above.\n :return: Tuple. Every element will have shape batch_size X DIM, including\n the rewards and terminal flags.\n \"\"\"\n rewards = [path[\"rewards\"].reshape(-1, 1) for path in paths]\n terminals = [path[\"terminals\"].reshape(-1, 1) for path in paths]\n actions = [path[\"actions\"] for path in paths]\n obs = [path[\"observations\"] for path in paths]\n next_obs = [path[\"next_observations\"] for path in paths]\n rewards = np.vstack(rewards)\n terminals = np.vstack(terminals)\n obs = np.vstack(obs)\n actions = np.vstack(actions)\n next_obs = np.vstack(next_obs)\n assert len(rewards.shape) == 2\n assert len(terminals.shape) == 2\n assert len(obs.shape) == 2\n assert len(actions.shape) == 2\n assert len(next_obs.shape) == 2\n return rewards, terminals, obs, actions, next_obs\n\n\ndef stack_paths_to_dict(paths):\n rewards, terminals, obs, actions, next_obs = stack_paths(paths)\n return dict(\n rewards=rewards,\n terminals=terminals,\n observations=obs,\n actions=actions,\n next_observations=next_obs,\n )\n\n\ndef get_stat_in_paths(paths, dict_name, scalar_name):\n if len(paths) == 0:\n return np.array([[]])\n\n if type(paths[0][dict_name]) == dict:\n # Support rllab interface\n return [path[dict_name][scalar_name] for path in paths]\n\n return [\n [info[scalar_name] for info in path[dict_name]]\n for path in paths\n ]\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 72, "blob_id": "52d72422fe57e33e62f7e1f5f35a951c3ef0090f", "content_id": "3559818c9d8b22b3c16184c9bc8763950cadbd33", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "permissive", "max_line_length": 72, "num_lines": 1, "path": "/scenarios/humanoids2018/plots/others/README.md", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "This scripts could not work. They were created for previous experiments." 
}, { "alpha_fraction": 0.620648980140686, "alphanum_fraction": 0.625663697719574, "avg_line_length": 29.81818199157715, "blob_id": "c9ef59f0ed5ce7f066d74e5759e5414fbe068b5d", "content_id": "4a9bcb7477f3eefe56e4886e6efc1c52b9c311d5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3390, "license_type": "permissive", "max_line_length": 65, "num_lines": 110, "path": "/scripts/sim_reacher.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers import rollout\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import set_gpu_mode\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofBulletEnv\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.models.policies import ExplorationPolicy\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\nimport numpy as np\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfilename = str(uuid.uuid4())\nSEED = 110\n\n\ndef simulate_policy(args):\n\n np.random.seed(SEED)\n ptu.seed(SEED)\n\n data = joblib.load(args.file)\n if args.deterministic:\n print('Using the deterministic version of the policy.')\n if isinstance(data['policy'], ExplorationPolicy):\n policy = MakeDeterministic(data['policy'])\n else:\n policy = data['policy']\n else:\n print('Using the stochastic policy.')\n policy = data['exploration_policy']\n\n print(\"Policy loaded!!\")\n\n # Load environment\n with open('variant.json') as json_data:\n env_params = json.load(json_data)['env_params']\n\n env_params['is_render'] = True\n env = NormalizedBoxEnv(\n Reacher2D3DofBulletEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n print(\"Environment loaded!!\")\n\n if args.gpu:\n set_gpu_mode(True)\n policy.cuda()\n if isinstance(policy, MakeDeterministic):\n if isinstance(policy.stochastic_policy, PyTorchModule):\n policy.stochastic_policy.train(False)\n else:\n if isinstance(policy, PyTorchModule):\n policy.train(False)\n\n while True:\n if args.record:\n rollout_start_fcn = lambda: \\\n env.start_recording_video('reacher_video.mp4')\n rollout_end_fcn = lambda: \\\n env.stop_recording_video()\n else:\n rollout_start_fcn = None\n rollout_end_fcn = None\n\n obs_normalizer = data.get('obs_normalizer')\n\n path = rollout(\n env,\n policy,\n max_path_length=args.H,\n animated=True,\n obs_normalizer=obs_normalizer,\n rollout_start_fcn=rollout_start_fcn,\n rollout_end_fcn=rollout_end_fcn,\n )\n\n if hasattr(env, \"log_diagnostics\"):\n env.log_diagnostics([path])\n\n logger.dump_tabular()\n\n if args.record:\n break\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./params.pkl',\n help='path to the snapshot file')\n parser.add_argument('--H', type=int, default=500,\n help='Max length of rollout')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--deterministic', action=\"store_true\")\n parser.add_argument('--record', action=\"store_true\")\n parser.add_argument('--env', type=str, default='manipulator')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n args = parser.parse_args()\n\n simulate_policy(args)\n input('Press a key to finish the script')\n" }, { "alpha_fraction": 
0.8601036071777344, "alphanum_fraction": 0.8808290362358093, "avg_line_length": 63.33333206176758, "blob_id": "45d0704bebc0ac63e8c6d351a16ff580cf1c37b4", "content_id": "3b46630c63fbd08e7dff67b6796a68a5a161d842", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "permissive", "max_line_length": 68, "num_lines": 3, "path": "/robolearn/envs/simple_envs/navigation2d/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .navigation2d_goalcompo_env import Navigation2dGoalCompoEnv\nfrom .navigation2d_goalcompo_q_plot import QFPolicyPlotter\nfrom .navigation2d_goalcompo_multiq_plot import MultiQFPolicyPlotter\n" }, { "alpha_fraction": 0.5676100850105286, "alphanum_fraction": 0.6033805012702942, "avg_line_length": 23.461538314819336, "blob_id": "94beb4295b45259cc6680c752467f9d891492bdb", "content_id": "e02118a572fba1247b6e28f3d93225336d70a081", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2544, "license_type": "permissive", "max_line_length": 86, "num_lines": 104, "path": "/examples/rl_algos/spinningup/centauro/test_sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom spinup import sac\nfrom spinup.algos.sac.core import mlp_actor_critic\n\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\n\nfrom spinup.utils.run_utils import setup_logger_kwargs\n\nEPOCHS = 1000\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.01\nFRAME_SKIP = 1\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 128\n\nSEED = 1010\n# NP_THREADS = 6\n\nSUBTASK = None\n\nEXP_NAME = 'prueba_centauro1_sac'\n\nenv_params = dict(\n is_render=False,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n active_joints='RA',\n control_mode='joint_tasktorque',\n # _control_mode='torque',\n balance_cost_weight=1.0,\n fall_cost_weight=1.0,\n tgt_cost_weight=3.0,\n # tgt_cost_weight=50.0,\n balance_done_cost=0., # 2.0*PATH_LENGTH, # TODO: dont forget same balance weight\n tgt_done_reward=0., # 20.0,\n ctrl_cost_weight=1.0e-1,\n use_log_distances=True,\n log_alpha_pos=1e-4,\n log_alpha_ori=1e-4,\n goal_tolerance=0.05,\n min_obj_height=0.60,\n max_obj_height=1.20,\n max_obj_distance=0.20,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n subtask=SUBTASK,\n random_init=True,\n seed=SEED,\n)\n\n\ndef main():\n # Environment Fcn\n env_fn = lambda: \\\n NormalizedBoxEnv(\n CentauroTrayEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n # Logger kwargs\n logger_kwargs = setup_logger_kwargs(EXP_NAME, SEED)\n\n with tf.Graph().as_default():\n sac(\n env_fn,\n actor_critic=mlp_actor_critic,\n ac_kwargs=dict(hidden_sizes=(128, 128, 128)),\n seed=SEED,\n steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n epochs=EPOCHS,\n replay_size=int(1e6),\n gamma=0.99,\n polyak=0.995, # Polyak avg target pol (0-1)\n lr=1e-3,\n alpha=0.2, # entropy regularization coefficient (inv rew scale)\n batch_size=BATCH_SIZE,\n start_steps=10000,\n max_ep_len=PATH_LENGTH, # Max length for trajectory\n logger_kwargs=logger_kwargs,\n save_freq=1\n )\n\n\nif __name__ == 
'__main__':\n    main()\n" }, { "alpha_fraction": 0.5828459858894348, "alphanum_fraction": 0.5916179418563843, "avg_line_length": 32.25, "blob_id": "2a14e78a5b1dac6d121fc5b734d950973ebd857a", "content_id": "a71b6b29589aab84ad03ce432515ada90b9b67ab", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7182, "license_type": "permissive", "max_line_length": 85, "num_lines": 216, "path": "/scripts/sim_plot_pusher_weights.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers import rollout\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import set_gpu_mode\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn_gym_envs.pybullet import Pusher2D3DofGoalCompoEnv\nfrom robolearn.torch.policies import MultiPolicySelector\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.models.policies import ExplorationPolicy\nimport os\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\nimport numpy as np\nimport robolearn.torch.utils.pytorch_util as ptu\nimport matplotlib.pyplot as plt\n\nfrom robolearn.utils.plots import subplots\nfrom robolearn.utils.plots import get_csv_data\nfrom robolearn.utils.plots import set_latex_plot\n\nfilename = str(uuid.uuid4())\nSEED = 110\n\n\ndef simulate_policy(args):\n\n    np.random.seed(SEED)\n    ptu.seed(SEED)\n\n    data = joblib.load(args.file)\n    if args.deterministic:\n        if args.un > -1:\n            print('Using the deterministic version of the UNintentional policy '\n                  '%02d.' % args.un)\n            if 'u_policy' in data:\n                policy = MakeDeterministic(\n                    MultiPolicySelector(data['u_policy'], args.un))\n                    # WeightedMultiPolicySelector(data['u_policy'], args.un))\n            else:\n                # policy = MakeDeterministic(data['u_policies'][args.un])\n                if isinstance(data['policy'], TanhGaussianPolicy):\n                    policy = MakeDeterministic(data['policy'])\n                else:\n                    policy = MakeDeterministic(\n                        WeightedMultiPolicySelector(data['policy'], args.un)\n                    )\n        else:\n            print('Using the deterministic version of the Intentional policy.')\n            if isinstance(data['policy'], ExplorationPolicy):\n                policy = MakeDeterministic(data['policy'])\n            else:\n                policy = data['policy']\n    else:\n        if args.un > -1:\n            print('Using the UNintentional stochastic policy %02d' % args.un)\n            if 'u_policy' in data:\n                # policy = MultiPolicySelector(data['u_policy'], args.un)\n                policy = WeightedMultiPolicySelector(data['u_policy'], args.un)\n            else:\n                policy = WeightedMultiPolicySelector(data['policy'], args.un)\n                # policy = data['policy'][args.un]\n        else:\n            print('Using the Intentional stochastic policy.')\n            # policy = data['exploration_policy']\n            policy = data['policy']\n\n    print(\"Policy loaded!!\")\n\n    # Load environment\n    dirname = os.path.dirname(args.file)\n    with open(os.path.join(dirname, 'variant.json')) as json_data:\n        log_data = json.load(json_data)\n        env_params = log_data['env_params']\n        H = int(log_data['path_length'])\n\n    env_params.pop('goal', None)\n    env_params['is_render'] = True\n    env = NormalizedBoxEnv(\n        Pusher2D3DofGoalCompoEnv(**env_params),\n        # normalize_obs=True,\n        normalize_obs=False,\n        online_normalization=False,\n        obs_mean=None,\n        obs_var=None,\n        obs_alpha=0.001,\n    )\n    print(\"Environment loaded!!\")\n\n    if args.gpu:\n        set_gpu_mode(True)\n        policy.cuda()\n    if isinstance(policy, MakeDeterministic):\n        if isinstance(policy.stochastic_policy, PyTorchModule):\n            policy.stochastic_policy.train(False)\n    else:\n        if isinstance(policy, PyTorchModule):\n            policy.train(False)\n\n    while True:\n        if args.record:\n            rollout_start_fcn = lambda: \\\n                env.start_recording_video('pusher_video.mp4')\n            rollout_end_fcn = lambda: \\\n                env.stop_recording_video()\n        else:\n            rollout_start_fcn = None\n            rollout_end_fcn = None\n\n        obs_normalizer = data.get('obs_normalizer')\n\n        if args.H != -1:\n            H = args.H\n\n        path = rollout(\n            env,\n            policy,\n            max_path_length=H,\n            animated=True,\n            obs_normalizer=obs_normalizer,\n            rollout_start_fcn=rollout_start_fcn,\n            rollout_end_fcn=rollout_end_fcn,\n        )\n\n        if hasattr(env, \"log_diagnostics\"):\n            env.log_diagnostics([path])\n\n        logger.dump_tabular()\n\n        if args.record:\n            break\n\n    answer = input('Plot the weights?')\n\n    if answer:\n        obj_idxs = env.wrapped_env.get_obs_info('object')['idx']\n        plot_weights(path['agent_infos'], path['observations'], obj_idxs)\n    else:\n        print('Not plotting anything!')\n\n\ndef plot_weights(agent_infos, observations, obj_idxs, latex_plot=False, block=False):\n    T = len(agent_infos)\n    nUnint = agent_infos[-1]['mixing_coeff'].shape[0]\n    dA = agent_infos[-1]['mixing_coeff'].shape[1]\n\n    all_data = np.zeros((T, nUnint, dA))\n    touching = np.zeros(T)\n\n    for t in range(T):\n        all_data[t] = agent_infos[t]['mixing_coeff']\n        dist = np.linalg.norm(observations[t][obj_idxs])\n        print(t, dist)\n\n        touching[t] = dist < 0.1\n\n    if latex_plot:\n        set_latex_plot()\n\n    fig, axs = subplots(dA)\n    if not isinstance(axs, np.ndarray):\n        axs = np.array([axs])\n\n    lines = list()\n    labels = list()\n\n    Ts = 1e-3\n    time = np.arange(T)*Ts\n    for aa, ax in enumerate(axs):\n        for uu in range(nUnint):\n            plot_w = ax.plot(time, all_data[:, uu, aa])[0]\n            if aa == 0:\n                lines.append(plot_w)\n                labels.append('Weight U-%02d' % uu)\n\n        ax.set_ylabel('Action %d' % (aa+1), fontsize=35)\n\n        plot_t = ax.plot(time, touching)[0]\n        if aa == 0:\n            lines.append(plot_t)\n            labels.append('Close to cylinder')\n\n    axs[-1].set_xlabel('Time (s)', fontsize=35)\n\n    legend = fig.legend(lines, labels, loc='lower center', ncol=3,\n                        labelspacing=0., prop={'size': 30},\n                        fancybox=True, #bbox_to_anchor=(1, 1),\n                        )\n    fig.set_size_inches(19, 11)  # 1920 x 1080\n    fig.tight_layout()\n    fig.subplots_adjust(bottom=0.15)\n    legend.set_draggable(True)\n\n    plt.show(block=block)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('file', type=str, default='./params.pkl',\n                        help='path to the snapshot file')\n    parser.add_argument('--H', type=int, default=-1,\n                        help='Max length of rollout')\n    parser.add_argument('--gpu', action='store_true')\n    parser.add_argument('--deterministic', action="store_true")\n    parser.add_argument('--record', action="store_true")\n    parser.add_argument('--env', type=str, default='manipulator')\n    parser.add_argument('--un', type=int, default=-1,\n                        help='Unintentional id')\n    parser.add_argument('--task_env', action='store_true')\n    args = parser.parse_args()\n\n    simulate_policy(args)\n    input('Press a key to finish the script')\n" }, { "alpha_fraction": 0.5661319494247437, "alphanum_fraction": 0.6025914549827576, "avg_line_length": 45.42448043823242, "blob_id": "f8ac428402115693f8c3d4f0d16319b24e7f99f6", "content_id": "2eb0b365ad9165f25d56a79ab266054f79418a07", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17828, "license_type": "permissive", "max_line_length": 175, "num_lines": 384, "path": "/scenarios/manipulator2d_gps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\nimport numpy as np\n\nfrom robolearn.old_agents import GPSAgent\nfrom robolearn.old_algos.gps.gps import GPS\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_fk import CostFK\n# from robolearn.costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\n# from robolearn.envs import BigmanEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_dual_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior  # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\n# from robolearn.utils.iit.iit_robots_params import bigman_params\n# from robolearn.utils.robot_model import RobotModel\n# from robolearn.utils.tasks.bigman.reach_drill_utils import Reset_condition_bigman_drill_gazebo\n# from robolearn.utils.tasks.bigman.reach_drill_utils import create_bigman_drill_condition\n# from robolearn.utils.tasks.bigman.reach_drill_utils import create_drill_relative_pose\n# from robolearn.utils.tasks.bigman.reach_drill_utils import create_hand_relative_pose\n# from robolearn.utils.tasks.bigman.reach_drill_utils import spawn_drill_gazebo\n# from robolearn.utils.tasks.bigman.reach_drill_utils import 
task_space_torque_control_dual_demos, \\\n# load_task_space_torque_control_dual_demos\n# from robolearn.utils.traj_opt.traj_opt_mdreps import TrajOptMDREPS\nfrom robolearn.old_utils.traj_opt.traj_opt_lqr import TrajOptLQR\n\n\nfrom robolearn.old_envs.manipulator2d.manipulator2d_env import Manipulator2dEnv\nfrom robolearn.old_utils.print_utils import change_print_color\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n print(\"\\n\\033[1;31mThe script has been kill by the user!!\")\n manipulator2d_env.gz_ros_process.stop()\n os._exit(1)\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\nlearning_algorithm = 'MDREPS'\n# Task parameters\nTs = 0.02\nEndTime = 5\nseed = 0\n\nrandom.seed(seed)\nnp.random.seed(seed)\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\nchange_print_color.change('BLUE')\nprint(\"\\nCreating Manipulator2d environment...\")\n\n# Robot configuration\ninterface = 'ros'\nbody_part_active = 'RA'\nbody_part_sensed = 'RA'\ncommand_type = 'effort'\n\n# reset_condition_bigman_drill_gazebo_fcn = Reset_condition_bigman_drill_gazebo()\n\n# Create a BIGMAN ROS EnvInterface\nmanipulator2d_env = Manipulator2dEnv()\n\nraw_input('esperameeee')\n\n\naction_dim = manipulator2d_env.action_dim\nstate_dim = manipulator2d_env.state_dim\nobservation_dim = manipulator2d_env.obs_dim\n\nprint(\"Manipulator2d Environment OK. \\n action_dim=%02d, obs_dim=%02d, state_dim=%0.02d\" % (action_dim, observation_dim,\n state_dim))\n\n\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nchange_print_color.change('CYAN')\nprint(\"\\nCreating Bigman Agent...\")\n\npolicy_params = {'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'network_params': {\n 'n_layers': 2, # Hidden layers??\n 'dim_hidden': [40, 40], # List of size per n_layers\n 'obs_names': manipulator2d_env.get_obs_info()['names'],\n 'obs_dof': manipulator2d_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n },\n # Initialization.\n 'init_var': 0.1, # Initial policy variance.\n 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # Solver hyperparameters.\n 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n 'batch_size': 15,\n 'lr': 0.001, # Base learning rate (by default it's fixed).\n 'lr_policy': 'fixed', # Learning rate policy.\n 'momentum': 0.9, # Momentum.\n 'weight_decay': 0.005, # Weight decay.\n 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # set gpu usage.\n 'use_gpu': 1, # Whether or not to use the GPU for training.\n 'gpu_id': 0,\n 'random_seed': 1,\n 'fc_only_iterations': 0, # TODO: Only forwardcontrol? 
if it is CNN??\n 'gpu_mem_percentage': 0.2,\n # 'weights_file_prefix': EXP_DIR + 'policy',\n }\n\npolicy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': policy_params\n }\n\nmanipulator2d_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt,\n agent_name=\"bigman_agent\")\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n# Action Cost\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\n# target_distance_hand = np.zeros(6)\n\n# target_distance_object = np.zeros(6)\n# fk_l2_cost = {\n# 'type': CostFK,\n# 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_pose': target_distance_hand,\n# 'tgt_data_type': 'state', # 'state' or 'observation'\n# 'tgt_idx': manipulator2d_env.get_state_info(name='distance_hand')['idx'],\n# 'op_point_name': hand_name,\n# 'op_point_offset': hand_offset,\n# 'joints_idx': manipulator2d_env.get_state_info(name='link_position')['idx'],\n# 'joint_ids': bigman_params['joint_ids'][body_part_active],\n# 'robot_model': robot_model,\n# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n# #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'evalnorm': evall1l2term,\n# #'evalnorm': evallogl2term,\n# 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 1, # 10\n# }\n#\n# fk_l2_final_cost = {\n# 'type': CostFK,\n# 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_pose': target_distance_hand,\n# 'tgt_data_type': 'state', # 'state' or 'observation'\n# 'tgt_idx': manipulator2d_env.get_state_info(name='distance_hand')['idx'],\n# 'op_point_name': hand_name,\n# 'op_point_offset': hand_offset,\n# 'joints_idx': manipulator2d_env.get_state_info(name='link_position')['idx'],\n# 'joint_ids': bigman_params['joint_ids'][body_part_active],\n# 'robot_model': robot_model,\n# #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'evalnorm': evall1l2term,\n# #'evalnorm': evallogl2term,\n# 'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 50,\n# }\n\ncost_sum = {\n 'type': CostSum,\n 'costs': [act_cost], #, fk_l2_cost, fk_l2_final_cost],\n 'weights': [1.0e-1], # 1.5e-1, 1.0e-0],\n}\n\n\n# ########## #\n# ########## #\n# Conditions #\n# ########## #\n# ########## #\n\n# # REACH FROM TOP +0.2\n# q3[24] = np.deg2rad(-31.8328)\n# q3[25] = np.deg2rad(-39.7085)\n# q3[26] = np.deg2rad(11.934)\n# q3[27] = np.deg2rad(-81.7872)\n# q3[28] = np.deg2rad(43.8094)\n# q3[29] = np.deg2rad(-7.5974)\n# q3[30] = np.deg2rad(4.1521)\n# drill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z+0.17, drill_yaw=drill_yaw) # TODO: CHECK IF IT IS OK +0.17 WITH DRILL\n# condition3 = create_bigman_drill_condition(q3, drill_pose3, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# manipulator2d_env.add_condition(condition3)\n# # reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose3)\n# # drill_relative_poses.append(drill_pose3)\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\nchange_print_color.change('YELLOW')\nprint(\"\\nConfiguring learning algorithm...\\n\")\n\n# Learning params\nresume_training_itr = None # Resume from previous training iteration\n# data_files_dir = 'GPS_2017-09-01_15:22:55' # None # In case we want to resume from previous training\ndata_files_dir = None # 'GPS_2017-09-05_13:07:23' # None # In case we want to resume from previous training\n\ninit_traj_distr = {'type': init_pd,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active]))*0.3e-1, # Initial variance (Default:10)\n 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n #'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 2.0e-1, 2.0e-1, 2.0e-1])*1.0e+0,\n #'init_var': np.ones(7)*0.5,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Initial variance (Default:10)\n # 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,\n # 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0, # Initial variance (Default:10)\n 'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': manipulator2d_env.action_dim, # Total joints in state\n 'state_to_pd': 'joints', # 
Joints\n 'dDistance': 6,\n }\n\n# Dynamics\nlearned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\ntraj_opt_method = {'type': TrajOptLQR,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n\ngps_algo_hyperparams = [\n {'inner_iterations': 1,\n 'init_eta': 4.62,\n 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n },\n]\n\nsample_on_policy = False\nuse_global_policy = True\ntest_after_iter = True\n#use_global_policy = False\n\n\ngps_hyperparams = {\n 'T': int(EndTime/Ts), # Total points\n 'dt': Ts,\n 'iterations': 25, # 100 # 2000 # GPS episodes, \"inner iterations\" --> K iterations\n 'test_after_iter': test_after_iter, # If test the learned policy after an iteration in the RL algorithm\n 'test_samples': 2, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)\n # Samples\n 'num_samples': 6, # 20 # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'sample_on_policy': sample_on_policy, # Whether generate on-policy samples or off-policy samples\n #'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n #'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n #'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_var': 8.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. 
In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': np.ones(manipulator2d_env.action_dim), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n 'cost': cost_sum,\n # Conditions\n 'conditions': len(manipulator2d_env.get_conditions()), # Total number of initial conditions\n 'train_conditions': range(len(manipulator2d_env.get_conditions())), # Indexes of conditions used for training\n 'test_conditions': range(len(manipulator2d_env.get_conditions())), # Indexes of conditions used for testing\n # KL step (epsilon)\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)\n 'max_step_mult': 10.0, # Previous 23/08 -> 1.0 #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)\n # Others\n 'gps_algo_hyperparams': gps_algo_hyperparams,\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, #1e-2,# 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization # CHECK THIS VALUE!!!, I AM USING ZERO!!\n 'use_global_policy': use_global_policy,\n 'data_files_dir': data_files_dir,\n}\n\nlearn_algo = GPS(manipulator2d_agent, manipulator2d_env, **gps_hyperparams)\n\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# Optimize policy using learning algorithm\nraw_input(\"Press a key to start...\")\nprint(\"Running Learning Algorithm!!!\")\ntraining_successful = learn_algo.run(resume_training_itr)\nif training_successful:\n print(\"Learning Algorithm has finished SUCCESSFULLY!\")\nelse:\n print(\"Learning Algorithm has finished WITH ERRORS!\")\n\n\n# ############################## #\n# ############################## #\n# ## SAMPLE FROM FINAL POLICY ## #\n# ############################## #\n# ############################## #\n# if training_successful:\n# conditions_to_sample = gps_hyperparams['test_conditions']\n# change_print_color.change('GREEN')\n# n_samples = 1\n# noisy = False\n# sampler_hyperparams = {\n# 'noisy': noisy,\n# 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n# 'smooth_noise': False, # Whether or not to perform smoothing of noise\n# 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n# 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n# 'T': int(EndTime/Ts)*1, # Total points\n# 'dt': Ts\n# }\n# sampler = Sampler(learn_algo.cur[0].traj_distr, manipulator2d_env, **sampler_hyperparams)\n# print(\"Sampling from final policy!!!\")\n# sample_lists = list()\n# for cond_idx in conditions_to_sample:\n# raw_input(\"\\nSampling %d times from condition %d and with policy:%s (noisy:%s). 
\\n Press a key to continue...\" %\n# (n_samples, cond_idx, type(bigman_agent.policy), noisy))\n# sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)\n# # costs = learn_algo._eval_conditions_sample_list_cost([sample_list])\n# # # print(costs)\n# # # raw_input('pppp')\n# # sample_lists.append(sample_list)\n#\n# manipulator2d_env.reset(time=1, cond=0)\n\n\n\n\nprint(\"The script has finished!\")\nos._exit(0)\n\n" }, { "alpha_fraction": 0.5116180777549744, "alphanum_fraction": 0.516265332698822, "avg_line_length": 35.248085021972656, "blob_id": "08f3fd5830b43adb4c22866d6d158ab041f6f6f2", "content_id": "0312b6d423cb8057e6ead212a193cf5688f94e2e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23670, "license_type": "permissive", "max_line_length": 91, "num_lines": 653, "path": "/robolearn/torch/policies/tanh_gaussian_composed_multi_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\nLOG_SIG_MAX = 2\n# LOG_SIG_MIN = -20\nLOG_SIG_MIN = -3.0\n\nSIG_MAX = 7.38905609893065\nSIG_MIN = 0.049787068367863944\n\nLOG_MIX_COEFF_MIN = -10\nLOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5\nLOG_MIX_COEFF_MIN = -1\nLOG_MIX_COEFF_MAX = 1 #-4.5e-5\n\nEPS = 1e-12\n\n\nclass TanhGaussianComposedMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianComposedMultiPolicy(...)\n action, policy_dict = policy(obs)\n action, policy_dict = policy(obs, deterministic=True)\n action, policy_dict = policy(obs, return_log_prob=True)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n latent_dim,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n unshared_policy_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=1e-2,\n output_w_init='xavier_normal',\n output_b_init_val=1e-2,\n pol_output_activation='linear',\n mix_output_activation='linear',\n final_pol_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n final_policy_layer_norm=False,\n epsilon=1e-6,\n softmax_weights=False,\n **kwargs\n ):\n self.save_init_params(locals())\n TanhGaussianComposedMultiPolicy.__init__(self)\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n self._latent_size = latent_dim\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n self._final_pol_output_activation = ptu.get_activation(final_pol_output_activation)\n # 
Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n self._final_policy_layer_norm = final_policy_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. Mixing Layers\n # self.mfc_last = None # Below is instantiated\n self._fpfcs = [] # Final Policy Layers\n self._norm_fpfcs = [] # Norm. Mixing Layers\n\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n self._final_policy_modules = OrderedDict()\n self._final_policy_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n self.__setattr__(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, latent_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n 
self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # ############# #\n # Mixing Layers #\n # ############# #\n mixture_in_size = in_size + latent_dim*self._n_subpolicies\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, latent_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n if softmax_weights:\n raise ValueError(\"Check if it is correct a softmax\")\n # self.mfc_softmax = nn.Softmax(dim=1)\n else:\n self.mfc_softmax = None\n\n # ################### #\n # Final Policy Layers #\n # ################### #\n final_pol_in_size = latent_dim\n if unshared_policy_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_policy_hidden_sizes):\n fpfc = nn.Linear(final_pol_in_size, next_size)\n ptu.layer_init(\n layer=fpfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"fpfc{}\".format(ii), fpfc)\n self._fpfcs.append(fpfc)\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc{}\".format(ii), fpfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"fpfc{}_norm\".format(ii), ln)\n self._norm_fpfcs.append(ln)\n self.add_final_policy_module(\"fpfc{}_norm\".format(ii), ln)\n final_pol_in_size = next_size\n\n # Unshared Final Policy Last Layer\n fpfc_last = nn.Linear(final_pol_in_size, action_dim)\n ptu.layer_init(\n layer=fpfc_last,\n option=output_w_init,\n activation=final_pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"fpfc_last\", fpfc_last)\n self.fpfc_last = fpfc_last\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc_last\", fpfc_last)\n\n # ########## #\n # Std Layers #\n # ########## #\n # Multi-Policy Log-Stds Last Layers\n fpfc_last_log_std = nn.Linear(final_pol_in_size, action_dim)\n ptu.layer_init(\n layer=fpfc_last_log_std,\n option=output_w_init,\n activation=final_pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"fpfc_last_log_std\", fpfc_last_log_std)\n self.fpfc_last_log_std = fpfc_last_log_std\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc_last_log_std\", fpfc_last_log_std)\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n self._epsilon = epsilon\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n self._compo_pol_idx = torch.tensor([self._n_subpolicies],\n dtype=torch.int64,\n device=ptu.device)\n\n def get_action(self, obs_np, **kwargs):\n actions, info_dict = self.get_actions(obs_np[None], 
**kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool):\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._mixture_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n subpol_means = \\\n [self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n for pp in range(self._n_subpolicies)]\n subpols = torch.cat(subpol_means, dim=-1)\n\n if torch.isnan(subpols).any():\n raise ValueError('Some subpols are NAN:',\n subpols)\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = torch.cat([h.clone(), subpols], dim=-1) # N x dZ\n if not optimize_policies:\n mh = mh.detach()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mpol_mean = self.mfc_last(mh)\n\n if self.mfc_softmax is not None:\n raise NotImplementedError\n # mixture_coeff = self.mfc_softmax(mixture_coeff)\n\n\n # Final Policy\n final_pol_inputs = [ii.unsqueeze(-2)\n for ii in (subpol_means + [mpol_mean])]\n fph = torch.cat(\n final_pol_inputs,\n dim=-2,\n )\n\n for ff, fpfc in enumerate(self._fpfcs):\n fph = fpfc(fph)\n\n if self._final_policy_layer_norm:\n fph = self._norm_mfcs[ff](fph)\n\n fph = self._hidden_activation(fph)\n\n means = self._final_pol_output_activation(\n self.fpfc_last(fph)\n )\n\n log_stds = self._final_pol_output_activation(\n self.fpfc_last_log_std(fph)\n )\n log_stds = torch.clamp(log_stds, LOG_SIG_MIN, LOG_SIG_MAX)\n stds = torch.exp(log_stds)\n variances = torch.pow(stds, 2)\n\n if pol_idx is None:\n index = self._compo_pol_idx\n else:\n index = self._pols_idxs[pol_idx]\n\n mean = \\\n torch.index_select(means, dim=-2, index=index).squeeze(-2)\n std = \\\n torch.index_select(stds, dim=-2, index=index).squeeze(-2)\n log_std = \\\n torch.index_select(log_stds, dim=-2, index=index).squeeze(-2)\n variance = \\\n torch.index_select(variances, 
dim=-2, index=index).squeeze(-2)\n\n means = \\\n torch.index_select(means, dim=-2, index=self._pols_idxs).squeeze(-2)\n stds = \\\n torch.index_select(stds, dim=-2, index=self._pols_idxs).squeeze(-2)\n log_stds = \\\n torch.index_select(log_stds, dim=-2, index=self._pols_idxs).squeeze(-2)\n variances = \\\n torch.index_select(variances, dim=-2, index=self._pols_idxs).squeeze(-2)\n\n pre_tanh_value = None\n log_prob = None\n entropy = None\n mean_action_log_prob = None\n log_probs = None\n pre_tanh_values = None\n\n mixture_coeff = ptu.ones((nbatch, self.n_heads, self.action_dim))\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n noise = self._normal_dist.sample((nbatch,))\n pre_tanh_value = std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # Log probability: Main Policy\n log_prob = -((pre_tanh_value - mean) ** 2) / (2 * variance) \\\n - torch.log(std) - math.log(math.sqrt(2 * math.pi))\n log_prob -= torch.log(1. - action**2 + self._epsilon)\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n # Log probability: Sub-Policies\n log_probs = -((pre_tanh_values - means) ** 2) / (2 * variances) \\\n - torch.log(stds) - math.log(math.sqrt(2 * math.pi))\n log_probs -= torch.log(1. - actions**2 + self._epsilon)\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n if torch.isnan(action).any():\n raise ValueError('ACTION NAN')\n\n if torch.isnan(actions).any():\n raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n log_std=log_std,\n log_prob=log_prob,\n entropy=entropy,\n std=std,\n mean_action_log_prob=mean_action_log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = 
list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n\n # ####################### #\n # Final policy parameters #\n # ####################### #\n\n def final_policy_parameters(self):\n \"\"\"Returns an iterator over the final policy parameters.\n \"\"\"\n for name, param in self.named_final_policy_parameters():\n yield param\n\n def named_final_policy_parameters(self, **kwargs):\n \"\"\"Returns an iterator over final policy module parameters, yielding\n both the name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._final_policy_modules,\n self._final_policy_parameters,\n **kwargs)\n\n def add_final_policy_module(self, name, module):\n ptu.add_module(self._final_policy_modules, name, module)\n" }, { "alpha_fraction": 0.6194690465927124, "alphanum_fraction": 0.6251005530357361, "avg_line_length": 27.9069766998291, "blob_id": "c29dfd8d2ed21c358dd58fd9b2fef00f7cbe30b2", "content_id": "a077a481dcf76ba25f537b2e42fe438eaafc5057", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1243, "license_type": "permissive", "max_line_length": 72, "num_lines": 43, "path": "/examples/rl_algos/spinningup/plot_spinningup.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import subplots\nfrom robolearn.utils.plots import get_csv_data\n\n\ndef main(args):\n labels_to_plot = ['AverageEpRet']\n\n data = get_csv_data(args.file, labels_to_plot, space_separated=True)\n\n fig, axs = subplots(1)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Avg Return', fontweight='bold')\n\n max_iter = len(data[-1])\n if args.max_iter > 0:\n max_iter = args.max_iter\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa][:max_iter])\n ax.set_ylabel(labels_to_plot[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./progress.txt',\n help='path to the progress.csv file')\n parser.add_argument('--max_iter', '-i', type=int, default=-1,\n help='Unintentional id')\n args = 
parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.5771244168281555, "alphanum_fraction": 0.590414822101593, "avg_line_length": 25.9891300201416, "blob_id": "377ab5c68e60e361a51e626e7849cc8bc0b87d5e", "content_id": "e68ca971f60e6f72675795465aed9f1f5998003f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2483, "license_type": "permissive", "max_line_length": 79, "num_lines": 92, "path": "/robolearn/utils/plots/core.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport traceback\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nimport pandas as pd\n\n\ndef get_csv_data(csv_file, labels, space_separated=False):\n data, all_labels = get_csv_data_and_labels(csv_file,\n space_separated=space_separated)\n\n for label in all_labels:\n print(label)\n print('***\\n'*3)\n n_data = data.shape[0]\n\n new_data = np.zeros((len(labels), n_data))\n\n # # Uncomment for debugging\n # print(all_labels)\n\n for ll, name in enumerate(labels):\n if name in all_labels:\n idx = all_labels.index(name)\n try:\n new_data[ll, :] = data[:, idx]\n except Exception:\n print(traceback.format_exc())\n print(\"Error with data in %s\" % csv_file)\n sys.exit(1)\n else:\n raise ValueError(\"Label '%s' not available in file '%s'\"\n % (name, csv_file))\n\n return new_data\n\n\ndef get_csv_data_and_labels(csv_file, space_separated=False):\n # Read from CSV file\n try:\n if space_separated:\n series = pd.read_csv(csv_file, delim_whitespace=True)\n else:\n series = pd.read_csv(csv_file)\n except Exception:\n print(traceback.format_exc())\n print(\"Error reading %s\" % csv_file)\n sys.exit(1)\n\n data = series.as_matrix()\n labels = list(series)\n\n return data, labels\n\n\ndef set_latex_plot():\n matplotlib.rcParams['pdf.fonttype'] = 42\n matplotlib.rcParams['ps.fonttype'] = 42\n # rc('font', **{'family': 'serif','serif':['Times']})\n matplotlib.rcParams['font.family'] = ['serif']\n matplotlib.rcParams['font.serif'] = ['Times New Roman']\n\n\ndef subplots(*args, **kwargs):\n fig, axs = plt.subplots(*args, **kwargs)\n\n if isinstance(axs, np.ndarray):\n for aa in axs:\n axis_format(aa)\n else:\n axis_format(axs)\n\n return fig, axs\n\n\ndef fig_format(fig):\n fig.subplots_adjust(hspace=0)\n fig.set_facecolor((1, 1, 1))\n\n\ndef axis_format(axis):\n # axis.tick_params(axis='x', labelsize=25)\n # axis.tick_params(axis='y', labelsize=25)\n axis.tick_params(axis='x', labelsize=15)\n axis.tick_params(axis='y', labelsize=15)\n\n # Background\n axis.xaxis.set_major_locator(MaxNLocator(integer=True))\n axis.xaxis.grid(color='white', linewidth=2)\n axis.set_facecolor((0.917, 0.917, 0.949))\n" }, { "alpha_fraction": 0.5754661560058594, "alphanum_fraction": 0.5887534618377686, "avg_line_length": 29.959091186523438, "blob_id": "e05de8aad1937e57b0f7a5bbc32a074c4ea349ca", "content_id": "1e71d489976eaff472897749915bb69c982ba2d5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13622, "license_type": "permissive", "max_line_length": 89, "num_lines": 440, "path": "/examples/rl_algos/sac/pusher_hiu_sac_promp.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch HIU-SAC on Pusher2D3DofGoalCompoEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport os\nfrom shutil import copyfile\nimport numpy as 
np\nimport torch\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn.torch.utils.data_management import MultiGoalReplayBuffer\n\nfrom robolearn_gym_envs.pybullet import Pusher2D3DofGoalCompoEnv\n\nfrom robolearn.torch.algorithms.rl_algos.sac.hiu_sac \\\n import HIUSAC\n\nfrom robolearn.torch.models import NNQFunction\nfrom robolearn.torch.models import NNVFunction\nfrom robolearn.torch.models import NNMultiQFunction\nfrom robolearn.torch.models import NNMultiVFunction\n\nfrom robolearn.torch.policies import TanhGaussianPrompMultiPolicy\n\nimport argparse\nimport joblib\n\nnp.set_printoptions(suppress=True, precision=4)\n# np.seterr(all='raise') # WARNING RAISE ERROR IN NUMPY\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 128\n\nSEED = 110\n# NP_THREADS = 6\n\nSUBTASK = None\n\nPOLICY = TanhGaussianPrompMultiPolicy\n\nUSE_Q2 = True\nEXPLICIT_VF = False\n\nSOFTMAX_WEIGHTS = True\n# SOFTMAX_WEIGHTS = False\n# INIT_AVG_MIXING = True\nINIT_AVG_MIXING = False\n\nOPTIMIZER = 'adam'\n# OPTIMIZER = 'rmsprop'\n\nNORMALIZE_OBS = False\n\nexpt_params = dict(\n algo_name=HIUSAC.__name__,\n policy_name=POLICY.__name__,\n path_length=PATH_LENGTH,\n algo_params=dict(\n # Common RL algorithm params\n num_steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n num_epochs=500, # n_epochs\n num_updates_per_train_call=1, # How to many run algorithm train fcn\n num_steps_per_eval=PATHS_PER_EVAL * PATH_LENGTH,\n min_steps_start_train=BATCH_SIZE, # Min nsteps to start to train (or batch_size)\n min_start_eval=PATHS_PER_EPOCH * PATH_LENGTH, # Min nsteps to start to eval\n # EnvSampler params\n max_path_length=PATH_LENGTH, # max_path_length\n render=False,\n finite_horizon_eval=True,\n # SAC params\n action_prior='uniform',\n i_entropy_scale=2.0e-1,\n u_entropy_scale=[2.0e-1, 2.0e-1],\n auto_alphas=False,\n i_tgt_entro=1.0e+0,\n u_tgt_entros=[1.5e+0, 1.5e+0],\n # Learning rates\n optimizer=OPTIMIZER,\n policy_lr=3.e-4,\n qf_lr=3.e-4,\n # Soft target update\n i_soft_target_tau=5.e-3,\n u_soft_target_tau=5.e-3,\n # Regularization terms\n i_policy_mean_regu_weight=1.e-3,\n i_policy_std_regu_weight=1.e-3,\n i_policy_pre_activation_weight=0.e-3,\n i_policy_mixing_coeff_weight=1.e+0,\n u_policy_mean_regu_weight=[1.e-3, 1.e-3],\n u_policy_std_regu_weight=[1.e-3, 1.e-3],\n u_policy_pre_activation_weight=[0.e-3, 0.e-3],\n # Weight decays\n policy_weight_decay=0.e-5,\n q_weight_decay=0e-5,\n\n discount=0.99,\n reward_scale=1.0e-0,\n u_reward_scales=[1.0e-0, 1.0e-0],\n\n normalize_obs=NORMALIZE_OBS,\n ),\n replay_buffer_size=1e6,\n net_size=128,\n softmax_weights=SOFTMAX_WEIGHTS,\n # NN Normalizations\n # -----------------\n # input_norm=True,\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n # shared_layer_norm=True,\n # policies_layer_norm=True,\n # mixture_layer_norm=True,\n # NN Activations\n # --------------\n hidden_activation='relu',\n # hidden_activation='tanh',\n # hidden_activation='elu',\n # NN Initialization\n # -----------------\n # pol_hidden_w_init='xavier_normal',\n # pol_output_w_init='xavier_normal',\n pol_hidden_w_init='xavier_uniform',\n pol_output_w_init='xavier_uniform',\n # pol_hidden_w_init='uniform',\n # pol_output_w_init='uniform',\n # 
q_hidden_w_init='xavier_normal',\n # q_output_w_init='xavier_normal',\n q_hidden_w_init='xavier_uniform',\n q_output_w_init='xavier_uniform',\n # q_hidden_w_init='uniform',\n # q_output_w_init='uniform',\n # v_hidden_w_init='xavier_normal',\n # v_output_w_init='xavier_normal',\n v_hidden_w_init='xavier_uniform',\n v_output_w_init='xavier_uniform',\n # v_hidden_w_init='uniform',\n # v_output_w_init='uniform',\n)\n\nenv_params = dict(\n is_render=False,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n obs_with_ori=False,\n goal_pose=(0.65, 0.65),\n rdn_goal_pose=True,\n tgt_pose=(0.5, 0.25, 1.4660),\n rdn_tgt_object_pose=True,\n robot_config=None,\n rdn_robot_config=True,\n tgt_cost_weight=3.0,\n goal_cost_weight=3.0,\n ctrl_cost_weight=1.0e-3,\n no_task_weight=1.0,\n goal_tolerance=0.01,\n # max_time=PATH_LENGTH*DT,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n subtask=SUBTASK,\n seed=SEED,\n sequential_reward=False,\n)\n\n\ndef experiment(variant):\n\n # os.environ['OMP_NUM_THREADS'] = str(NP_THREADS)\n\n # Set seeds\n np.random.seed(variant['seed'])\n ptu.set_gpu_mode(variant['gpu'], gpu_id=0)\n ptu.seed(variant['seed'])\n variant['env_params']['seed'] = variant['seed']\n\n env = NormalizedBoxEnv(\n Pusher2D3DofGoalCompoEnv(**variant['env_params']),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n obs_dim = env.obs_dim\n action_dim = env.action_dim\n\n n_unintentional = 2\n\n if variant['load_dir']:\n params_file = os.path.join(variant['log_dir'], 'params.pkl')\n data = joblib.load(params_file)\n start_epoch = data['epoch']\n i_qf = data['qf']\n i_qf2 = data['qf2']\n u_qf = data['u_qf']\n u_qf2 = data['u_qf2']\n i_vf = data['i_vf']\n u_vf = data['u_vf']\n policy = data['policy']\n env._obs_mean = data['obs_mean']\n env._obs_var = data['obs_var']\n else:\n start_epoch = 0\n net_size = variant['net_size']\n\n u_qf = NNMultiQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n n_qs=n_unintentional,\n hidden_activation=variant['hidden_activation'],\n # shared_hidden_sizes=[net_size, net_size],\n shared_hidden_sizes=[net_size],\n # shared_hidden_sizes=[],\n unshared_hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n i_qf = NNQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_activation=variant['hidden_activation'],\n hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n\n if USE_Q2:\n u_qf2 = NNMultiQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n n_qs=n_unintentional,\n hidden_activation=variant['hidden_activation'],\n # shared_hidden_sizes=[net_size, net_size],\n shared_hidden_sizes=[net_size],\n # shared_hidden_sizes=[],\n unshared_hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n i_qf2 = NNQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n else:\n u_qf2 = None\n i_qf2 = None\n\n if EXPLICIT_VF:\n u_vf = NNMultiVFunction(\n obs_dim=obs_dim,\n n_vs=n_unintentional,\n hidden_activation=variant['hidden_activation'],\n # shared_hidden_sizes=[net_size, net_size],\n shared_hidden_sizes=[net_size],\n # shared_hidden_sizes=[],\n 
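# A minimal, self-contained sketch of the idea behind the `shared_hidden_sizes`
# and `unshared_hidden_sizes` arguments passed to NNMultiQFunction in this
# script: one shared trunk over (obs, action), then one independent head per
# sub-task. The real class internals are not shown in this file, so every name
# below is hypothetical, not robolearn's API.
import torch
import torch.nn as nn

class MultiHeadQSketch(nn.Module):
    def __init__(self, obs_dim, action_dim, n_qs,
                 shared_sizes=(128,), head_sizes=(128, 128)):
        super().__init__()
        trunk, in_size = [], obs_dim + action_dim
        for size in shared_sizes:          # shared layers
            trunk += [nn.Linear(in_size, size), nn.ReLU()]
            in_size = size
        self.trunk = nn.Sequential(*trunk)
        self.heads = nn.ModuleList()
        for _ in range(n_qs):              # one unshared head per sub-task
            layers, h_size = [], in_size
            for size in head_sizes:
                layers += [nn.Linear(h_size, size), nn.ReLU()]
                h_size = size
            layers.append(nn.Linear(h_size, 1))  # scalar Q-value per head
            self.heads.append(nn.Sequential(*layers))

    def forward(self, obs, action):
        h = self.trunk(torch.cat([obs, action], dim=-1))
        return [head(h) for head in self.heads]

# Example: MultiHeadQSketch(6, 3, n_qs=2)(torch.randn(4, 6), torch.randn(4, 3))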
unshared_hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n i_vf = NNVFunction(\n obs_dim=obs_dim,\n hidden_sizes=[net_size, net_size],\n hidden_w_init=variant['q_hidden_w_init'],\n output_w_init=variant['q_output_w_init'],\n )\n else:\n u_vf = None\n i_vf = None\n\n policy = POLICY(\n obs_dim=obs_dim,\n action_dim=action_dim,\n n_policies=n_unintentional,\n hidden_activation=variant['hidden_activation'],\n # shared_hidden_sizes=[net_size, net_size],\n shared_hidden_sizes=[net_size],\n # shared_hidden_sizes=[],\n unshared_hidden_sizes=[net_size, net_size],\n unshared_mix_hidden_sizes=[net_size, net_size],\n stds=None,\n input_norm=variant['input_norm'],\n shared_layer_norm=variant['shared_layer_norm'],\n policies_layer_norm=variant['policies_layer_norm'],\n mixture_layer_norm=variant['mixture_layer_norm'],\n mixing_temperature=1.,\n softmax_weights=variant['softmax_weights'],\n hidden_w_init=variant['pol_hidden_w_init'],\n output_w_init=variant['pol_output_w_init'],\n )\n\n if INIT_AVG_MIXING:\n set_average_mixing(\n policy, n_unintentional, obs_dim,\n batch_size=50,\n total_iters=1000,\n )\n\n replay_buffer = MultiGoalReplayBuffer(\n max_replay_buffer_size=variant['replay_buffer_size'],\n obs_dim=obs_dim,\n action_dim=action_dim,\n reward_vector_size=n_unintentional,\n )\n\n algorithm = HIUSAC(\n env=env,\n policy=policy,\n u_qf1=u_qf,\n replay_buffer=replay_buffer,\n batch_size=BATCH_SIZE,\n i_qf1=i_qf,\n u_qf2=u_qf2,\n i_qf2=i_qf2,\n u_vf=u_vf,\n i_vf=i_vf,\n eval_env=env,\n save_environment=False,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda(ptu.device)\n\n algorithm.pretrain(10000)\n algorithm.train(start_epoch=start_epoch)\n\n return algorithm\n\n\ndef set_average_mixing(policy, n_unintentional, obs_dim, batch_size=50,\n total_iters=1000):\n mixing_optimizer = torch.optim.Adam(\n policy.mixing_parameters(),\n lr=1.0e-4,\n amsgrad=True,\n weight_decay=1e-5,\n )\n loss_fn = torch.nn.MSELoss(size_average=False)\n for ii in range(total_iters):\n dummy_obs = torch.randn((batch_size, obs_dim))\n mix_pred = policy(dummy_obs, deterministic=True)[1]['mixing_coeff']\n mix_des = torch.ones_like(mix_pred) * 1./n_unintentional\n loss = loss_fn(mix_pred, mix_des)\n mixing_optimizer.zero_grad()\n loss.backward()\n mixing_optimizer.step()\n # Set gradient to zero again\n mixing_optimizer.zero_grad()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--seed', type=int, default=SEED)\n parser.add_argument('--expt_name', type=str, default=None)\n parser.add_argument('--subtask', type=int, default=-1)\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=25)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--load_dir', type=str, default=None)\n # GPU arguments\n parser.add_argument('--gpu', action=\"store_true\")\n # Other arguments\n parser.add_argument('--render', action=\"store_true\")\n # Algo arguments\n parser.add_argument('--mix_weight', type=float, default=1.e+1)\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'pusher'\n else:\n expt_name = args.expt_name\n\n # Default experiment parameters\n 
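# The script above stores transitions in a MultiGoalReplayBuffer whose
# `reward_vector_size` equals the number of sub-tasks. That class is not
# defined in this file; the toy ring buffer below (all names hypothetical) only
# sketches the core idea: keeping a scalar main-task reward plus a per-subtask
# reward *vector* for every transition.
import numpy as np

class TinyMultiRewardBuffer:
    def __init__(self, max_size, obs_dim, action_dim, n_subtasks):
        self._max_size = int(max_size)
        self._top = 0       # next write position
        self._size = 0      # number of valid entries
        self._obs = np.zeros((self._max_size, obs_dim))
        self._acts = np.zeros((self._max_size, action_dim))
        self._rewards = np.zeros((self._max_size, 1))
        self._reward_vectors = np.zeros((self._max_size, n_subtasks))

    def add(self, obs, act, reward, reward_vector):
        self._obs[self._top] = obs
        self._acts[self._top] = act
        self._rewards[self._top] = reward
        self._reward_vectors[self._top] = reward_vector
        self._top = (self._top + 1) % self._max_size   # overwrite oldest
        self._size = min(self._size + 1, self._max_size)

    def sample(self, batch_size):
        assert self._size > 0
        idxs = np.random.randint(0, self._size, batch_size)
        return dict(observations=self._obs[idxs],
                    actions=self._acts[idxs],
                    rewards=self._rewards[idxs],
                    reward_vectors=self._reward_vectors[idxs])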
expt_variant = expt_params\n\n # Default environment parameters\n expt_variant['env_params'] = env_params\n expt_variant['env_params']['is_render'] = args.render\n\n # Custom parameters\n if args.subtask >= 0:\n expt_variant['env_params']['subtask'] = args.subtask\n\n expt_variant['log_dir'] = args.log_dir\n expt_variant['load_dir'] = args.load_dir\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n expt_variant['seed'] = args.seed\n\n # Algo params\n expt_variant['algo_params']['render'] = args.render\n expt_variant['algo_params']['i_policy_mixing_coeff_weight'] = args.mix_weight\n\n log_dir = setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n dir_filename = os.path.realpath(__file__)\n filename = os.path.split(dir_filename)[1]\n copyfile(dir_filename, os.path.join(log_dir, filename))\n\n algo = experiment(expt_variant)\n\n # input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.5324434638023376, "alphanum_fraction": 0.5380051732063293, "avg_line_length": 35.94520568847656, "blob_id": "78fc9de7df517b06f5f0b989c529114cab794111", "content_id": "1ef77ac078bf335041077ab77a98a11c56036347", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2697, "license_type": "permissive", "max_line_length": 88, "num_lines": 73, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/costs/cost_safe_distance.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.algorithms.rl_algos import get_ramp_multiplier\nfrom robolearn.algorithms.rl_algos import RAMP_CONSTANT\n\n\nclass CostSafeDistance(object):\n def __init__(self, state_idxs, safe_distances, wps=None,\n inside_costs=None, outside_costs=None,\n ramp_option=RAMP_CONSTANT, wp_final_multiplier=1.0,\n ):\n\n self._state_idxs = state_idxs\n\n self._safe_distances = [np.array(dist) for dist in safe_distances]\n\n if wps is None:\n wps = [np.ones(state_idx) for state_idx in state_idxs]\n self._wps = [np.array(wp) for wp in wps]\n\n if inside_costs is None:\n inside_costs = [np.ones(state_idx) for state_idx in state_idxs]\n self._inside_costs = inside_costs\n\n if outside_costs is None:\n outside_costs = [np.zeros(state_idx) for state_idx in state_idxs]\n self._outside_costs = outside_costs\n\n self._ramp_option = ramp_option\n self._wp_final_multiplier = wp_final_multiplier\n\n def eval(self, path):\n observations = path['observations']\n T = len(path['observations'])\n Du = path['actions'][-1].shape[0]\n Dx = path['observations'][-1].shape[0]\n\n l = np.zeros(T)\n lu = np.zeros((T, Du))\n lx = np.zeros((T, Dx))\n luu = np.zeros((T, Du, Du))\n lxx = np.zeros((T, Dx, Dx))\n lux = np.zeros((T, Du, Dx))\n\n for state_idx, safe_dist, wp, inside_cost, outside_cost in zip(\n self._state_idxs, self._safe_distances, self._wps,\n self._inside_costs, self._outside_costs\n ):\n x = observations[:, state_idx]\n dim_sensor = x.shape[-1]\n wpm = get_ramp_multiplier(\n self._ramp_option, T,\n wp_final_multiplier=self._wp_final_multiplier\n )\n wp = wp * np.expand_dims(wpm, axis=-1)\n\n # Compute binary region penalty.\n dist = safe_dist - np.abs(x)\n\n dist_violation = dist > 0\n is_penetrating = np.all(dist_violation, axis=1, keepdims=True)\n\n l += np.sum(dist * (dist_violation * is_penetrating * inside_cost\n + ~dist_violation * ~is_penetrating * outside_cost),\n 
axis=1)\n\n # Cost derivative of c*max(0, d - |x|) --> c*I(d-|x|)*-1*x/|x|\n idx = np.array(state_idx)\n lx[:, idx] += wp*(\n inside_cost * dist_violation * is_penetrating - 1 * x / np.abs(x) +\n outside_cost * ~dist_violation * ~is_penetrating - 1 * x / np.abs(x)\n )\n\n return l, lx, lu, lxx, luu, lux\n" }, { "alpha_fraction": 0.6081771850585938, "alphanum_fraction": 0.6081771850585938, "avg_line_length": 35.6875, "blob_id": "279ba9d829d20b8ed412b258b5f20f744cc4d1e6", "content_id": "cc1c6bdd194e99d110eb4d83492d2b4bdd2bf448", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "permissive", "max_line_length": 69, "num_lines": 16, "path": "/robolearn/models/policies/make_deterministic.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.models.policies.base import Policy\n\n\nclass MakeDeterministic(Policy):\n def __init__(self, stochastic_policy):\n self.stochastic_policy = stochastic_policy\n\n Policy.__init__(self, stochastic_policy.action_dim)\n\n def get_action(self, observation):\n return self.stochastic_policy.get_action(observation,\n deterministic=True)\n\n def get_actions(self, observations):\n return self.stochastic_policy.get_actions(observations,\n deterministic=True)\n" }, { "alpha_fraction": 0.4967455267906189, "alphanum_fraction": 0.506567656993866, "avg_line_length": 35.750431060791016, "blob_id": "2bd1cbbe0272434b0d2382a93f2870d87db9cafe", "content_id": "89fbfe1384462871b2499c76997efab65335f2db", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42557, "license_type": "permissive", "max_line_length": 91, "num_lines": 1158, "path": "/robolearn/torch/algorithms/rl_algos/sac/hiu_sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Pong's SAC implementation\n\nhttps://github.com/vitchyr/rlkit\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom collections import OrderedDict\nfrom itertools import chain\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.samplers import InPlacePathSampler\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.utils.data_management.normalizer import RunningNormalizer\n\nimport tensorboardX\n\n# MAX_LOG_ALPHA = 9.21034037 # Alpha=10000 Before 01/07\nMAX_LOG_ALPHA = 6.2146080984 # Alpha=500 From 09/07\n\n\nclass HIUSAC(RLAlgorithm, TorchAlgorithm):\n \"\"\"\n Hierarchical Intentional-Unintentional Soft Actor Critic (HIU-SAC).\n \"\"\"\n def __init__(\n self,\n env,\n policy,\n u_qf1,\n\n replay_buffer,\n batch_size=1024,\n normalize_obs=False,\n eval_env=None,\n\n i_qf1=None,\n u_qf2=None,\n i_qf2=None,\n i_vf=None,\n u_vf=None,\n action_prior='uniform',\n\n i_entropy_scale=1.,\n u_entropy_scale=None,\n auto_alpha=True,\n i_tgt_entro=None,\n u_tgt_entros=None,\n\n policy_lr=3e-4,\n qf_lr=3e-4,\n\n i_policy_mean_regu_weight=1e-3,\n i_policy_std_regu_weight=1e-3,\n i_policy_pre_activation_weight=0.,\n i_policy_mixing_coeff_weight=1e-3,\n\n u_policy_mean_regu_weight=None,\n u_policy_std_regu_weight=None,\n u_policy_pre_activation_weight=None,\n\n 
policy_weight_decay=0.,\n q_weight_decay=0.,\n\n optimizer='adam',\n # optimizer='rmsprop',\n # optimizer='sgd',\n optimizer_kwargs=None,\n\n i_soft_target_tau=5e-3,\n u_soft_target_tau=5e-3,\n i_target_update_interval=1,\n u_target_update_interval=1,\n\n reward_scale=1.,\n u_reward_scales=None,\n\n save_replay_buffer=False,\n eval_deterministic=True,\n log_tensorboard=False,\n **kwargs\n ):\n\n # ###### #\n # Models #\n # ###### #\n\n # Exploration Policy\n self._policy = policy\n\n # Evaluation Policy\n if eval_deterministic:\n eval_policy = MakeDeterministic(self._policy)\n else:\n eval_policy = self._policy\n\n # Observation Normalizer\n if normalize_obs:\n self._obs_normalizer = RunningNormalizer(shape=env.obs_dim)\n else:\n self._obs_normalizer = None\n\n RLAlgorithm.__init__(\n self,\n explo_env=env,\n explo_policy=self._policy,\n eval_env=eval_env,\n eval_policy=eval_policy,\n obs_normalizer=self._obs_normalizer,\n **kwargs\n )\n\n # Number of Unintentional Tasks (Composable Tasks)\n self._n_unintentional = self._policy.n_heads\n\n # Evaluation Sampler (One for each unintentional)\n self.eval_u_samplers = [\n InPlacePathSampler(\n env=env,\n policy=WeightedMultiPolicySelector(self._policy, idx),\n total_samples=self.num_steps_per_eval,\n max_path_length=self.max_path_length,\n deterministic=True,\n )\n for idx in range(self._n_unintentional)\n ]\n\n # Intentional (Main Task) Q-functions\n self._i_qf1 = i_qf1\n self._i_qf2 = i_qf2\n if i_vf is None:\n self._i_vf = None\n self._i_target_vf = None\n self._i_target_qf1 = self._i_qf1.copy()\n self._i_target_qf2 = \\\n None if self._i_qf2 is None else self._i_qf2.copy()\n else:\n self._i_vf = i_vf\n self._i_target_vf = self._i_vf.copy()\n self._i_target_qf1 = None\n self._i_target_qf2 = None\n\n # Unintentional (Composable Tasks) Q-functions\n self._u_qf1 = u_qf1\n self._u_qf2 = u_qf2\n if u_vf is None:\n self._u_vf = None\n self._u_target_vf = None\n self._u_target_qf1 = self._u_qf1.copy()\n self._u_target_qf2 = self._u_qf2.copy()\n else:\n self._u_vf = u_vf\n self._u_target_vf = self._u_vf.copy()\n self._u_target_qf1 = None\n self._u_target_qf2 = None\n\n # Replay Buffer\n self.replay_buffer = replay_buffer\n self.batch_size = batch_size\n self.save_replay_buffer = save_replay_buffer\n\n # Soft-update rate for target V-functions\n self._i_soft_target_tau = i_soft_target_tau\n self._u_soft_target_tau = u_soft_target_tau\n self._i_target_update_interval = i_target_update_interval\n self._u_target_update_interval = u_target_update_interval\n\n # Important algorithm hyperparameters\n self._action_prior = action_prior\n self._i_entropy_scale = i_entropy_scale\n if u_entropy_scale is None:\n u_entropy_scale = [i_entropy_scale\n for _ in range(self._n_unintentional)]\n self._u_entropy_scale = torch.tensor(u_entropy_scale,\n dtype=torch.float32,\n device=ptu.device)\n\n # Desired Alphas\n self._auto_alphas = auto_alpha\n if i_tgt_entro is None:\n i_tgt_entro = -env.action_dim\n self._i_tgt_entro = torch.tensor([i_tgt_entro], dtype=torch.float32,\n device=ptu.device)\n if u_tgt_entros is None:\n u_tgt_entros = [i_tgt_entro for _ in range(self._n_unintentional)]\n self._u_tgt_entros = torch.tensor(u_tgt_entros, dtype=torch.float32,\n device=ptu.device)\n self._u_log_alphas = torch.zeros(self._n_unintentional,\n device=ptu.device, requires_grad=True)\n self._i_log_alpha = torch.zeros(1, device=ptu.device, requires_grad=True)\n\n # Reward Scales\n self.reward_scale = reward_scale\n if u_reward_scales is None:\n reward_scale = 
kwargs['reward_scale']\n u_reward_scales = [reward_scale\n for _ in range(self._n_unintentional)]\n self._u_reward_scales = torch.tensor(u_reward_scales, dtype=torch.float32,\n device=ptu.device)\n\n # ########## #\n # Optimizers #\n # ########## #\n if optimizer.lower() == 'adam':\n optimizer_class = optim.Adam\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n amsgrad=True,\n # amsgrad=False,\n )\n elif optimizer.lower() == 'rmsprop':\n optimizer_class = optim.RMSprop\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n\n )\n else:\n raise ValueError('Wrong optimizer')\n\n # Values optimizer\n vals_params_list = [self._u_qf1.parameters(), self._i_qf1.parameters()]\n if self._u_qf2 is not None:\n vals_params_list.append(self._u_qf2.parameters())\n if self._i_qf2 is not None:\n vals_params_list.append(self._i_qf2.parameters())\n if self._u_vf is not None:\n vals_params_list.append(self._u_vf.parameters())\n if self._i_vf is not None:\n vals_params_list.append(self._i_vf.parameters())\n vals_params = chain(*vals_params_list)\n\n self._values_optimizer = optimizer_class(\n vals_params,\n lr=qf_lr,\n weight_decay=q_weight_decay,\n **optimizer_kwargs\n )\n\n # Policy optimizer\n self._policy_optimizer = optimizer_class(\n self._policy.parameters(),\n lr=policy_lr,\n weight_decay=policy_weight_decay,\n **optimizer_kwargs\n )\n\n # Alpha optimizers\n self._alphas_optimizer = optimizer_class(\n [self._u_log_alphas, self._i_log_alpha],\n lr=policy_lr,\n **optimizer_kwargs\n )\n\n # Weights for policy regularization coefficients\n self._i_pol_mean_regu_weight = i_policy_mean_regu_weight\n self._i_pol_std_regu_weight = i_policy_std_regu_weight\n self._i_pol_pre_activ_weight = i_policy_pre_activation_weight\n self._i_pol_mixing_coeff_weight = i_policy_mixing_coeff_weight\n\n if u_policy_mean_regu_weight is None:\n u_policy_mean_regu_weight = [i_policy_mean_regu_weight\n for _ in range(self._n_unintentional)]\n self._u_policy_mean_regu_weight = \\\n torch.tensor(u_policy_mean_regu_weight, dtype=torch.float32,\n device=ptu.device)\n if u_policy_std_regu_weight is None:\n u_policy_std_regu_weight = [i_policy_std_regu_weight\n for _ in range(self._n_unintentional)]\n self._u_policy_std_regu_weight = \\\n torch.tensor(u_policy_std_regu_weight, dtype=torch.float32,\n device=ptu.device)\n if u_policy_pre_activation_weight is None:\n u_policy_pre_activation_weight = [\n i_policy_pre_activation_weight\n for _ in range(self._n_unintentional)\n ]\n self._u_policy_pre_activ_weight = \\\n torch.tensor(u_policy_pre_activation_weight, dtype=torch.float32,\n device=ptu.device)\n\n # Useful Variables for logging\n self.log_data = dict()\n self.log_data['Pol KL Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Qf Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Qf2 Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Vf Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Rewards'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Policy Entropy'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Policy Mean'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n self.explo_env.action_dim,\n ))\n self.log_data['Pol Log Std'] = np.zeros((\n self.num_train_steps_per_epoch,\n 
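# The "values optimizer" built above feeds itertools.chain(...) over several
# modules' .parameters() into a single torch optimizer, so one .step() updates
# every critic at once. A minimal standalone demonstration of that pattern
# (dummy modules, not the robolearn networks):
from itertools import chain
import torch
import torch.nn as nn
import torch.optim as optim

qf_a = nn.Linear(4, 1)
qf_b = nn.Linear(4, 1)
critic_opt = optim.Adam(chain(qf_a.parameters(), qf_b.parameters()), lr=3e-4)

x = torch.randn(8, 4)
loss = qf_a(x).pow(2).mean() + qf_b(x).pow(2).mean()  # joint critic loss
critic_opt.zero_grad()
loss.backward()
critic_opt.step()  # a single step updates the parameters of both modules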
self._n_unintentional + 1,\n self.explo_env.action_dim,\n ))\n self.log_data['Mixing Weights'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional,\n self.explo_env.action_dim,\n ))\n self.log_data['Alphas'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n\n # Tensorboard-like Logging\n self._log_tensorboard = log_tensorboard\n if log_tensorboard:\n self._summary_writer = \\\n tensorboardX.SummaryWriter(log_dir=logger.get_snapshot_dir())\n else:\n self._summary_writer = None\n\n def pretrain(self, n_pretrain_samples):\n # We do not require any pretrain (I think...)\n observation = self.explo_env.reset()\n for ii in range(n_pretrain_samples):\n action = self.explo_env.action_space.sample()\n # Interact with environment\n next_ob, reward, terminal, env_info = (\n self.explo_env.step(action)\n )\n agent_info = None\n\n # Increase counter\n self._n_env_steps_total += 1\n # Create np.array of obtained terminal and reward\n terminal = np.array([terminal])\n reward = np.array([reward])\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_ob,\n agent_info=agent_info,\n env_info=env_info,\n )\n observation = next_ob\n\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n if terminal:\n self.explo_env.reset()\n\n def _do_training(self):\n # Get batch of samples\n batch = self.get_batch()\n\n # Get common data from batch\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n # ####################### #\n # Get All Obs Policy Info #\n # ####################### #\n\n # One pass for both s and s' instead of two\n obs_combined = torch.cat((obs, next_obs), dim=0)\n i_all_actions, policy_info = self._policy(\n obs_combined,\n deterministic=False,\n return_log_prob=True,\n pol_idx=None,\n optimize_policies=False,\n )\n # Intentional policy info\n i_new_actions = i_all_actions[:self.batch_size]\n i_next_actions = i_all_actions[self.batch_size:].detach()\n\n i_new_log_pi = policy_info['log_prob'][:self.batch_size]\n i_next_log_pi = policy_info['log_prob'][self.batch_size:].detach()\n\n i_new_policy_mean = policy_info['mean'][:self.batch_size]\n i_new_policy_log_std = policy_info['log_std'][:self.batch_size]\n i_new_pre_tanh_value = policy_info['pre_tanh_value'][:self.batch_size]\n new_mixing_coeff = policy_info['mixing_coeff'][:self.batch_size]\n\n # Unintentional policy info\n u_new_actions = policy_info['pol_actions'][:self.batch_size]\n u_new_log_pi = policy_info['pol_log_probs'][:self.batch_size]\n u_new_policy_mean = policy_info['pol_means'][:self.batch_size]\n u_new_policy_log_std = policy_info['pol_log_stds'][:self.batch_size]\n u_new_pre_tanh_value = policy_info['pol_pre_tanh_values'][:self.batch_size]\n\n u_next_actions = policy_info['pol_actions'][self.batch_size:].detach()\n u_next_log_pi = policy_info['pol_log_probs'][self.batch_size:].detach()\n\n # Alphas\n ialpha = self._i_entropy_scale*torch.clamp(self._i_log_alpha,\n max=MAX_LOG_ALPHA).exp()\n ualphas = (self._u_entropy_scale*torch.clamp(self._u_log_alphas,\n max=MAX_LOG_ALPHA).exp()\n ).unsqueeze(1)\n\n # ########################## #\n # Unintentional Critics Step #\n # ########################## #\n u_rewards = \\\n (batch['reward_vectors'] * self._u_reward_scales).unsqueeze(-1)\n u_terminals = (batch['terminal_vectors']).unsqueeze(-1)\n\n # Unintentional Q1(s', a')\n u_next_q1 = 
torch.cat(\n            [\n                self._u_target_qf1(next_obs, u_next_actions[:, uu, :])[0][uu].unsqueeze(1)\n                for uu in range(self._n_unintentional)\n            ],\n            dim=1\n        )\n        if self._u_target_qf2 is not None:\n            # Unintentional Q2(s', a')\n            u_next_q2 = torch.cat(\n                [\n                    self._u_target_qf2(next_obs, u_next_actions[:, uu, :])[0][uu].unsqueeze(1)\n                    for uu in range(self._n_unintentional)\n                ],\n                dim=1\n            )\n            # Minimum Unintentional Double-Q\n            u_next_q = torch.min(u_next_q1, u_next_q2)\n        else:\n            u_next_q = u_next_q1\n\n        # Unintentional Vtarget(s')\n        u_next_v = u_next_q - ualphas*u_next_log_pi\n\n        # Calculate Bellman Backup for Unintentional Q-values\n        u_q_backup = u_rewards + (1. - u_terminals) * self.discount * u_next_v\n        u_q_backup = u_q_backup.detach()\n\n        # Unintentional Q1(s,a)\n        u_q_pred = torch.cat([qq.unsqueeze(1)\n                              for qq in self._u_qf1(obs, actions)[0]],\n                             dim=1)\n\n        # Unintentional QF1 Losses: Mean Squared Bellman Equation (MSBE)\n        u_qf1_loss = \\\n            0.5*torch.mean((u_q_pred - u_q_backup)**2, dim=0).squeeze(-1)\n        # MSBE Q1-Loss for all unintentional policies.\n        total_u_qf1_loss = torch.sum(u_qf1_loss)\n\n        if self._u_qf2 is not None:\n            # Unintentional Q2(s,a)\n            u_q2_pred = torch.cat([qq.unsqueeze(1)\n                                   for qq in self._u_qf2(obs, actions)[0]],\n                                  dim=1)\n\n            # Unintentional QF2 Losses: Mean Squared Bellman Equation (MSBE)\n            u_qf2_loss = 0.5*torch.mean((u_q2_pred - u_q_backup)**2,\n                                        dim=0).squeeze(-1)\n            # MSBE Q2-Loss for all unintentional policies.\n            total_u_qf2_loss = torch.sum(u_qf2_loss)\n        else:\n            u_qf2_loss = 0\n            total_u_qf2_loss = 0\n\n        # ####################### #\n        # Intentional Critic Step #\n        # ####################### #\n        i_rewards = batch['rewards'] * self.reward_scale\n        i_terminals = batch['terminals']\n\n        # Intentional Q1(s', a')\n        i_next_q1 = self._i_target_qf1(next_obs, i_next_actions)[0]\n\n        if self._i_target_qf2 is not None:\n            # Intentional Q2(s', a')\n            i_next_q2 = self._i_target_qf2(next_obs, i_next_actions)[0]\n\n            # Minimum Intentional Double-Q\n            i_next_q = torch.min(i_next_q1, i_next_q2)\n        else:\n            i_next_q = i_next_q1\n\n        # Intentional Vtarget(s')\n        i_next_v = i_next_q - ialpha*i_next_log_pi\n\n        # Calculate Bellman Backup for Intentional Q-value\n        i_q_backup = i_rewards + (1. 
- i_terminals) * self.discount * i_next_v\n\n # Intentional Q1(s,a)\n i_q_pred = self._i_qf1(obs, actions)[0]\n\n # Intentional QF1 Loss: Mean Squared Bellman Equation (MSBE)\n i_qf1_loss = 0.5*torch.mean((i_q_backup.detach() - i_q_pred)**2)\n\n if self._i_qf2 is not None:\n # Intentional Q2(s,a)\n i_q2_pred = self._i_qf2(obs, actions)[0]\n\n # Intentional QF2 Loss: Mean Squared Bellman Equation (MSBE)\n i_qf2_loss = 0.5*torch.mean((i_q_backup.detach() - i_q2_pred)**2)\n else:\n i_qf2_loss = 0\n\n # #################### #\n # Unintentional Actors #\n # #################### #\n if self._action_prior == 'normal':\n raise NotImplementedError\n else:\n u_policy_prior_log_probs = 0.0 # Uniform prior\n\n # Unintentional Q1(s, a)\n u_q1_new_actions = torch.cat(\n [self._u_qf1(obs, u_new_actions[:, uu, :])[0][uu].unsqueeze(1)\n for uu in range(self._n_unintentional)\n ],\n dim=1\n )\n if self._u_qf2 is not None:\n # Unintentional Q2(s, a)\n u_q2_new_actions = torch.cat(\n [self._u_qf2(obs, u_new_actions[:, uu, :])[0][uu].unsqueeze(1)\n for uu in range(self._n_unintentional)\n ],\n dim=1\n )\n # Minimum Unintentional Double-Q\n u_q_new_actions = torch.min(u_q1_new_actions, u_q2_new_actions)\n else:\n u_q_new_actions = u_q1_new_actions\n\n # # Get Unintentional A(s, a)\n # u_advantage_new_actions = u_q_new_actions - u_v_pred.detach()\n\n # Get Unintentional Policies KL loss: - (E_a[Q(s, a) + H(.)])\n u_policy_kl_loss = -torch.mean(\n u_q_new_actions - ualphas*u_new_log_pi\n + u_policy_prior_log_probs,\n dim=0\n ).squeeze(-1)\n # u_policy_kl_loss = -torch.mean(\n # u_advantage_new_actions - u_log_pi*ualphas,\n # dim=0).squeeze(-1)\n\n # Get Unintentional Policies regularization loss\n u_mean_reg_loss = self._u_policy_mean_regu_weight * \\\n (u_new_policy_mean ** 2).mean(dim=0).mean(dim=-1)\n u_std_reg_loss = self._u_policy_std_regu_weight * \\\n (u_new_policy_log_std ** 2).mean(dim=0).mean(dim=-1)\n u_pre_activation_reg_loss = \\\n self._u_policy_pre_activ_weight * \\\n (u_new_pre_tanh_value**2).sum(dim=-1).mean(dim=0).mean(dim=-1)\n u_policy_regu_loss = (u_mean_reg_loss + u_std_reg_loss\n + u_pre_activation_reg_loss)\n\n # Get Unintentional Policies Total loss\n u_policy_loss = (u_policy_kl_loss + u_policy_regu_loss)\n total_u_policy_loss = torch.sum(u_policy_loss)\n\n # ################# #\n # Intentional Actor #\n # ################# #\n if self._action_prior == 'normal':\n raise NotImplementedError\n else:\n i_policy_prior_log_probs = 0.0 # Uniform prior\n\n # Intentional Q1(s, a)\n i_q1_new_actions = self._i_qf1(obs, i_new_actions)[0]\n\n if self._i_qf2 is not None:\n # Intentional Q2(s, a)\n i_q2_new_actions = self._i_qf2(obs, i_new_actions)[0]\n\n # Minimum Intentional Double-Q\n i_q_new_actions = torch.min(i_q1_new_actions, i_q2_new_actions)\n else:\n i_q_new_actions = i_q1_new_actions\n\n # # Intentional A(s, a)\n # i_advantage_new_actions = i_q_new_actions - i_v_pred.detach()\n\n # Intentional policy KL loss: - (E_a[Q(s, a) + H(.)])\n i_policy_kl_loss = -torch.mean(\n i_q_new_actions - ialpha*i_new_log_pi\n + i_policy_prior_log_probs\n )\n # i_policy_kl_loss = -torch.mean(\n # i_advantage_new_actions - i_log_pi*ialpha\n # )\n\n # Intentional policy regularization loss\n i_mean_reg_loss = self._i_pol_mean_regu_weight * \\\n (i_new_policy_mean ** 2).mean()\n i_std_reg_loss = self._i_pol_std_regu_weight * \\\n (i_new_policy_log_std ** 2).mean()\n i_pre_activation_reg_loss = \\\n self._i_pol_pre_activ_weight * \\\n (i_new_pre_tanh_value**2).sum(dim=-1).mean()\n mixing_coeff_loss = 
self._i_pol_mixing_coeff_weight * \\\n 0.5*((new_mixing_coeff - 1/self._n_unintentional)**2).mean()\n i_policy_regu_loss = (i_mean_reg_loss + i_std_reg_loss\n + i_pre_activation_reg_loss + mixing_coeff_loss)\n\n # Intentional Policy Total loss\n i_policy_loss = (i_policy_kl_loss + i_policy_regu_loss)\n\n # Update both Intentional and Unintentional Policies at the same time\n self._policy_optimizer.zero_grad()\n total_iu_loss = total_u_policy_loss + i_policy_loss\n total_iu_loss.backward()\n self._policy_optimizer.step()\n\n # ############### #\n # V-function Step #\n # ############### #\n if self._u_vf is None:\n u_v_pred = u_q_new_actions - ualphas*u_new_log_pi\n u_vf_loss = 0\n total_u_vf_loss = 0\n else:\n u_v_pred = torch.cat([vv.unsqueeze(1)\n for vv in self._u_vf(obs)[0]],\n dim=1)\n\n # Calculate Bellman Backup for Unintentional V-values\n u_v_backup = u_q_new_actions - ualphas*u_new_log_pi + u_policy_prior_log_probs\n u_v_backup = u_v_backup.detach()\n\n u_vf_loss = \\\n 0.5*torch.mean((u_v_backup - u_v_pred)**2, dim=0).squeeze(-1)\n total_u_vf_loss = torch.sum(u_vf_loss)\n\n if self._i_vf is None:\n i_v_pred = i_q_new_actions - ialpha*i_new_log_pi\n i_vf_loss = 0\n else:\n i_v_pred = self._i_vf(obs)[0]\n # Calculate Bellman Backup for V-value\n i_v_backup = i_q_new_actions - ialpha*i_new_log_pi + i_policy_prior_log_probs\n i_v_backup = i_v_backup.detach()\n # Calculate Intentional Vf Loss\n i_vf_loss = 0.5*torch.mean((i_v_backup - i_v_pred)**2)\n\n # Update both Intentional and Unintentional Values at the same time\n self._values_optimizer.zero_grad()\n values_loss = (total_u_qf1_loss + total_u_qf2_loss +\n i_qf1_loss + i_qf2_loss +\n total_u_vf_loss + i_vf_loss)\n values_loss.backward()\n self._values_optimizer.step()\n\n # ###################### #\n # Update Target Networks #\n # ###################### #\n if self._n_total_train_steps % self._u_target_update_interval == 0:\n if self._u_target_vf is None:\n ptu.soft_update_from_to(\n source=self._u_qf1,\n target=self._u_target_qf1,\n tau=self._u_soft_target_tau\n )\n if self._u_target_qf2 is not None:\n ptu.soft_update_from_to(\n source=self._u_qf2,\n target=self._u_target_qf2,\n tau=self._u_soft_target_tau\n )\n else:\n ptu.soft_update_from_to(\n source=self._u_vf,\n target=self._u_target_vf,\n tau=self._u_soft_target_tau\n )\n\n if self._n_total_train_steps % self._i_target_update_interval == 0:\n if self._i_target_vf is None:\n ptu.soft_update_from_to(\n source=self._i_qf1,\n target=self._i_target_qf1,\n tau=self._i_soft_target_tau\n )\n if self._i_target_qf2 is not None:\n ptu.soft_update_from_to(\n source=self._i_qf2,\n target=self._i_target_qf2,\n tau=self._i_soft_target_tau\n )\n else:\n ptu.soft_update_from_to(\n source=self._i_vf,\n target=self._i_target_vf,\n tau=self._i_soft_target_tau\n )\n\n # ################################## #\n # Intentional & Unintentional Alphas #\n # ################################## #\n if self._auto_alphas:\n u_log_alphas = self._u_log_alphas.clamp(max=MAX_LOG_ALPHA)\n u_alpha_loss = -(u_log_alphas.unsqueeze(-1) *\n (u_new_log_pi + self._u_tgt_entros.unsqueeze(-1)\n ).detach()\n ).mean()\n\n i_log_alpha = self._i_log_alpha.clamp(max=MAX_LOG_ALPHA)\n i_alpha_loss = -(i_log_alpha * (\n i_new_log_pi + self._i_tgt_entro).detach()).mean()\n self._alphas_optimizer.zero_grad()\n total_alpha_loss = u_alpha_loss + i_alpha_loss\n total_alpha_loss.backward()\n self._alphas_optimizer.step()\n\n # ############### #\n # LOG Useful Data #\n # ############### #\n step_idx = self._n_epoch_train_steps\n 
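# `ptu.soft_update_from_to(source, target, tau)`, used for the target-network
# updates just above, is robolearn's Polyak-averaging helper; its body is not
# shown in this file. A standard implementation of that update, assuming
# source and target share the same architecture, is sketched below
# (hypothetical function name):
import torch
import torch.nn as nn

def soft_update_sketch(source: nn.Module, target: nn.Module, tau: float):
    # target <- tau * source + (1 - tau) * target, parameter by parameter.
    with torch.no_grad():
        for s_param, t_param in zip(source.parameters(), target.parameters()):
            t_param.data.mul_(1.0 - tau).add_(tau * s_param.data)

# With a small tau (e.g. the 5e-3 soft_target_tau defaults above), the target
# network trails the learned network slowly, which stabilizes the Q backups.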
self.log_data['Policy Entropy'][step_idx, :-1] = \\\n ptu.get_numpy(-u_new_log_pi.mean(dim=0).squeeze(-1))\n self.log_data['Policy Entropy'][step_idx, -1] = \\\n ptu.get_numpy(-i_new_log_pi.mean(dim=0))\n\n self.log_data['Pol Log Std'][step_idx, :-1, :] = \\\n ptu.get_numpy(u_new_policy_log_std.mean(dim=0))\n self.log_data['Pol Log Std'][step_idx, -1, :] = \\\n ptu.get_numpy(i_new_policy_log_std.mean(dim=0))\n\n self.log_data['Policy Mean'][step_idx, :-1, :] = \\\n ptu.get_numpy(u_new_policy_mean.mean(dim=0))\n self.log_data['Policy Mean'][step_idx, -1, :] = \\\n ptu.get_numpy(i_new_policy_mean.mean(dim=0))\n\n self.log_data['Pol KL Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_policy_kl_loss)\n self.log_data['Pol KL Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_policy_kl_loss)\n\n self.log_data['Qf Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_qf1_loss)\n self.log_data['Qf Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_qf1_loss)\n\n if self._u_qf2 is not None:\n self.log_data['Qf2 Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_qf2_loss)\n if self._i_qf2 is not None:\n self.log_data['Qf2 Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_qf2_loss)\n\n if self._u_vf is not None:\n self.log_data['Vf Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_vf_loss)\n if self._i_vf is not None:\n self.log_data['Vf Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_vf_loss)\n\n self.log_data['Rewards'][step_idx, :-1] = \\\n ptu.get_numpy(u_rewards.mean(dim=0).squeeze(-1))\n self.log_data['Rewards'][step_idx, -1] = \\\n ptu.get_numpy(i_rewards.mean(dim=0).squeeze(-1))\n\n self.log_data['Mixing Weights'][step_idx, :, :] = \\\n ptu.get_numpy(new_mixing_coeff.mean(dim=0))\n\n self.log_data['Alphas'][step_idx, :-1] = \\\n ptu.get_numpy(ualphas.squeeze(-1))\n self.log_data['Alphas'][step_idx, -1] = \\\n ptu.get_numpy(ialpha)\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'TrainingI/qf_loss',\n ptu.get_numpy(i_qf1_loss),\n self._n_env_steps_total\n )\n if self._i_qf2 is not None:\n self._summary_writer.add_scalar(\n 'TrainingI/qf2_loss',\n ptu.get_numpy(i_qf2_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/avg_reward',\n ptu.get_numpy(i_rewards.mean()),\n self._n_env_steps_total\n )\n # self._summary_writer.add_scalar(\n # 'TrainingI/avg_advantage',\n # ptu.get_numpy(i_advantage_new_actions.mean()),\n # self._n_env_steps_total\n # )\n self._summary_writer.add_scalar(\n 'TrainingI/policy_loss',\n ptu.get_numpy(i_policy_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/policy_entropy',\n ptu.get_numpy(-i_new_log_pi.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/policy_mean',\n ptu.get_numpy(i_new_policy_mean.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/policy_std',\n ptu.get_numpy(i_new_policy_log_std.mean().exp()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/q_vals',\n ptu.get_numpy(i_q_new_actions.mean()),\n self._n_env_steps_total\n )\n\n def _not_do_training(self):\n return\n\n @property\n def torch_models(self):\n networks_list = [\n self._policy,\n self._i_qf1,\n self._u_qf1,\n ]\n if self._i_qf2 is not None:\n networks_list.append(self._i_qf2)\n if self._i_vf is not None:\n networks_list.append(self._i_vf)\n if self._i_target_qf1 is not None:\n networks_list.append(self._i_target_qf1)\n if self._i_target_qf2 is not None:\n networks_list.append(self._i_target_qf2)\n if self._i_target_vf is not None:\n 
networks_list.append(self._i_target_vf)\n if self._u_qf2 is not None:\n networks_list.append(self._u_qf2)\n if self._u_vf is not None:\n networks_list.append(self._u_vf)\n if self._u_target_qf1 is not None:\n networks_list.append(self._u_target_qf1)\n if self._u_target_qf2 is not None:\n networks_list.append(self._u_target_qf2)\n if self._u_target_vf is not None:\n networks_list.append(self._u_target_vf)\n\n return networks_list\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n snapshot = RLAlgorithm.get_epoch_snapshot(self, epoch)\n\n snapshot.update(\n policy=self._policy,\n qf=self._i_qf1,\n qf2=self._i_qf2,\n target_qf=self._i_target_qf1,\n vf=self._i_vf,\n target_vf=self._i_target_vf,\n u_qf=self._u_qf1,\n u_qf2=self._u_qf2,\n u_vf=self._u_vf,\n target_u_qf1=self._u_target_qf1,\n target_u_qf2=self._u_target_qf2,\n target_u_vf=self._u_target_vf,\n )\n\n if self.explo_env.online_normalization or self.explo_env.normalize_obs:\n snapshot.update(\n obs_mean=self.explo_env.obs_mean,\n obs_var=self.explo_env.obs_var,\n )\n\n # Observation Normalizer\n snapshot.update(\n obs_normalizer=self._obs_normalizer,\n )\n\n # Replay Buffer\n if self.save_replay_buffer:\n snapshot.update(\n replay_buffer=self.replay_buffer,\n )\n\n return snapshot\n\n def _update_logging_data(self):\n max_step = max(self._n_epoch_train_steps, 1)\n\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n\n # Unintentional info\n for uu in range(self._n_unintentional):\n self.eval_statistics['[U-%02d] Policy Entropy' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Entropy'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Qf Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Vf Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Vf Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Pol KL Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol KL Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Rewards' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Rewards'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Mixing Weights' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Mixing Weights'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Alphas' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Alphas'][:max_step, uu]\n ))\n\n for aa in range(self.explo_env.action_dim):\n self.eval_statistics['[U-%02d] Policy Std [%02d]' % (uu, aa)] = \\\n np.nan_to_num(np.mean(\n np.exp(self.log_data['Pol Log Std'][:max_step, uu, aa])\n ))\n self.eval_statistics['[U-%02d] Policy Mean [%02d]' % (uu, aa)] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Mean'][:max_step, uu, aa]\n ))\n\n if self._u_qf2 is not None:\n self.eval_statistics['[U-%02d] Qf2 Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf2 Loss'][:max_step, uu]\n ))\n\n # Intentional info\n self.eval_statistics['[I] Policy Entropy'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Entropy'][:max_step, -1]\n ))\n self.eval_statistics['[I] Qf Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step, -1]\n ))\n if self._i_qf2 is not None:\n self.eval_statistics['[I] Qf2 Loss'] = \\\n 
np.nan_to_num(np.mean(\n self.log_data['Qf2 Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Vf Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Vf Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Pol KL Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol KL Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Rewards'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Rewards'][:max_step, -1]\n ))\n self.eval_statistics['[I] Alphas'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Alphas'][:max_step, -1]\n ))\n for aa in range(self.explo_env.action_dim):\n self.eval_statistics['[I] Policy Std'] = \\\n np.nan_to_num(np.mean(\n np.exp(self.log_data['Pol Log Std'][:max_step, -1, aa])\n ))\n self.eval_statistics['[I] Policy Mean'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Mean'][:max_step, -1, aa]\n ))\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n self._update_logging_data()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n # Interaction Paths for each unintentional policy\n test_paths = [None for _ in range(self._n_unintentional)]\n for unint_idx in range(self._n_unintentional):\n logger.log(\"[U-%02d] Collecting samples for evaluation\" % unint_idx)\n test_paths[unint_idx] = \\\n self.eval_u_samplers[unint_idx].obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths[unint_idx], stat_prefix=\"[U-%02d] Test\" % unint_idx,\n ))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'EvaluationU%02d/avg_return' % unint_idx,\n statistics['[U-%02d] Test Returns Mean' % unint_idx],\n self._n_epochs\n )\n\n self._summary_writer.add_scalar(\n 'EvaluationU%02d/avg_reward' % unint_idx,\n statistics['[U-%02d] Test Rewards Mean' % unint_idx],\n self._n_epochs\n )\n\n # Interaction Paths for the intentional policy\n logger.log(\"[I] Collecting samples for evaluation\")\n i_test_paths = self.eval_sampler.obtain_samples()\n statistics.update(eval_util.get_generic_path_information(\n i_test_paths, stat_prefix=\"[I] Test\",\n ))\n\n if self._exploration_paths:\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n else:\n statistics.update(eval_util.get_generic_path_information(\n i_test_paths, stat_prefix=\"Exploration\",\n ))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'EvaluationI/avg_return',\n statistics['[I] Test Returns Mean'],\n self._n_epochs\n )\n\n self._summary_writer.add_scalar(\n 'EvaluationI/avg_reward',\n statistics['[I] Test Rewards Mean'] * self.reward_scale,\n self._n_epochs\n )\n\n if hasattr(self.explo_env, \"log_diagnostics\"):\n pass\n # # TODO: CHECK ENV LOG_DIAGNOSTICS\n # print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n # Epoch Plotter\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n\n # Reset log_data\n for key in self.log_data.keys():\n self.log_data[key].fill(0)\n\n def get_batch(self):\n batch = self.replay_buffer.random_batch(self.batch_size)\n\n if self._obs_normalizer is not None:\n batch['observations'] = \\\n self._obs_normalizer.normalize(batch['observations'])\n batch['next_observations'] = \\\n self._obs_normalizer.normalize(batch['next_observations'])\n\n return batch\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen 
after every step\n :return:\n \"\"\"\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n # Update observation normalizer (if applicable)\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n RLAlgorithm._handle_step(\n self,\n observation=observation,\n action=action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _end_rollout(self):\n \"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n\n self.replay_buffer.terminate_episode()\n\n RLAlgorithm._end_rollout(self)\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 30.5, "blob_id": "a45ed77355533e2fa6f4a234bcafbeb35c3a329e", "content_id": "d057fdda975f3f13343c8760713eff6b963dce62", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "permissive", "max_line_length": 49, "num_lines": 6, "path": "/robolearn/models/policies/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .base import Policy\nfrom .base import ExplorationPolicy\nfrom .base import SerializablePolicy\n\nfrom .make_deterministic import MakeDeterministic\nfrom .random_policy import RandomPolicy\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 24, "blob_id": "a887aac41c9eb1ce8b011c9af89e9346327281a4", "content_id": "1336aa72a6795fdb9efdf5e4aea4ee9feb496dd3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "permissive", "max_line_length": 24, "num_lines": 1, "path": "/robolearn/torch/algorithms/rl_algos/gps/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .mdgps import MDGPS\n" }, { "alpha_fraction": 0.5788416266441345, "alphanum_fraction": 0.5835286378860474, "avg_line_length": 27.179244995117188, "blob_id": "0b2ae86fa130aaad7cc3139bc8991af70ab35661", "content_id": "7e1487347d91034d798f890861dc8bb56fdb935f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2987, "license_type": "permissive", "max_line_length": 80, "num_lines": 106, "path": "/robolearn/utils/samplers/rollout.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef rollout(env, agent, max_path_length=np.inf, animated=False,\n deterministic=None, obs_normalizer=None,\n rollout_start_fcn=None, rollout_end_fcn=None):\n \"\"\"\n Execute a single rollout until the task finishes (environment returns done)\n or max_path_length is reached.\n\n Args:\n env: OpenAI-like environment\n agent: Policy with function get_actions(obs)\n max_path_length:\n animated (Bool): Call env.render() at each timestep or not\n deterministic:\n\n Returns:\n Rollout dictionary (dict)\n\n The following value for the following keys will be a 2D array, with the\n first dimension corresponding to the time dimension.\n - observations (np.ndarray)\n - actions (np.ndarray)\n - rewards (np.ndarray)\n - next_observations (np.ndarray)\n - terminals (np.ndarray)\n\n The next two elements will be lists of dictionaries, with the 
index into\n        the list being the time index\n        - agent_infos (list)\n        - env_infos\n\n    \"\"\"\n    observations = []\n    actions = []\n    rewards = []\n    terminals = []\n    agent_infos = []\n    env_infos = []\n\n    obs = env.reset()\n\n    if rollout_start_fcn is not None:\n        rollout_start_fcn()\n\n    next_obs = None\n    path_length = 0\n\n    if animated:\n        env.render()\n\n    while path_length < max_path_length:\n        if obs_normalizer is None:\n            policy_input = obs\n        else:\n            policy_input = obs_normalizer.normalize(obs)\n\n        if deterministic is None:\n            action, agent_info = agent.get_action(policy_input)\n        else:\n            action, agent_info = agent.get_action(policy_input,\n                                                  deterministic=deterministic)\n        next_obs, reward, done, env_info = env.step(action)\n\n        observations.append(obs)\n        rewards.append(reward)\n        terminals.append(done)\n        actions.append(action)\n        agent_infos.append(agent_info)\n        env_infos.append(env_info)\n        path_length += 1\n        if done:\n            break\n        obs = next_obs\n        if animated:\n            env.render()\n\n    if rollout_end_fcn is not None:\n        rollout_end_fcn()\n\n    actions = np.array(actions)\n    if len(actions.shape) == 1:\n        actions = np.expand_dims(actions, 1)\n\n    observations = np.array(observations)\n    if len(observations.shape) == 1:\n        observations = np.expand_dims(observations, 1)\n\n    next_observations = np.vstack(\n        (\n            observations[1:, :],\n            np.expand_dims(next_obs, 0)\n        )\n    )\n\n    return dict(\n        observations=observations,\n        actions=actions,\n        rewards=np.array(rewards).reshape(-1, 1),\n        next_observations=next_observations,\n        terminals=np.array(terminals).reshape(-1, 1),\n        agent_infos=agent_infos,\n        env_infos=env_infos,\n    )\n" }, { "alpha_fraction": 0.5774751305580139, "alphanum_fraction": 0.5822598338127136, "avg_line_length": 26.170000076293945, "blob_id": "75e5fb5f857ad7e69e36f0a4f265399fa4b86f44", "content_id": "e8309950d5da831a99ec68ab34564d83c0ae2eef", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2717, "license_type": "permissive", "max_line_length": 80, "num_lines": 100, "path": "/robolearn/utils/samplers/exploration_rollout.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef exploration_rollout(env, exploration_policy, max_path_length=np.inf,\n                        animated=False, deterministic=None, condition=None):\n    \"\"\"\n    Execute a single rollout until the task finishes (environment returns done)\n    or max_path_length is reached.\n\n    Args:\n        env:\n        exploration_policy:\n        max_path_length:\n        animated:\n        deterministic:\n        condition:\n\n    Returns:\n        Rollout dictionary (dict)\n\n        The following value for the following keys will be a 2D array, with the\n        first dimension corresponding to the time dimension.\n        - observations (np.ndarray)\n        - actions (np.ndarray)\n        - rewards (np.ndarray)\n        - next_observations (np.ndarray)\n        - terminals (np.ndarray)\n\n        The next two elements will be lists of dictionaries, with the index into\n        the list being the time index\n        - agent_infos (list)\n        - env_infos\n\n    \"\"\"\n    observations = []\n    actions = []\n    rewards = []\n    terminals = []\n    agent_infos = []\n    env_infos = []\n\n    if condition is None:\n        exploration_policy.reset()\n        obs = env.reset()\n    else:\n        exploration_policy.reset(condition)\n        obs = env.reset(condition)\n\n    next_obs = None\n    path_length = 0\n\n    if animated:\n        env.render()\n\n    while path_length < max_path_length:\n        if deterministic is None:\n            a, agent_info = exploration_policy.get_action(obs)\n        else:\n            a, agent_info = 
\\\n                exploration_policy.get_action(obs, deterministic=deterministic)\n        next_obs, reward, done, env_info = env.step(a)\n\n        observations.append(obs)\n        rewards.append(reward)\n        terminals.append(done)\n        actions.append(a)\n        agent_infos.append(agent_info)\n        env_infos.append(env_info)\n        path_length += 1\n        if done:\n            break\n        obs = next_obs\n        if animated:\n            env.render()\n\n    actions = np.array(actions)\n    if len(actions.shape) == 1:\n        actions = np.expand_dims(actions, 1)\n\n    observations = np.array(observations)\n    if len(observations.shape) == 1:\n        observations = np.expand_dims(observations, 1)\n    next_obs = np.array([next_obs])\n\n    next_observations = np.vstack(\n        (\n            observations[1:, :],\n            next_obs\n        )\n    )\n\n    return dict(\n        observations=observations,\n        actions=actions,\n        rewards=np.array(rewards).reshape(-1, 1),\n        next_observations=next_observations,\n        terminals=np.array(terminals).reshape(-1, 1),\n        agent_infos=agent_infos,\n        env_infos=env_infos,\n    )\n" }, { "alpha_fraction": 0.5503718852996826, "alphanum_fraction": 0.5557809472084045, "avg_line_length": 26.90566062927246, "blob_id": "73d4836e268ed84f5b95e0a2a7d7e0f38e47e0e1", "content_id": "8691be701cceab4f2971ef7ccef0369338ce1338", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1479, "license_type": "permissive", "max_line_length": 74, "num_lines": 53, "path": "/scripts/convert_to_cpu.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\n\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\nimport numpy as np\nimport robolearn.torch.utils.pytorch_util as ptu\nimport torch\n\nfilename = str(uuid.uuid4())\nSEED = 110\n\n\ndef convert_to_cpu(args):\n\n    np.random.seed(SEED)\n    ptu.seed(SEED)\n\n    for file in os.listdir(args.dir):\n        if file.endswith(\".pkl\"):\n            if file == \"params.pkl\" and not args.params:\n                continue\n            full_file = os.path.join(args.dir, file)\n            data = joblib.load(full_file)\n\n            if args.gpu >= 0:\n                device_name = \"cuda:%d\" % int(args.gpu)\n            else:\n                device_name = \"cpu\"\n\n            print(\"Converting to %s: %s\" % (device_name, full_file))\n            for key, value in data.items():\n                if isinstance(value, torch.nn.Module):\n                    if args.gpu >= 0:\n                        data[key] = value.cuda(int(args.gpu))\n                    else:\n                        data[key] = value.cpu()\n\n            joblib.dump(data, full_file, compress=3)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dir', type=str, help='path to the snapshot file')\n    parser.add_argument('--gpu', type=int, default=-1,\n                        help='GPU id')\n    parser.add_argument('--params', action=\"store_true\")\n    args = parser.parse_args()\n\n    convert_to_cpu(args)\n    print('The script has finished')\n" }, { "alpha_fraction": 0.591932475566864, "alphanum_fraction": 0.6072545051574707, "avg_line_length": 31.632652282714844, "blob_id": "a3d82ad54ee66a6719dfee5457270d1edbd55863", "content_id": "6505682f29d9bc9c0c2933c05d7df6c12022058c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3198, "license_type": "permissive", "max_line_length": 91, "num_lines": 98, "path": "/examples/v010/discrete_envs/random_agent.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import pybullet as pb\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\n# from pybullet_envs.robot_bases import BodyPart, 
Joint, Pose_Helper\n\n# from gym.envs.registration import register\n# register(\n# id='ReacherBullet-v5',\n# entry_point='robolearn.envs.reacher.reacher_env:ReacherBulletEnv')\n\n\n# from robolearn.old_envs.reacher import ReacherBulletEnv\n# from robolearn.old_envs.r2d2 import R2D2BulletEnv\n# from robolearn.old_envs.bigman_pb import BigmanBulletEnv\nfrom robolearn.v010.envs.bigman_pb.bigman_robot import BIGMAN_INIT_CONFIG\nfrom robolearn.envs.simple_envs import FrozenLakeEnv\n\n\nclass RandomAgent(object):\n \"\"\"The world's simplest agent!\"\"\"\n def __init__(self, action_space):\n self.action_space = action_space\n\n def act(self, observation, reward, done):\n return self.action_space.sample()\n\n\nif __name__ == '__main__':\n # env = gym.make('ReacherBullet-v5')\n\n # env = ReacherBulletEnv(render=True)\n # env = R2D2BulletEnv(render=True)\n # env = BigmanBulletEnv(render=True)\n env = FrozenLakeEnv(map_name=\"4x4\", is_slippery=True)\n\n env.seed(0)\n\n agent = RandomAgent(env.action_space)\n\n episode_count = 5\n reward = 0\n done = False\n\n # fig, ax = plt.subplots(1, 1)\n # my_image = ax.imshow(np.zeros((320, 320, 3)), interpolation='nearest', animated=True)\n # fig.canvas.draw()\n # background = fig.canvas.copy_from_bbox(ax.bbox) # cache the background\n # plt.ion()\n # plt.show()\n\n # env.render(mode='human') # Only if we want at the beginning\n for i in range(episode_count):\n input('Press key to reset episode %d/%d' % (i+1, episode_count))\n ob = env.reset()\n input('Press key to start episode %d/%d' % (i+1, episode_count))\n action = np.array(BIGMAN_INIT_CONFIG)\n\n EndTime = 5.0\n init_pos = action\n final_pos = np.zeros_like(init_pos)#action[18] + np.deg2rad(90)\n ts = env.dt\n total_steps = EndTime/ts\n steps_counter = 0\n\n # while True:\n while steps_counter < total_steps:\n steps_counter += 1\n # action = agent.act(ob, reward, done) * 0.001\n # action[:] += (final_pos - init_pos)/total_steps\n action = env.action_space.sample()\n # print('Agent obs:', ob, '| reward:', reward, '| action', action)\n ob, reward, done, _ = env.step(action)\n # print(env._robot.robot_body.get_pose())\n # print(pb.getBasePositionAndOrientation(env.drill_uid[-1]))\n # print('---')\n # input('---')\n if done:\n print('ENVIRONMENT DONE!!!')\n break\n env.render()\n\n # rgb_image = env.render(mode='rgb_array')\n # my_image.set_data(rgb_image)\n # fig.canvas.restore_region(background) # restore background\n # ax.draw_artist(my_image)\n # fig.canvas.blit(ax.bbox) # redraw the axes rectangle\n # # fig.canvas.draw()\n\n # plt.pause(1./100.)\n # time.sleep(1./100.)\n\n env.close()\n input('Press a key to finish the script...')\n" }, { "alpha_fraction": 0.5577659010887146, "alphanum_fraction": 0.5703902244567871, "avg_line_length": 29.752941131591797, "blob_id": "79cd78463c51a44fbbb4526a6d103af92a81ede4", "content_id": "b2df24827483c862d94b0b99898115e2bf108ddf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2614, "license_type": "permissive", "max_line_length": 100, "num_lines": 85, "path": "/examples/v010/continuous_envs/centauro_pb/model_learning.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\n\nfrom robolearn.old_envs.centauro_pb import CentauroBulletEnv\nfrom robolearn.old_agents.random_gym_agent import RandomGymAgent\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--arg1', type=int, default=60)\n 
parser.add_argument('--arg2', type=str, default='string1')\n parser.add_argument('--env_with_img', action=\"store_true\", dest='env_with_img',\n default=False)\n\n training_rollouts = 30\n validation_rollouts = 10\n\n # Initialize environment\n render = True\n # render = False\n EndTime = 10.0\n env_with_img = False\n active_joints = 'LA'\n control_mode = 'torque'\n env = CentauroBulletEnv(render=render, active_joints=active_joints,\n control_mode=control_mode)\n env.seed(0)\n\n ts = env.dt\n total_steps = int(EndTime/ts)\n\n # Agent\n agent = RandomGymAgent(env.action_space)\n # agent.seed(5)\n # print(agent.obs_dim)\n # print(agent.act_dim)\n # input('saadsfsdhfkj')\n\n # Collect initial training data\n # env.render(mode='human') # Only if we want at the beginning\n print('Generating training data')\n max_rollouts = training_rollouts\n for rr in range(max_rollouts):\n # input('Press key to reset episode %d/%d' % (i+1, episode_count))\n ob = env.reset()\n reward = None\n done = False\n input('Press key to start episode %d/%d' % (rr+1, max_rollouts))\n\n steps_counter = 0\n\n # while True:\n while steps_counter < total_steps:\n print('external_counter', steps_counter,\n ' | mx_steps:', total_steps)\n action = agent.act(ob, reward, done) * 0.001\n\n obs, reward, done, _ = env.step(action)\n if done:\n print('ENVIRONMENT DONE!!!')\n break\n # env.render()\n\n # if env_with_img:\n # dim_img_data = img_width*img_height*3\n # rgb_image = obs[-dim_img_data:].astype(np.uint8).reshape(img_width, img_height, 3)\n # else:\n # rgb_image = env.render(mode='rgb_array')\n # my_image.set_data(rgb_image)\n # fig.canvas.restore_region(background) # restore background\n # ax.draw_artist(my_image)\n # fig.canvas.blit(ax.bbox) # redraw the axes rectangle\n # # fig.canvas.draw()\n\n # plt.pause(1./100.)\n # time.sleep(1./100.)\n steps_counter += 1\n\n env.close()\n input('Press a key to finish the script...')\n\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5416805148124695, "alphanum_fraction": 0.5453338027000427, "avg_line_length": 27.121496200561523, "blob_id": "542ffcb953478631776a11d96f2093311378321e", "content_id": "c8c016f8156559aa3488fd7e19f8bdf96d347367", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3011, "license_type": "permissive", "max_line_length": 75, "num_lines": 107, "path": "/robolearn/torch/utils/distributions/tanh_normal.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis code is based on TanhNormal class.\nhttps://github.com/vitchyr/rlkit\n\"\"\"\nimport torch\nfrom torch.distributions import Distribution\nfrom torch.distributions import Normal\n\n\nclass TanhNormal(Distribution):\n \"\"\"\n Represent distribution of X where\n X ~ tanh(Z)\n Z ~ N(mean, std)\n\n Note: this is not very numerically stable.\n \"\"\"\n def __init__(self, normal_mean, normal_std, epsilon=1e-6):\n \"\"\"\n\n Args:\n normal_mean (Tensor): Mean of the normal distribution\n normal_std (Tensor): Std of the normal distribution\n epsilon (Double): Numerical stability epsilon when computing\n log-prob.\n \"\"\"\n super(TanhNormal, self).__init__()\n self._normal_mean = normal_mean\n self._normal_std = normal_std\n self._normal = Normal(normal_mean, normal_std)\n self._epsilon = epsilon\n\n @property\n def mean(self):\n return self._normal.mean\n\n @property\n def variance(self):\n return self._normal.variance\n\n @property\n def stddev(self):\n return self._normal.stddev\n\n 
@property\n def epsilon(self):\n return self._epsilon\n\n def sample(self, return_pretanh_value=False):\n # z = self._normal.sample()\n z = self._normal.sample().detach()\n if return_pretanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def rsample(self, return_pretanh_value=False):\n z = self._normal.rsample()\n # z = (\n # self._normal_mean +\n # self._normal_std *\n # Normal(\n # ptu.zeros(self._normal_mean.size()),\n # ptu.ones(self._normal_std.size()),\n # ).sample()\n # )\n if return_pretanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def sample_n(self, n, return_pre_tanh_value=False):\n z = self._normal.sample_n(n)\n if return_pre_tanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def log_prob(self, value, pre_tanh_value=None):\n \"\"\"\n Returns the log of the probability density function evaluated at\n `value`.\n\n Args:\n value (Tensor):\n pre_tanh_value (Tensor): arctan(value)\n\n Returns:\n log_prob (Tensor)\n\n \"\"\"\n if pre_tanh_value is None:\n pre_tanh_value = torch.log(\n (1+value) / (1-value)\n ) / 2\n\n return self._normal.log_prob(pre_tanh_value) - \\\n torch.log(1. - value * value + self._epsilon)\n # return self.normal.log_prob(pre_tanh_value) - \\\n # torch.log(1. - torch.tanh(pre_tanh_value)**2 + self._epsilon)\n\n def cdf(self, value, pre_tanh_value=None):\n if pre_tanh_value is None:\n pre_tanh_value = torch.log(\n (1+value) / (1-value)\n ) / 2\n return self._normal.cdf(pre_tanh_value)\n\n\n" }, { "alpha_fraction": 0.6699751615524292, "alphanum_fraction": 0.6699751615524292, "avg_line_length": 24.1875, "blob_id": "e99ae5bda0678cf855900fe7c7728556695e696f", "content_id": "9daab65122e9c31b8909797122686515bfaa9b3d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "permissive", "max_line_length": 69, "num_lines": 16, "path": "/robolearn/models/policies/random_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.models.policies.base import SerializablePolicy\n\n\nclass RandomPolicy(SerializablePolicy):\n \"\"\"\n Policy that samples an action from action space.\n \"\"\"\n\n def __init__(self, action_space):\n\n self.action_space = action_space\n\n super(RandomPolicy, self).__init__(action_dim=action_space.n)\n\n def get_action(self, obs):\n return self.action_space.sample(), {}\n" }, { "alpha_fraction": 0.7191780805587769, "alphanum_fraction": 0.7191780805587769, "avg_line_length": 11.083333015441895, "blob_id": "a616fc13d530dd7634501000a861be8474579a6a", "content_id": "29f24d4b06ac235b3e71384bcce53e8f5a4ffc98", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "permissive", "max_line_length": 32, "num_lines": 12, "path": "/robolearn/torch/algorithms/rl_algos/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "# VB\nfrom .sql import SQL\n\n# PG\nfrom .reinforce import Reinforce\n\n# AC\nfrom .ddpg import DDPG\nfrom .sac import SAC\n\n# GPS\nfrom .gps import MDGPS\n\n" }, { "alpha_fraction": 0.859649121761322, "alphanum_fraction": 0.859649121761322, "avg_line_length": 56, "blob_id": "2a9ddc6cc2e0c5e785fffdef2e3bed33f9238040", "content_id": "6bb06984f799e2f0b617a27a9f345d8a0ba4ce43", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": 
"permissive", "max_line_length": 56, "num_lines": 2, "path": "/robolearn/models/values/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.models.values.v_function import VFunction\nfrom robolearn.models.values.q_function import QFunction\n" }, { "alpha_fraction": 0.8517587780952454, "alphanum_fraction": 0.8517587780952454, "avg_line_length": 29.615385055541992, "blob_id": "601ecbcf1477e87a5bb633b6735bd5908bcc282a", "content_id": "866c79663a9a5ad2870e3e8b383d2c0b6e2fe437", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "permissive", "max_line_length": 58, "num_lines": 13, "path": "/robolearn/utils/data_management/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .path_builder import PathBuilder\n\n# Replay Buffers\nfrom .simple_replay_buffer import SimpleReplayBuffer\nfrom .env_replay_buffer import EnvReplayBuffer\nfrom .multigoal_replay_buffer import MultiGoalReplayBuffer\nfrom .fake_replay_buffer import FakeReplayBuffer\n\n\n# Normalizers\nfrom .normalizer import Normalizer\nfrom .normalizer import IdentityNormalizer\nfrom .normalizer import FixedNormalizer\n" }, { "alpha_fraction": 0.6007136106491089, "alphanum_fraction": 0.6334738731384277, "avg_line_length": 26.650224685668945, "blob_id": "0c35e17c5d4ea50acce65d08101ff401efc716b5", "content_id": "938cc83a0945ffbea913953193651518604be9d8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6166, "license_type": "permissive", "max_line_length": 111, "num_lines": 223, "path": "/examples/miscellaneous/test_multipol.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import time\nimport torch\nfrom robolearn.torch.policies import TanhGaussianWeightedMultiPolicy\nfrom robolearn.torch.models.values import NNMultiQFunction\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn.utils.data_management import MultiGoalReplayBuffer\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom torch.autograd import Variable\n\n\n# SEED\nSEED = 48\ntorch.manual_seed(SEED)\n\nT = 5000\nSIM_TIMESTEP = 0.01\nFRAME_SKIP = 1\nDT = SIM_TIMESTEP * FRAME_SKIP\nenv_params = dict(\n is_render=False,\n obs_with_img=False,\n active_joints='RA',\n control_mode='tasktorque',\n # _control_mode='torque',\n # _control_mode='velocity',\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n obs_distances=False,\n balance_cost_weight=2.0,\n fall_cost_weight=0.5,\n tgt_cost_weight=20.0,\n # tgt_cost_weight=50.0,\n balance_done_cost=0., # 2.0*PATH_LENGTH, # TODO: dont forget same balance weight\n tgt_done_reward=0., # 20.0,\n # tgt_cost_weight=5.0,\n # balance_cost_weight=0.0,\n # fall_cost_weight=0.0,\n # tgt_cost_weight=0.0,\n # balance_cost_weight=5.0,\n # fall_cost_weight=7.0,\n ctrl_cost_weight=1.0e-1,\n use_log_distances=True,\n log_alpha_pos=1e-4,\n log_alpha_ori=1e-4,\n goal_tolerance=0.05,\n min_obj_height=0.60,\n max_obj_height=1.20,\n max_obj_distance=0.20,\n max_time=None,\n subtask=None,\n # subtask=1,\n random_init=True,\n\n)\nenv = CentauroTrayEnv(**env_params)\n\nobs_dim = env.obs_dim\naction_dim = env.action_dim\nn_intentions = env.n_subgoals\n\n\n# REPLAY BUFFER\nBATCH_SIZE = 256\nreplay_buffer = MultiGoalReplayBuffer(\n max_replay_buffer_size=1e6,\n obs_dim=obs_dim,\n action_dim=action_dim,\n reward_vector_size=n_intentions,\n)\n\n\n# POLICY 
PARAMS\nshared_hidden_sizes = (256, 256)\nunshared_hidden_sizes = (256, 256)\nunshared_mix_hidden_sizes = (256, 256)\n\nnn_pol = TanhGaussianWeightedMultiPolicy(\n obs_dim=obs_dim,\n action_dim=action_dim,\n n_policies=n_intentions,\n shared_hidden_sizes=shared_hidden_sizes,\n unshared_hidden_sizes=unshared_hidden_sizes,\n unshared_mix_hidden_sizes=unshared_mix_hidden_sizes,\n hidden_activation='relu',\n)\n\n# Q_VAL\nu_qf = NNMultiQFunction(obs_dim=obs_dim,\n action_dim=action_dim,\n n_qs=n_intentions,\n # shared_hidden_sizes=[net_size, net_size],\n shared_hidden_sizes=(256, 256),\n unshared_hidden_sizes=(256, 256))\n\nprint('NN MULTI POLICY')\nprint(nn_pol)\nprint('**\\n'*4)\n\nprint(\"ALL PARAMS\")\nfor name, param in nn_pol.named_parameters():\n print(name, param.shape)\nprint('**\\n'*4)\n\nprint(\"SHARED PARAMS\")\nfor name, param in nn_pol.named_shared_parameters():\n print(name, param.shape)\nprint('**\\n'*4)\n\nprint(\"POL PARAMS\")\nfor name, param in nn_pol.named_policies_parameters():\n print(name, param.shape)\nprint('**\\n'*4)\n\nprint(\"MIX PARAMS\")\nfor name, param in nn_pol.named_mixing_parameters():\n print(name, param.shape)\nprint('**\\n'*4)\n\n# input('Press key to start training')\n\nbatch_size = 50\nall_obs = torch.randn((batch_size, obs_dim))#*5**2 + 30\ndes_acts = torch.randn((batch_size, action_dim))\ndes_subacts = torch.randn((batch_size, action_dim, n_intentions))\n\n# o = nn_pol(a, _val_idxs=[0], deterministic=True)\n# error = torch.sum(b0 - o[0][0])\n\nloss_fn = torch.nn.MSELoss(size_average=False)\nlearning_rate = 1e-4\noptimizer_pol = torch.optim.Adam(nn_pol.parameters(), lr=learning_rate)\n\n# # y0 = b0\n# for tt in range(100000):\n# # loss = loss_fn(y_pred[0], y0) + loss_fn(y_pred[1], y1)\n# loss = 0\n# for aa in range(n_policies):\n# a_preds = nn_pol(all_obs, pol_idx=aa, deterministic=True)[0]\n# loss += loss_fn(des_subacts[:, :, aa], a_preds)\n# a_preds = nn_pol(all_obs, pol_idx=None, deterministic=True)[0]\n# loss += loss_fn(des_acts, a_preds)\n# print(tt, loss.item())\n#\n# optimizer_pol.zero_grad()\n# loss.backward()\n# optimizer_pol.step()\n# print('='*10)\n# print('='*10)\n# input('wuuu')\nobs = env.reset()\nstart = time.time()\nfor tt in range(T):\n act, agent_info = nn_pol.get_action(obs, pol_idx=None, deterministic=True)\n next_obs, reward, done, env_info = env.step(act)\n replay_buffer.add_sample(\n observation=obs,\n action=act,\n reward=reward,\n terminal=done,\n next_observation=next_obs,\n agent_info=agent_info,\n env_info=env_info,\n )\n obs = next_obs\n if tt > BATCH_SIZE:\n batch = replay_buffer.random_batch(BATCH_SIZE)\n\n for uu in range(n_intentions):\n # Get batch rewards and terminal for unintentional tasks\n rewards = Variable(ptu.from_numpy(batch['reward_vectors'][:, uu]).float(), requires_grad=False)\n rewards = rewards.unsqueeze(-1)\n terminals = Variable(ptu.from_numpy(batch['terminal_vectors'][:, uu]).float(), requires_grad=False)\n terminals = terminals.unsqueeze(-1)\nprint('&&&&\\n'*6)\ntotal_time = time.time() - start\nprint('TOTAL TIME:', total_time)\nprint('DES TIME', T*DT)\nprint('TOT/DES', total_time/(T*DT))\ninput('NADA DESPUESSS')\n# print(a)\n# print(o[0])\n# print(b0)\n\n# input('Now train Value Fcn')\n\n# ###\n# ###\n# ###\n# ###\n# ###\n\nvalues = torch.Tensor([10.11, -120.56, 150.9923])\nnn_val = NNMultiQFunction(5, 2, 3, [3], [2])\n\noptimizer_val = torch.optim.Adam(nn_val.parameters(), lr=learning_rate)\n\nobs = torch.Tensor([4, 5, -10, -100, 2])\nacts = torch.Tensor([2.1, -100.2])\n\ny0 = values[0]\ny1 = 
values[1]\ny2 = values[2]\nfor tt in range(20000):\n # y_pred = nn_pol(a, _val_idxs=[0, 1], deterministic=True)[0]\n y_pred = nn_val(obs, acts, val_idxs=[0, 1, 2])\n loss = loss_fn(y_pred[0], y0) + loss_fn(y_pred[1], y1) + loss_fn(y_pred[2], y2)\n # loss = loss_fn(y_pred[0], y1)\n print(tt, loss.item())\n\n optimizer_val.zero_grad()\n loss.backward()\n optimizer_val.step()\n\ny_pred = nn_val(obs, acts, val_idxs=[0, 1, 2])\nprint('Prediction Value', y_pred, 'Desired:', values)\n\n# for name, param in nn_val.named_parameters():\n# print('+'*10)\n# print(name)\n# print('+'*10)\n# print('VALS', param)\n# print('GRAD', param.grad)\n# print('\\n')\n" }, { "alpha_fraction": 0.5048255324363708, "alphanum_fraction": 0.5246226191520691, "avg_line_length": 29.730037689208984, "blob_id": "b43b7f2f4d354e717ea808a2f38b5a0d149ddd53", "content_id": "5262c2de02490f74a08511377ae32a66a2ee0565", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8082, "license_type": "permissive", "max_line_length": 108, "num_lines": 263, "path": "/scripts/plot_navigation2d_value_fcns.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport joblib\nimport json\nimport os\n\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.envs.simple_envs.navigation2d import Navigation2dGoalCompoEnv\nfrom robolearn.utils.plots.core import subplots\n\nfrom robolearn.utils.plots.core import set_latex_plot\n\nset_latex_plot()\n\n\ndef plot_v_fcn(i_vf, u_vf):\n xlim = (-7, 7)\n ylim = (-7, 7)\n delta = 0.05\n x_min, x_max = tuple(1.1 * np.array(xlim))\n y_min, y_max = tuple(1.1 * np.array(ylim))\n all_x = np.arange(x_min, x_max, delta)\n all_y = np.arange(y_min, y_max, delta)\n xy_mesh = np.meshgrid(all_x, all_y)\n all_obs = np.array(xy_mesh).transpose(1, 2, 0).reshape(-1, 2)\n\n def plot_v_contours(ax, values):\n values = values.reshape(len(all_x), len(all_y))\n\n contours = ax.contour(xy_mesh[0], xy_mesh[1], values, 20,\n colors='dimgray')\n ax.clabel(contours, inline=1, fontsize=10, fmt='%.0f')\n ax.imshow(values, extent=(x_min, x_max, y_min, y_max), origin='lower')\n\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n ax.set_xlabel('X', fontweight='bold')\n ax.set_ylabel('Y', fontweight='bold')\n ax.axis('equal')\n ax.set_aspect('equal', 'box')\n\n ax.set_frame_on(False)\n\n # Compute and ploy Sub-tasks Value-Fcn\n n_unintentions = u_vf.n_heads if u_vf is not None else 0\n n_cols = 3 if u_vf is not None else 1\n n_rows = int(np.ceil((n_unintentions+1)/n_cols))\n subgoals_fig, subgoals_axs = plt.subplots(n_rows, n_cols)\n subgoals_axs = np.atleast_2d(subgoals_axs)\n\n subgoals_fig.suptitle('V-values')\n\n # Compute and plot Main Task Value-fcn\n if i_vf is not None:\n values, _ = i_vf.get_values(all_obs)\n plot_v_contours(subgoals_axs[0, 0], values)\n subgoals_axs[0, 0].set_title(\"Main Task\")\n\n for aa in range(n_unintentions):\n row = (aa+1) // n_cols\n col = (aa+1) % n_cols\n subgo_ax = subgoals_axs[row, col]\n values, _ = u_vf.get_values(all_obs, val_idxs=[aa])\n values = values[0]\n\n subgo_ax.set_title(\"Sub-Task %02d\" % aa)\n plot_v_contours(subgo_ax, values)\n\n\ndef plot_q_fcn(i_qf, i_qf2, u_qf, u_qf2, obs, policy):\n # Load environment\n dirname = os.path.dirname(args.file)\n with open(os.path.join(dirname, 'variant.json')) as json_data:\n env_params = json.load(json_data)['env_params']\n\n env = NormalizedBoxEnv(\n 
Navigation2dGoalCompoEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n # env.reset()\n # env.render()\n\n obs = np.array(obs)\n n_action_samples = 100\n x_min, y_min = env.action_space.low\n x_max, y_max = env.action_space.high\n delta = 0.05\n # xlim = (1.1*x_min, 1.1*x_max)\n # ylim = (1.1*y_min, 1.1*y_max)\n xlim = (1.0*x_min, 1.0*x_max)\n ylim = (1.0*y_min, 1.0*y_max)\n all_x = np.arange(x_min, x_max, delta)\n all_y = np.arange(y_min, y_max, delta)\n xy_mesh = np.meshgrid(all_x, all_y)\n\n all_acts = np.zeros((len(all_x)*len(all_y), 2))\n all_acts[:, 0] = xy_mesh[0].ravel()\n all_acts[:, 1] = xy_mesh[1].ravel()\n\n n_unintentions = u_qf.n_heads if u_qf is not None else 0\n\n def plot_q_contours(ax, values):\n values = values.reshape(len(all_x), len(all_y))\n\n contours = ax.contour(xy_mesh[0], xy_mesh[1], values, 20,\n colors='dimgray')\n ax.clabel(contours, inline=1, fontsize=10, fmt='%.0f')\n ax.imshow(values, extent=(x_min, x_max, y_min, y_max), origin='lower',\n alpha=0.5)\n\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n ax.set_xlabel('Vel. X', fontweight='bold', fontsize=18)\n ax.set_ylabel('Vel. Y', fontweight='bold', fontsize=18)\n ax.axis('equal')\n ax.set_aspect('equal', 'box')\n ax.grid(False)\n\n def plot_action_samples(ax, actions):\n x, y = actions[:, 0], actions[:, 1]\n ax.scatter(x, y, c='b', marker='*', zorder=5)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n for ob in obs:\n all_obs = np.broadcast_to(ob, (all_acts.shape[0], 2))\n\n fig, all_axs = \\\n subplots(1, n_unintentions + 1,\n gridspec_kw={'wspace': 0, 'hspace': 0},\n )\n # fig.suptitle('Q-val Observation: ' + str(ob))\n fig.tight_layout()\n fig.canvas.set_window_title('q_vals_%1d_%1d' % (ob[0], ob[1]))\n\n all_axs = np.atleast_1d(all_axs)\n\n all_axs[0].set_title('Main Task', fontdict={'fontsize': 30, 'fontweight': 'medium'})\n q_vals = i_qf.get_values(all_obs, all_acts)[0]\n if i_qf2 is not None:\n q2_vals = i_qf2.get_values(all_obs, all_acts)[0]\n q_vals = np.concatenate([q_vals, q2_vals], axis=1)\n q_vals = np.min(q_vals, axis=1, keepdims=True)\n\n plot_q_contours(all_axs[0], q_vals)\n\n if u_qf is None:\n pol_kwargs = dict(\n )\n else:\n pol_kwargs = dict(\n pol_idx=None,\n )\n\n # Compute and plot Main Task Q Value\n action_samples = policy.get_actions(all_obs[:n_action_samples, :],\n deterministic=False,\n **pol_kwargs\n )[0]\n plot_action_samples(all_axs[0], action_samples)\n all_axs[0].set_xticklabels([])\n all_axs[0].set_yticklabels([])\n\n for aa in range(n_unintentions):\n subgo_ax = all_axs[aa + 1]\n subgo_ax.set_title('Sub-Task %02d' % (aa+1), fontdict={'fontsize': 30, 'fontweight': 'medium'} )\n\n q_vals = u_qf.get_values(all_obs, all_acts, val_idxs=[aa])[0]\n q_vals = q_vals[0]\n\n if u_qf2 is not None:\n q2_vals = u_qf2.get_values(all_obs, all_acts)[0]\n q2_vals = q2_vals[0]\n q_vals = np.concatenate([q_vals, q2_vals], axis=1)\n q_vals = np.min(q_vals, axis=1, keepdims=True)\n\n plot_q_contours(subgo_ax, q_vals)\n\n if u_qf is None:\n pol_kwargs = dict(\n )\n else:\n pol_kwargs = dict(\n pol_idx=aa,\n )\n\n # Compute and plot Sub-Task Q Value\n action_samples = policy.get_actions(all_obs[:n_action_samples, :],\n deterministic=False,\n **pol_kwargs\n )[0]\n plot_action_samples(subgo_ax, action_samples)\n\n subgo_ax.get_yaxis().set_visible(False)\n subgo_ax.set_xticklabels([])\n\n # plt.subplots_adjust(wspace=0, hspace=0)\n\n\ndef main(args):\n data = joblib.load(args.file)\n\n policy = 
data['policy']\n if 'u_qf' in data.keys():\n u_qf = data['u_qf']\n else:\n u_qf = None\n if 'u_qf2' in data.keys():\n u_qf2 = data['u_qf2']\n else:\n u_qf2 = None\n if 'u_vf' in data.keys():\n u_vf = data['u_vf']\n else:\n u_vf = None\n i_qf = data['qf']\n i_qf2 = data['qf2']\n if 'vf' in data.keys():\n i_vf = data['vf']\n else:\n i_vf = None\n\n q_fcn_obs = [\n (4, 4),\n (-2, 4),\n (4, -2),\n (-6, -6),\n (-2, -2),\n ]\n\n # QF Plot\n plot_q_fcn(i_qf, i_qf2, u_qf, u_qf2, q_fcn_obs, policy)\n\n # VF Plot\n plot_v_fcn(i_vf, u_vf)\n\n plt.show()\n\n epoch = data['epoch']\n\n print('Data for epoch: %02d' % epoch)\n\n # IPython.embed()\n # return plotter\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./params.pkl',\n help='path to the snapshot file')\n\n args = parser.parse_args()\n plotter = main(args)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.7924528121948242, "avg_line_length": 52, "blob_id": "6a6768375cbf888fa7c8552e8425c6c58ef3bd4b", "content_id": "c67af2cd033816b4f32ea768aef54c8bd575dc9a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "permissive", "max_line_length": 52, "num_lines": 1, "path": "/robolearn/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.version import VERSION as __version__\n" }, { "alpha_fraction": 0.5061293244361877, "alphanum_fraction": 0.5135180354118347, "avg_line_length": 30.83957290649414, "blob_id": "3d88ef3fe2e381a13a0fcbe87093b786acf76373", "content_id": "f5170e126f4cee282dd6cb2c53247a40197c8dcb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5955, "license_type": "permissive", "max_line_length": 80, "num_lines": 187, "path": "/robolearn/torch/policies/gaussian_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis code is based on: https://github.com/vitchyr/rlkit\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch import nn as nn\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.torch.utils.nn import Mlp\nfrom robolearn.models.policies import ExplorationPolicy\nfrom torch.distributions import Normal\n\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n\nclass GaussianPolicy(Mlp, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = GaussianPolicy(...)\n action, mean, log_std, _ = policy(obs)\n action, mean, log_std, _ = policy(obs, deterministic=True)\n action, mean, log_std, log_prob = policy(obs, return_log_prob=True)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = mean.\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n hidden_sizes,\n std=None,\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0.1,\n output_w_init='xavier_normal',\n output_b_init_val=0.1,\n **kwargs\n ):\n self.save_init_params(locals())\n super(GaussianPolicy, self).__init__(\n hidden_sizes,\n input_size=obs_dim,\n output_size=action_dim,\n hidden_w_init=hidden_w_init,\n hidden_b_init_val=hidden_b_init_val,\n output_w_init=output_w_init,\n output_b_init_val=output_b_init_val,\n **kwargs\n )\n ExplorationPolicy.__init__(self, action_dim)\n\n 
self.log_std = None\n self.std = std\n if std is None:\n last_hidden_size = obs_dim\n if len(hidden_sizes) > 0:\n last_hidden_size = hidden_sizes[-1]\n self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)\n\n if output_w_init == 'xavier_normal':\n ptu.layer_init(layer=self.last_fc_log_std,\n activation='linear',\n b=output_b_init_val)\n elif output_w_init == 'xavier_normal_0.1':\n ptu.layer_init(layer=self.last_fc_log_std,\n activation='0.1',\n b=output_b_init_val)\n elif output_w_init == 'xavier_normal_0.01':\n ptu.layer_init(layer=self.last_fc_log_std,\n activation='0.01',\n b=output_b_init_val)\n elif output_w_init == 'xavier_normal_0.001':\n ptu.layer_init(layer=self.last_fc_log_std,\n activation='0.001',\n b=output_b_init_val)\n elif output_w_init == 'xavier_normal_0.003':\n ptu.layer_init(layer=self.last_fc_log_std,\n activation='0.003',\n b=output_b_init_val)\n else:\n raise ValueError(\"Wrong init value:%s\" % output_w_init)\n\n else:\n self.log_std = torch.log(std)\n assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX\n\n def get_action(self, obs_np, deterministic=False):\n \"\"\"\n \"\"\"\n actions, info_dict = self.get_actions(obs_np[None],\n deterministic=deterministic)\n for key, val in info_dict.items():\n if isinstance(val, np.ndarray):\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, deterministic=False):\n \"\"\"\n \"\"\"\n return self.eval_np(obs_np, deterministic=deterministic)\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n ):\n \"\"\"\n :param obs: Observation\n :param deterministic: If True, do not sample\n :param return_log_prob: If True, return a sample and its log probability\n \"\"\"\n h = obs\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n mean = self.last_fc(h)\n\n if self.std is None:\n log_std = self.last_fc_log_std(h)\n log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n std = torch.exp(log_std)\n else:\n std = self.std\n log_std = self.log_std\n\n log_prob = None\n expected_log_prob = None\n mean_action_log_prob = None\n\n if deterministic:\n action = mean\n else:\n normal = Normal(mean, std)\n action = normal.rsample()\n if return_log_prob:\n log_prob = normal.log_prob(action,)\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n info_dict = dict(\n mean=mean,\n log_std=log_std,\n log_prob=log_prob,\n expected_log_prob=expected_log_prob,\n std=std,\n mean_action_log_prob=mean_action_log_prob,\n )\n return action, info_dict\n\n def log_action(self, action, obs):\n \"\"\"\n\n Args:\n action:\n obs:\n\n Returns:\n\n \"\"\"\n h = obs\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n\n mean = self.last_fc(h)\n\n if self.std is None:\n log_std = self.last_fc_log_std(h)\n log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n std = torch.exp(log_std)\n else:\n std = self.std\n\n normal = Normal(mean, std)\n log_prob = torch.sum(normal.log_prob(action), dim=-1, keepdim=True)\n return log_prob\n\n # z = (action - mean)/stds\n # return -0.5 * torch.sum(torch.mul(z, z), dim=-1, keepdim=True)\n\n" }, { "alpha_fraction": 0.6108389496803284, "alphanum_fraction": 0.6151255369186401, "avg_line_length": 35.69662857055664, "blob_id": "738dce30c065bdafe2a77a3395f60a6fb01e198a", "content_id": "07f05cee4047c48f537758795d53ec5eb72dd169", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3266, "license_type": 
"permissive", "max_line_length": 103, "num_lines": 89, "path": "/scripts/sim_policy_ui.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers import rollout\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import set_gpu_mode\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\n\nfrom robolearn.envs.simple_envs.navigation2d.navigation2d_goalcompo_env import Navigation2dGoalCompoEnv\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.models.policies import MakeDeterministic\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\n\nfilename = str(uuid.uuid4())\n\n\ndef simulate_policy(args):\n data = joblib.load(args.file)\n if args.deterministic:\n if args.un > -1:\n print('Using the deterministic version of the UNintentional policy '\n '%02d.' % args.un)\n if 'u_policy' in data:\n policy = MakeDeterministic(\n # MultiPolicySelector(data['u_policy'], args.un))\n WeightedMultiPolicySelector(data['policy'], args.un))\n else:\n policy = MakeDeterministic(\n WeightedMultiPolicySelector(data['policy'], args.un))\n else:\n print('Using the deterministic version of the Intentional policy.')\n policy = MakeDeterministic(data['policy'])\n else:\n if args.un > -1:\n print('Using the UNintentional stochastic policy %02d' % args.un)\n if 'u_policy' in data:\n # policy = MultiPolicySelector(data['u_policy'], args.un)\n policy = WeightedMultiPolicySelector(data['policy'], args.un)\n else:\n # policy = data['u_policies'][args.un]\n policy = WeightedMultiPolicySelector(data['policy'], args.un)\n else:\n print('Using the Intentional stochastic policy.')\n # policy = data['exploration_policy']\n policy = data['policy']\n\n print(\"Policy loaded!!\")\n\n # Load environment\n with open('variant.json') as json_data:\n env_params = json.load(json_data)['env_params']\n env = NormalizedBoxEnv(\n Navigation2dGoalCompoEnv(**env_params)\n )\n print(\"Environment loaded!!\")\n\n if args.gpu:\n set_gpu_mode(True)\n policy.cuda()\n if isinstance(policy, PyTorchModule):\n policy.train(False)\n while True:\n path = rollout(\n env,\n policy,\n max_path_length=args.H,\n animated=True,\n # deterministic=args.deterministic,\n )\n if hasattr(env, \"log_diagnostics\"):\n env.log_diagnostics([path])\n logger.dump_tabular()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./progress.csv',\n help='path to the snapshot file')\n parser.add_argument('--H', type=int, default=50,\n help='Max length of rollout')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--deterministic', action=\"store_true\")\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n args = parser.parse_args()\n\n simulate_policy(args)\n input('Press a key to finish the script')\n" }, { "alpha_fraction": 0.4472230076789856, "alphanum_fraction": 0.45761919021606445, "avg_line_length": 36.7279167175293, "blob_id": "04f6d8205ab612796e74df3b08bbd17b691a7477", "content_id": "8f5aa8cb6ec72c59321d0b4c25e83a88cd84b02c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10677, "license_type": "permissive", "max_line_length": 108, "num_lines": 283, "path": "/robolearn/envs/simple_envs/frozen_lake/frozen_lake.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file 
has been modified from the original file:\nhttps://github.com/berkeleydeeprlcourse/homework/blob/c1027d83cd542e67ebed982d44666e0d22a00141/hw2/HW2.ipynb\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nimport sys\nimport os\n\nfrom gym import utils\nfrom robolearn.envs.discrete_env import DiscreteEnv\n\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\nMAPS = {\n \"4x4\": [\n \"SFFF\",\n \"FHFH\",\n \"FFFH\",\n \"HFFG\"\n ],\n \"8x8\": [\n \"SFFFFFFF\",\n \"FFFFFFFF\",\n \"FFFHFFFF\",\n \"FFFFFHFF\",\n \"FFFHFFFF\",\n \"FHHFFFHF\",\n \"FHFFHFHF\",\n \"FFFHFFFG\"\n ],\n}\n\nMAP_BG_COLORS = {b'S': 'lightblue', b'G': 'green', b'F': 'white', b'H': 'black'}\nMAP_BG_TXT_COLORS = {b'S': 'black', b'G': 'black', b'F': 'white', b'H': 'white'}\n\nCOLOR_DICT = dict(zip(mcolors.CSS4_COLORS.keys(),\n [mcolors.hex2color(color)\n for color in mcolors.CSS4_COLORS.values()]))\n\nIMG_HEIGHT = 240\nIMG_WIDTH = 240\n\n\nclass FrozenLakeEnv(DiscreteEnv):\n \"\"\"\n Winter is here. You and your friends were tossing around a frisbee at the\n park when you made a wild throw that left the frisbee out in the middle of\n the lake. The water is mostly frozen, but there are a few holes where the\n ice has melted. If you step into one of those holes, you'll fall into the\n freezing water. At this time, there's an international frisbee shortage,\n so it's absolutely imperative that you navigate across the lake and retrieve\n the disc. However, the ice is slippery, so you won't always move in the\n direction you intend.\n The surface is described using a grid like the following\n\n SFFF\n FHFH\n FFFH\n HFFG\n\n S : starting point, safe\n F : frozen surface, safe\n H : hole, fall to your doom\n G : goal, where the frisbee is located\n\n The episode ends when you reach the goal or fall in a hole.\n You receive a reward of 1 if you reach the goal, and zero otherwise.\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi', 'rgb_array']}\n\n def __init__(self, desc=None, map_name=\"4x4\", is_slippery=True,\n reward_dict=None):\n \"\"\"\n\n :param desc: 2D array specifying what each grid cell means\n (used for plotting)\n :param map_name: '4x4' or '8x8'\n :param is_slippery: Frozen surface is slippery or not\n \"\"\"\n if desc is None and map_name is None:\n raise ValueError('Must provide either desc or map_name')\n elif desc is None:\n desc = MAPS[map_name]\n\n self.desc = desc = np.asarray(desc, dtype='c')\n self.nrow, self.ncol = nrow, ncol = desc.shape\n\n nA = 4\n nS = nrow * ncol\n\n isd = np.array(desc == b'S').astype('float64').ravel()\n isd /= isd.sum()\n\n P = {s: {a: [] for a in range(nA)} for s in range(nS)}\n\n def to_s(row, col):\n return row*ncol + col\n\n def inc(row, col, a):\n if a == 0: # left\n col = max(col-1, 0)\n elif a == 1: # down\n row = min(row+1, nrow-1)\n elif a == 2: # right\n col = min(col+1, ncol-1)\n elif a == 3: # up\n row = max(row-1, 0)\n return (row, col)\n\n for row in range(nrow):\n for col in range(ncol):\n s = to_s(row, col)\n for a in range(4):\n li = P[s][a]\n letter = desc[row, col]\n if letter in b'GH':\n if reward_dict is not None:\n if letter == b'G':\n rew = float(reward_dict['G'])\n elif letter == b'H':\n rew = float(reward_dict['H'])\n else:\n raise ValueError('Wrong key error. 
It should be G, S, F or H')\n else:\n rew = 0\n li.append((1.0, s, rew, True))\n\n else:\n if is_slippery:\n for b in [(a-1) % 4, a, (a+1) % 4]:\n new_row, new_col = inc(row, col, b)\n new_state = to_s(new_row, new_col)\n new_letter = desc[new_row, new_col]\n done = bytes(new_letter) in b'GH'\n if reward_dict is not None:\n if new_letter == b'G':\n rew = float(reward_dict['G'])\n elif new_letter == b'S':\n rew = float(reward_dict['S'])\n elif new_letter == b'H':\n rew = float(reward_dict['H'])\n elif new_letter == b'F':\n rew = float(reward_dict['F'])\n else:\n raise ValueError('Wrong key error. It'\n 'should be '\n 'G, S, F or H')\n else:\n rew = float(new_letter == b'G')\n li.append((0.8 if b == a else 0.1,\n new_state, rew, done))\n else:\n new_row, new_col = inc(row, col, a)\n new_state = to_s(new_row, new_col)\n new_letter = desc[new_row, new_col]\n done = bytes(new_letter) in b'GH'\n if reward_dict is not None:\n if new_letter == b'G':\n rew = float(reward_dict['G'])\n elif new_letter == b'S':\n rew = float(reward_dict['S'])\n elif new_letter == b'H':\n rew = float(reward_dict['H'])\n elif new_letter == b'F':\n rew = float(reward_dict['F'])\n else:\n raise ValueError('Wrong key error. It'\n 'should be G, S, F or H')\n else:\n rew = float(new_letter == b'G')\n li.append((1.0, new_state, rew, done))\n\n super(FrozenLakeEnv, self).__init__(nS, nA, P, isd)\n\n self.fig = None\n self.ax = None\n self.s_draw = None\n\n def to_row_col(self, s):\n row = int(s // self.ncol)\n col = int(s % self.ncol)\n return row, col\n\n def render(self, mode='human', close=False):\n if close:\n if self.fig is not None:\n plt.close(self.fig)\n self.fig = None\n self.ax = None\n self.s_draw = None\n return\n if mode == 'human':\n if self.fig is None:\n self._plot_backgound()\n self._plot_env()\n plt.ion()\n plt.show()\n else:\n self._plot_env()\n # self.fig.suptitle('Iter %d' % self.internal_counter)\n self.fig.canvas.set_window_title('Frozen Lake environment')\n plt.pause(0.0001)\n return\n else:\n plt.ioff()\n matplotlib.use('Agg')\n self._plot_backgound()\n self._plot_env()\n dpi = self.fig.get_dpi()\n self.fig.set_size_inches(float(IMG_HEIGHT)/float(dpi),\n float(IMG_WIDTH)/float(dpi))\n self.fig.subplots_adjust(bottom=0., left=0., right=1., top=1.)\n extent = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())\n self.fig.savefig('/tmp/temporal_frozen_lake_img',\n format='png', bbox_inches=extent)\n self.render(close=True)\n plt.ion()\n return plt.imread('/tmp/temporal_frozen_lake_img')[:, :, :3]\n\n def _plot_env(self):\n row, col = self.to_row_col(self.s)\n self._robot_marker(col, row)\n self.fig.canvas.draw()\n\n def _plot_backgound(self):\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(1, 1, 1)\n\n self.fig.canvas.draw()\n\n self.env_color = np.ones((self.nrow, self.ncol, 3))\n for row in range(self.nrow):\n for col in range(self.ncol):\n letter = self.desc[row, col]\n self.env_color[row, col, :] = COLOR_DICT[MAP_BG_COLORS[letter]]\n\n square_size = 0.5\n self.env_image = self.ax.imshow(self.env_color, interpolation='nearest')\n self.ax.set_xticks(np.arange(self.ncol)-square_size)\n self.ax.set_yticks(np.arange(self.nrow)-square_size)\n self.ax.set_xticklabels([])\n self.ax.set_yticklabels([])\n self.ax.xaxis.set_ticks_position('none')\n self.ax.yaxis.set_ticks_position('none')\n for row in range(self.nrow):\n for col in range(self.ncol):\n letter = self.desc[row, col]\n self.ax.text(col, row, str(self.desc[row, col].item().decode()),\n 
color=COLOR_DICT[MAP_BG_TXT_COLORS[letter]],\n size=10, verticalalignment='center',\n horizontalalignment='center', fontweight='bold')\n if letter == b'S':\n self._robot_marker(col, row)\n self.ax.grid(color='k', lw=2, ls='-')\n self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)\n\n def _robot_marker(self, x, y, color='red'):\n if self.s_draw is not None:\n self.s_draw.remove()\n\n if self.ncol == 4:\n zoom = 0.03\n else:\n zoom = 0.015\n image = plt.imread(os.path.join(os.path.dirname(__file__),\n 'robotio.png'))\n\n for cc in range(3):\n image[:, :, cc] = COLOR_DICT[color][cc]\n\n im = OffsetImage(image, zoom=zoom)\n ab = AnnotationBbox(im, (x, y), xycoords='data', frameon=False)\n self.s_draw = self.ax.add_artist(ab)\n" }, { "alpha_fraction": 0.6037742495536804, "alphanum_fraction": 0.6289708018302917, "avg_line_length": 45.227787017822266, "blob_id": "875ed6e4fd6c51c1cb2181fa2a0e22cdc46c034a", "content_id": "39672e644d2bf02f31cc2b9fc9f922c145c6a6fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28615, "license_type": "permissive", "max_line_length": 135, "num_lines": 619, "path": "/scenarios/tests/traj-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\n\nimport rospy\nimport tf\nfrom XCM.msg import CommandAdvr\nfrom XCM.msg import JointStateAdvr\nfrom gazebo_msgs.srv import DeleteModel\nfrom gazebo_msgs.srv import SpawnModel\nfrom geometry_msgs.msg import Pose\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation\nfrom robolearn.old_utils.trajectory_interpolators import quaternion_slerp_interpolation\n\nfrom robolearn.old_utils.robot_model import *\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_drill_relative_pose, create_hand_relative_pose\nfrom robolearn.old_utils.transformations_utils import *\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Script parameters\n#box_position = np.array([0.75, 0.0, 0.0184])\nbox_position = np.array([0.75-0.05,\n 0.00,\n 0.0184])\nbox_position = np.array([0.64, -0.03-0.3, 0.0173+0.2]) # drill\nbox_position = np.array([0.64, 0., -0.1327]) # beer\nbox_position = np.array([0.64, 0., -0.1327]) # beer relative\n\nbox_position = create_drill_relative_pose(drill_x=0.86, drill_y=-0.1776-0.05, drill_z=-0.1327+0.17, drill_yaw=0)[-3:]\n\nbox_size = [0.4, 0.5, 0.3]\nbox_size = [0.1, 0.1, 0.3] # drill\nbox_size = [0.11, 0.11, 0.3] # beer\nbox_yaw = 0 # Degrees\n#box_orient = tf.transformations.rotation_matrix(np.deg2rad(15), [1, 0, 0]) # For the EEs is rotation in X\nbox_orient = tf.transformations.rotation_matrix(np.deg2rad(box_yaw), [0, 0, 1])\nbox_matrix = homogeneous_matrix(rot=box_orient, pos=box_position)\nfreq = 100\nT_init = 2\nT_reach = 6\nT_lift = 1\n\n# Save/Load file name\nfile_name = 'trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)\nload_reach_traj = False\nload_lift_traj = False\n#load_reach_traj = True\n#load_lift_traj = True\nsave_reach_traj = False\nsave_lift_traj = False\n\nremove_spawn_new_box = False\n\nreach_option = 3\n#reach_option 0: IK desired final pose, interpolate in joint space\n#reach_option 1: Trajectory in EEs, then IK whole trajectory\n#reach_option 2: Trajectory in EEs, IK with Jacobians\n\nlift_option = 0\n#lift_option 0: IK desired final pose, interpolate the others\n#lift_option 1: Trajectory in EEs, then IK whole trajectory\n#lift_option 2: Trajectory in EEs, IK with 
Jacobians\n\nregularization_parameter = 0.01 # For IK optimization algorithm\n\n\nq_init = np.zeros(31)\n# q_init[16] = np.deg2rad(50)\n# q_init[25] = np.deg2rad(-50)\n#q_init[24] = np.deg2rad(20)\n#q_init[25] = np.deg2rad(-35)\n#q_init[26] = np.deg2rad(0)\n#q_init[27] = np.deg2rad(-95)\n#q_init[28] = np.deg2rad(0)\n#q_init[29] = np.deg2rad(0)\n#q_init[30] = np.deg2rad(0)\nq_init[24] = np.deg2rad(-30)\nq_init[25] = np.deg2rad(-65)\nq_init[26] = np.deg2rad(20)\nq_init[27] = np.deg2rad(-95)\nq_init[28] = np.deg2rad(20)\nq_init[29] = np.deg2rad(0)\nq_init[30] = np.deg2rad(0)\n\n\n\nprint(q_init[24:])\nprint(bigman_params['joints_limits'][24:])\n#q_init = np.deg2rad(np.array(bigman_Apose))\n#q_init = np.deg2rad(np.array(bigman_Fpose))\n#q_init = np.deg2rad(np.array(bigman_Tpose))\n\n# Robot Model\nrobot_urdf = '/home/domingo/robotology-superbuild/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_urdf = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\nleft_sign = np.array([1, 1, 1, 1, 1, 1, 1])\nright_sign = np.array([1, -1, -1, 1, -1, 1, -1])\n\nactual_RH_pose = robot_model.fk(RH_name, q=q_init, body_offset=r_soft_hand_offset, update_kinematics=True)\n#desired_RH_reach_pose = polynomial5_interpolation(N, RH_reach_pose, actual_RH_pose)[0]\ndesired_RH_pose = actual_RH_pose.copy()\ndesired_RH_pose[-3] -= 0.01\ndesired_RH_pose[-2] += 0.10\ndesired_RH_pose[-1] -= 0.12\n\ndrill_x = 0.70\ndrill_y = 0.00\ndrill_z = -0.1327\ndrill_yaw = 0 # Degrees\ndrill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z, drill_yaw=drill_yaw)\ndrill_size = [0.1, 0.1, 0.3]\nhand_y = -drill_size[1]/2-0.02\nhand_z = drill_size[2]/2+0.02\ndesired_RH_pose = create_hand_relative_pose(drill_pose3, hand_x=0.0, hand_y=hand_y, hand_z=hand_z, hand_yaw=0)\n\ntorso_joints = bigman_params['joint_ids']['TO']\nq_reach2 = robot_model.ik(RH_name, desired_RH_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n\nprint(repr(actual_RH_pose))\nprint(repr(desired_RH_pose))\nprint(np.rad2deg(q_reach2))\nraw_input(\"BORRAME\")\n\n\n# ###########\n# REACH BOX #\n# ###########\nif not load_reach_traj:\n print(\"\\033[5mGenerating reaching trajectory...\")\n ## Orientation\n ##des_orient = homogeneous_matrix(rot=rot)\n #des_orient = tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0])\n ###des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-5), [1, 0, 0]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-3), [1, 0, 0]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(5), [0, 0, 1]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(10), [0, 1, 0]))\n #des_orient = des_orient.dot(box_orient)\n box_LH_position = np.array([0.00,\n box_size[1]/2. 
- 0.00,\n -0.05])\n box_LH_matrix = homogeneous_matrix(pos=box_LH_position)\n LH_reach_matrix = box_matrix.dot(box_LH_matrix)\n LH_reach_matrix = LH_reach_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0]))\n LH_reach_pose = np.zeros(7)\n LH_reach_pose[4:] = tf.transformations.translation_from_matrix(LH_reach_matrix)\n LH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(LH_reach_matrix)\n\n box_RH_position = np.array([0.00,\n -box_size[1]/2. + 0.00,\n -0.05])\n box_RH_matrix = homogeneous_matrix(pos=box_RH_position)\n RH_reach_matrix = box_matrix.dot(box_RH_matrix)\n # Rotate HAND\n RH_reach_matrix = RH_reach_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0]))\n RH_reach_pose = np.zeros(7)\n RH_reach_pose[4:] = tf.transformations.translation_from_matrix(RH_reach_matrix)\n RH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(RH_reach_matrix)\n\n N = int(np.ceil(T_reach*freq))\n torso_joints = bigman_params['joint_ids']['TO']\n\n if reach_option == 0:\n q_reach = robot_model.ik(LH_name, LH_reach_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n print(\"TODO: NOT MOVING LEFT ARM\")\n q_reach = q_init.copy()\n q_reach2 = robot_model.ik(RH_name, RH_reach_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n\n # Trajectory\n joint_reach_trajectory = polynomial5_interpolation(N, q_reach, q_init)[0]\n\n print(\"TODO: UNCOMMENT BELOW\")\n # save_reach_traj = raw_input(\"Save reach trajectory before visualize? (y/yes): \")\n # if save_reach_traj.lower() in ['y', 'yes']:\n # np.save(file_name + '_m' + str(reach_option) + '_reach.npy', joint_reach_trajectory)\n\n elif reach_option == 1:\n q = q_init.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_reach_pose = polynomial5_interpolation(N, LH_reach_pose, actual_LH_pose)[0]\n desired_RH_reach_pose = polynomial5_interpolation(N, RH_reach_pose, actual_RH_pose)[0]\n\n viapoint_LH_reach = np.empty(3)\n\n quatLH_interpolation = quaternion_slerp_interpolation(N, LH_reach_pose[:4], actual_LH_pose[:4])\n quatRH_interpolation = quaternion_slerp_interpolation(N, RH_reach_pose[:4], actual_RH_pose[:4])\n desired_LH_reach_pose[:, :4] = quatLH_interpolation\n desired_RH_reach_pose[:, :4] = quatRH_interpolation\n\n joint_reach_trajectory = np.zeros((desired_LH_reach_pose.shape[0], robot_model.q_size))\n joint_reach_trajectory[0, :] = q\n\n q_reach = np.empty(robot_model.q_size)\n q_reach2 = np.empty(robot_model.q_size)\n for ii in range(desired_LH_reach_pose.shape[0]-1):\n print(\"%d/%d \" % (ii+1, N))\n #print(\"%d/%d \" % (ii+1, N))\n q_reach[:] = robot_model.ik(LH_name, desired_LH_reach_pose[ii+1, :], body_offset=l_soft_hand_offset,\n q_init=joint_reach_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method='optimization',\n regularization_parameter=regularization_parameter)\n q_reach2[:] = robot_model.ik(RH_name, desired_RH_reach_pose[ii+1, :], body_offset=r_soft_hand_offset,\n q_init=joint_reach_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method='optimization',\n 
regularization_parameter=regularization_parameter)\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n joint_reach_trajectory[ii+1, :] = q_reach\n #print(joint_reach_trajectory[ii+1, :]-joint_reach_trajectory[ii, :])\n print(sum(joint_reach_trajectory[ii+1, :]-joint_reach_trajectory[ii, :]))\n\n save_reach_traj = raw_input(\"Save reach trajectory before visualize? (y/yes): \")\n if save_reach_traj.lower() in ['y', 'yes']:\n np.save(file_name + '_m' + str(reach_option) + '_reach.npy', joint_reach_trajectory)\n\n elif reach_option == 2:\n q = q_init.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n\n print(\"TODO: TEMPORALY CHANGING DESIRED LH/RH POSES\")\n LH_reach_pose = actual_LH_pose\n RH_reach_pose = desired_RH_pose\n\n desired_LH_reach_pose = polynomial5_interpolation(N, LH_reach_pose, actual_LH_pose)[0]\n desired_RH_reach_pose = polynomial5_interpolation(N, RH_reach_pose, actual_RH_pose)[0]\n\n quatLH_interpolation = quaternion_slerp_interpolation(N, LH_reach_pose[:4], actual_LH_pose[:4])[0]\n quatRH_interpolation = quaternion_slerp_interpolation(N, RH_reach_pose[:4], actual_RH_pose[:4])[0]\n desired_LH_reach_pose[:, :4] = quatLH_interpolation\n desired_RH_reach_pose[:, :4] = quatRH_interpolation\n\n #for ii in range(desired_LH_reach_pose.shape[0]):\n # print(desired_LH_reach_pose[ii, 4])\n #raw_input(\"CUCU\")\n\n q_reach = robot_model.ik(LH_name, LH_reach_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n q_reach2 = robot_model.ik(RH_name, RH_reach_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n\n J1 = np.zeros((6, robot_model.qdot_size))\n J2 = np.zeros((6, robot_model.qdot_size))\n K = 500\n elif reach_option == 3:\n pass\n else:\n raise ValueError(\"Wrong reach_option %d\" % reach_option)\n print(\"\\033[31mDONE!! \\033[0m\")\n\n #RH_reach_pose = robot_model.fk(RH_name, q=np.zeros(robot_model.q_size), body_offset=r_soft_hand_offset)\n #RH_reach_pose[4:] = LH_reach_pose[4:]\n #RH_reach_pose[5] = box_position[1] - box_size[1]/2. + 0.02\n #des_orient = tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0])\n #des_orient = des_orient.dot(box_orient)\n #RH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(des_orient)\n\nelse:\n print(\"\\n\\033[5mLoading reaching trajectory...\")\n joint_reach_trajectory = np.load(file_name + '_m' + str(reach_option) + '_reach.npy')\n if reach_option == 2:\n q_reach = joint_reach_trajectory[-1, :]\n J1 = np.zeros((6, robot_model.qdot_size))\n J2 = np.zeros((6, robot_model.qdot_size))\n desired_LH_reach_pose = np.load(file_name+'_reach_LH_EE.npy')\n desired_RH_reach_pose = np.load(file_name+'_reach_RH_EE.npy')\n print(\"\\033[31mDONE!! 
\\033[0m\")\n\n\n\n# ######## #\n# LIFT BOX #\n# ######## #\n\"\"\"\nif not load_lift_traj:\n print(\"\\033[5mGenerating lifting trajectory...\")\n LH_lift_pose = LH_reach_pose.copy()\n LH_lift_pose[6] += 0.3\n RH_lift_pose = RH_reach_pose.copy()\n RH_lift_pose[6] += 0.3\n\n N = int(np.ceil(T_lift*freq))\n\n #final_LH_lift_pose = actual_LH_lift_pose.copy()\n #final_LH_lift_pose[-1] += 0.3\n #final_LH_lift_pose[-2] -= 0.005\n #final_RH_lift_pose = actual_RH_lift_pose.copy()\n #final_RH_lift_pose[-1] += 0.3\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-4), [1, 0, 0]))\n #des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-9), [1, 0, 0]))\n #des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-3), [0, 0, 1]))\n #final_LH_lift_pose[:4] = tf.transformations.quaternion_from_matrix(des_orient)\n\n if lift_option == 0:\n q_lift = robot_model.ik(LH_name, LH_lift_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n q_lift2 = robot_model.ik(RH_name, RH_lift_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n q_lift[bigman_params['joint_ids']['RA']] = q_lift2[bigman_params['joint_ids']['RA']]\n joint_lift_trajectory = polynomial5_interpolation(N, q_lift, q_reach)[0]\n\n #if save_lift_traj:\n # np.save(file_name+'_lift.npy', joint_lift_trajectory)\n print(\"TODO: UNCOMMENT BELOW\")\n # save_lift_traj = raw_input(\"Save lift trajectory before visualize? (y/yes): \")\n # if save_lift_traj.lower() in ['y', 'yes']:\n # np.save(file_name + '_m' + str(lift_option) + '_lift.npy', joint_lift_trajectory)\n\n elif lift_option == 1:\n q = q_reach.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_lift_pose = polynomial5_interpolation(N, LH_lift_pose, actual_LH_pose)[0]\n desired_RH_lift_pose = polynomial5_interpolation(N, RH_lift_pose, actual_RH_pose)[0]\n\n joint_lift_trajectory = np.zeros((N, robot_model.q_size))\n joint_lift_trajectory[0, :] = q\n q_lift = np.empty(robot_model.q_size)\n q_lift2 = np.empty(robot_model.q_size)\n for ii in range(N-1):\n print(\"%d/%d \" % (ii+1, N))\n #print(\"%d/%d \" % (ii+1, N))\n q_lift[:] = robot_model.ik(LH_name, desired_LH_lift_pose[ii+1, :], body_offset=l_soft_hand_offset,\n q_init=joint_lift_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method='optimization',\n regularization_parameter=regularization_parameter)\n q_lift2[:] = robot_model.ik(RH_name, desired_RH_lift_pose[ii+1, :], body_offset=r_soft_hand_offset,\n q_init=joint_lift_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method='optimization',\n regularization_parameter=regularization_parameter)\n q_lift[bigman_params['joint_ids']['RA']] = q_lift2[bigman_params['joint_ids']['RA']]\n joint_lift_trajectory[ii+1, :] = q_lift\n\n #if save_lift_traj:\n # np.save(file_name+'_lift.npy', joint_lift_trajectory)\n print(\"TODO: UNCOMMENT THE BELOW\")\n # save_lift_traj = raw_input(\"Save lift trajectory before visualize? 
(y/yes): \")\n # if save_lift_traj.lower() in ['y', 'yes']:\n # np.save(file_name + '_m' + str(lift_option) + '_lift.npy', joint_lift_trajectory)\n\n elif lift_option == 2:\n T_lift = 2\n N = int(np.ceil(T_lift*freq))\n\n q = q_reach.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_lift_pose = polynomial5_interpolation(N, LH_lift_pose, actual_LH_pose)[0]\n desired_RH_lift_pose = polynomial5_interpolation(N, RH_lift_pose, actual_RH_pose)[0]\n\n J1 = np.zeros((6, robot_model.qdot_size))\n J2 = np.zeros((6, robot_model.qdot_size))\n K = 500\n else:\n raise ValueError(\"Wrong lift_option %d\" % lift_option)\n print(\"\\n\\033[31mDONE!! \\033[0m\")\n\nelse:\n print(\"\\n\\033[5mLoading lifting trajectory...\")\n joint_lift_trajectory = np.load(file_name + '_m' + str(lift_option) + '_lift.npy')\n if lift_option == 2:\n J1 = np.zeros((6, robot_model.qdot_size))\n J2 = np.zeros((6, robot_model.qdot_size))\n desired_LH_lift_pose = np.load(file_name+'_lift_LH_EE.npy')\n desired_RH_lift_pose = np.load(file_name+'_lift_RH_EE.npy')\n print(\"\\033[31mDONE!! \\033[0m\")\n\"\"\"\n\nprint(\"Waiting for ROS...\"),\nwhile rospy.is_shutdown():\n pass\nprint(\"ROS OK\")\n# ROS Stuff\n#raw_input(\"Press for ROS related stuff\")\njoint_state = np.zeros(robot_model.q_size)\njoint_state_id = []\ndef callback(data, params):\n joint_state = params[0]\n joint_state_id = params[1]\n if not joint_state_id:\n joint_state_id[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_state[joint_state_id] = data.link_position\n #print params[0]\n #rospy.loginfo(\"I heard %s\", data.data)\npublisher = rospy.Publisher(\"/xbotcore/bigman/command\", CommandAdvr, queue_size=10)\nsubscriber = rospy.Subscriber(\"/xbotcore/bigman/joint_states\", JointStateAdvr, callback, (joint_state, joint_state_id))\nrospy.init_node('traj_example')\npub_rate = rospy.Rate(freq)\ndes_cmd = CommandAdvr()\ndes_cmd.name = bigman_params['joints_names']\n\nif remove_spawn_new_box:\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/cardboard_cube_box/model.sdf', 'r')\n sdf_box = f.read()\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/big_support/model.sdf', 'r')\n sdf_box_support = f.read()\n box_pose = Pose()\n box_pose.position.x = box_position[0]\n box_pose.position.y = box_position[1]\n box_pose.position.z = 1.014\n box_quat = tf.transformations.quaternion_from_matrix(box_matrix)\n box_pose.orientation.x = box_quat[0]\n box_pose.orientation.y = box_quat[1]\n box_pose.orientation.z = box_quat[2]\n box_pose.orientation.w = box_quat[3]\n box_support_pose = Pose()\n box_support_pose.position.x = box_position[0]\n box_support_pose.position.y = box_position[1]\n box_support_pose.position.z = 0\n box_support_pose.orientation = box_pose.orientation\n rospy.wait_for_service('gazebo/delete_model')\n delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n print(\"Deleting previous box...\")\n #raw_input(\"Press for delete box_support\")\n try:\n delete_model_prox(\"box_support\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n try:\n delete_model_prox(\"box\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n 
rospy.wait_for_service('gazebo/spawn_sdf_model')\n spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)\n print(\"Spawning new box...\")\n try:\n spawn_model_prox(\"box_support\", sdf_box_support, \"box_support\", box_support_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n try:\n spawn_model_prox(\"box\", sdf_box, \"box\", box_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n\n# ##################### #\n# INITIAL CONFIGURATION #\n# ##################### #\nN = int(np.ceil(T_init*freq))\nprint(q_init)\njoint_init_traj = polynomial5_interpolation(N, q_init, joint_state)[0]\nraw_input(\"Press key for moving to INIT\")\nfor ii in range(N):\n des_cmd.position = joint_init_traj[ii, :]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n# RESETING DRILL\nprint(\"Reset drill\")\n#bigman_drill_pose = create_drill_relative_pose(drill_x=0.86, drill_y=-0.1776-0.05, drill_z=-0.1327+0.15, drill_yaw=0)\nbigman_drill_pose = create_drill_relative_pose(drill_x=drill_x+0.16, drill_y=drill_y-0.2276, drill_z=drill_z+0.15, drill_yaw=drill_yaw)\n#spawn_drill_gazebo(bigman_drill_pose, drill_size=drill_size)\n#raw_input('press a key')\nprint(bigman_drill_pose)\nprint('+'*10)\n#reset_bigman_drill_gazebo(bigman_drill_pose, drill_size=None)\n\n#temp_count = 0\n#des_cmd.position = q_init\n#while temp_count < 100:\n# publisher.publish(des_cmd)\n# pub_rate.sleep()\n# temp_count += 1\n# rospy.wait_for_service('/gazebo/reset_world')\n# reset_srv = rospy.ServiceProxy('/gazebo/reset_world', Empty)\n# try:\n# reset_srv()\n# except rospy.ServiceException as exc:\n# print(\"/gazebo/reset_world service call failed: %s\" % str(exc))\n\n# Send Commands\nraw_input(\"Press key for REACHING\")\n#K = 0.03\n#des_cmd.position = joint_state.copy()\nif reach_option == 0 or reach_option == 1:\n for ii in range(joint_reach_trajectory.shape[0]):\n #print(\"Sending REACHING cmd...\")\n #error = joint_trajectory[ii, :] - joint_state\n #print(error[bigman_params['joint_ids']['BA']])\n #des_cmd.position += K*error\n des_cmd.position = joint_reach_trajectory[ii, :]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\nelif reach_option == 2:\n joint_reach_trajectory = np.empty((desired_LH_reach_pose.shape[0], robot_model.q_size))\n q = q_init.copy()\n joint_reach_trajectory[0, :] = q[:]\n for ii in range(desired_LH_reach_pose.shape[0]-1):\n #for ii in range(N-1):\n print(\"Sending REACHING cmd...\")\n #error1 = compute_cartesian_error(desired_LH_lift_pose[ii, :], actual_LH_lift_pose, rotation_rep='quat')\n #error2 = compute_cartesian_error(desired_RH_lift_pose[ii, :], actual_RH_lift_pose, rotation_rep='quat')\n\n xdot = compute_cartesian_error(desired_LH_reach_pose[ii+1, :], desired_LH_reach_pose[ii, :], rotation_rep='quat')#error1\n xdot2 = compute_cartesian_error(desired_RH_reach_pose[ii+1, :], desired_RH_reach_pose[ii, :], rotation_rep='quat')#error1\n\n # Compute the jacobian matrix\n #rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n robot_model.update_jacobian(J1, LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J2, RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n J1[:, bigman_params['joint_ids']['TO']] = 0\n J2[:, bigman_params['joint_ids']['TO']] = 0\n #print(J1)\n\n qdot = np.linalg.lstsq(J1, xdot)[0]\n qdot2 = np.linalg.lstsq(J2, 
xdot2)[0]\n\n #qdot[bigman_params['joint_ids']['RA']] = qdot[bigman_params['joint_ids']['LA']]*right_sign\n qdot[bigman_params['joint_ids']['RA']] = qdot2[bigman_params['joint_ids']['RA']]\n\n q[:] += qdot\n\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n\n des_cmd.position = q\n publisher.publish(des_cmd)\n\n joint_reach_trajectory[ii+1, :] = q[:]\n\n pub_rate.sleep()\n\nelif reach_option == 3:\n N = int(np.ceil(T_reach*freq))\n q_reach2 = robot_model.ik(RH_name, desired_RH_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method='optimization')\n joint_init_traj = polynomial5_interpolation(N, q_reach2, joint_state)[0]\n raw_input(\"Press key for moving to INIT\")\n for ii in range(N):\n des_cmd.position = joint_init_traj[ii, :]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n\nraw_input(\"Press key for LIFTING\")\nif lift_option == 0 or lift_option == 1:\n for ii in range(joint_lift_trajectory.shape[0]):\n #print(\"Sending LIFTING cmd...\")\n #error = joint_lift_trajectory[ii, :] - joint_state\n #print(error[bigman_params['joint_ids']['BA']])\n #des_cmd.position += K*error\n des_cmd.position = joint_lift_trajectory[ii, :]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\nelif lift_option == 2:\n joint_lift_trajectory = np.empty((desired_LH_lift_pose.shape[0], robot_model.q_size))\n q = q_reach.copy()\n joint_lift_trajectory[0, :] = q[:]\n for ii in range(desired_LH_lift_pose.shape[0]-1):\n #for ii in range(N-1):\n print(\"Sending LIFTING cmd...\")\n #error1 = compute_cartesian_error(desired_LH_lift_pose[ii, :], actual_LH_lift_pose, rotation_rep='quat')\n #error2 = compute_cartesian_error(desired_RH_lift_pose[ii, :], actual_RH_lift_pose, rotation_rep='quat')\n\n xdot = compute_cartesian_error(desired_LH_lift_pose[ii+1, :], desired_LH_lift_pose[ii, :], rotation_rep='quat')#error1\n xdot2 = compute_cartesian_error(desired_RH_lift_pose[ii+1, :], desired_RH_lift_pose[ii, :], rotation_rep='quat')#error1\n\n # Compute the jacobian matrix\n #rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n robot_model.update_jacobian(J1, LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J2, RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n J1[:, bigman_params['joint_ids']['TO']] = 0\n J2[:, bigman_params['joint_ids']['TO']] = 0\n #print(J1)\n\n qdot = np.linalg.lstsq(J1, xdot)[0]\n qdot2 = np.linalg.lstsq(J2, xdot2)[0]\n\n #qdot[bigman_params['joint_ids']['RA']] = qdot[bigman_params['joint_ids']['LA']]*right_sign\n qdot[bigman_params['joint_ids']['RA']] = qdot2[bigman_params['joint_ids']['RA']]\n\n q[:] += qdot\n\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n\n des_cmd.position = q\n publisher.publish(des_cmd)\n\n joint_lift_trajectory[ii+1, :] = q[:]\n pub_rate.sleep()\n\n\nsave_reach_traj = raw_input(\"Save reach trajectory? 
(y/yes): \")\nif save_reach_traj.lower() in ['y', 'yes']:\n np.save(file_name + '_m' + str(reach_option) + '_reach.npy', joint_reach_trajectory)\n if reach_option == 2:\n np.save(file_name+'_reach_LH_EE.npy', desired_LH_reach_pose)\n np.save(file_name+'_reach_RH_EE.npy', desired_RH_reach_pose)\n\n\n\nsave_lift_traj = raw_input(\"Save lift trajectory? (y/yes): \")\nif save_lift_traj.lower() in ['y', 'yes']:\n np.save(file_name + '_m' + str(lift_option) + '_lift.npy', joint_lift_trajectory)\n if lift_option == 2:\n np.save(file_name+'_lift_LH_EE.npy', desired_LH_lift_pose)\n np.save(file_name+'_lift_RH_EE.npy', desired_RH_lift_pose)\n\n#plt.plot(desired_LH_lift_pose[:, -1], 'r')\n#plt.show()\n" }, { "alpha_fraction": 0.6998223662376404, "alphanum_fraction": 0.721136748790741, "avg_line_length": 32.05882263183594, "blob_id": "781850fa199051039665906f335bea5e74a3e75f", "content_id": "005f79c3a9acb75acb143972dcab50ec05bafd06", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 563, "license_type": "permissive", "max_line_length": 107, "num_lines": 17, "path": "/scenarios/tests/reset-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.old_utils.tasks.bigman.lift_box_utils import *\n\nrospack = rospkg.RosPack()\nbox_sdf = open(rospack.get_path('robolearn_gazebo_env')+'/models/cardboard_cube_box/model.sdf', 'r').read()\n\nmodel_name = 'box'\nmodel_sdf = box_sdf\nmodel_pose = [0.75, 0, 0.019, 0, 0, 0, 1]\n\n#reset_gazebo_env = ResetGazeboEnv(model_names, model_sdfs, model_poses)\n#reset_gazebo_env.reset()\n\n#delete_gazebo_model(model_name)\n#spawn_gazebo_model(model_name, model_sdf, model_pose)\n#bigman_pose = get_gazebo_model_pose('bigman', 'world')\n\nreset_bigman_box_gazebo(model_pose)\n\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 25.799999237060547, "blob_id": "5a9d826e00a587711d6e953b7d7d7e6635104be8", "content_id": "63a69db342bdd7e1edcc2de4321221bf5c0309ab", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "permissive", "max_line_length": 37, "num_lines": 5, "path": "/robolearn/utils/plots/canvas_draw.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "def canvas_draw(canvas, interval):\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return\n\n" }, { "alpha_fraction": 0.5848731994628906, "alphanum_fraction": 0.5921787619590759, "avg_line_length": 34.79999923706055, "blob_id": "fdebdf2adcb874be1995de0c9495e15069aca2e3", "content_id": "8b5919c62fcd13b8c1a5b4e3b4095ee9071e3614", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2327, "license_type": "permissive", "max_line_length": 79, "num_lines": 65, "path": "/robolearn/utils/data_management/simple_replay_buffer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfrom robolearn.utils.data_management.replay_buffer import ReplayBuffer\n\n\nclass SimpleReplayBuffer(ReplayBuffer):\n def __init__(\n self, max_replay_buffer_size, obs_dim, action_dim,\n ):\n if not max_replay_buffer_size > 1:\n raise ValueError(\"Invalid Maximum Replay Buffer Size: {}\".format(\n max_replay_buffer_size)\n )\n\n max_size = int(max_replay_buffer_size)\n\n self._obs_buffer = np.zeros((max_size, obs_dim), 
dtype=np.float32)\n self._next_obs_buffer = np.zeros((max_size, obs_dim), dtype=np.float32)\n self._acts_buffer = np.zeros((max_size, action_dim), dtype=np.float32)\n self._rewards_buffer = np.zeros((max_size, 1), dtype=np.float32)\n self._terminals_buffer = np.zeros((max_size, 1), dtype='uint8')\n\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n self._max_size = max_size\n self._top = 0\n self._size = 0\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n self._obs_buffer[self._top] = observation\n self._acts_buffer[self._top] = action\n self._rewards_buffer[self._top] = reward\n self._terminals_buffer[self._top] = terminal\n self._next_obs_buffer[self._top] = next_observation\n self._advance()\n\n def terminate_episode(self):\n pass\n\n def _advance(self):\n self._top = (self._top + 1) % self._max_size\n if self._size < self._max_size:\n self._size += 1\n\n def random_batch(self, batch_size):\n if batch_size > self._size:\n raise AttributeError('Not enough samples to get. %d bigger than '\n 'current %d!' % (batch_size, self._size))\n\n indices = self.random_indices(0, self._size, batch_size)\n return dict(\n observations=self._obs_buffer[indices],\n actions=self._acts_buffer[indices],\n rewards=self._rewards_buffer[indices],\n terminals=self._terminals_buffer[indices],\n next_observations=self._next_obs_buffer[indices],\n )\n\n def available_samples(self):\n return self._size\n\n @staticmethod\n def random_indices(low, high, size):\n return np.random.randint(low, high, size)\n" }, { "alpha_fraction": 0.5374945402145386, "alphanum_fraction": 0.5659991502761841, "avg_line_length": 27.32712173461914, "blob_id": "71c4620f0593b98e26c9101be87df5d397f78905", "content_id": "1eaddc10c1b0cc87e96303c3508c73bcb5098e60", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13682, "license_type": "permissive", "max_line_length": 159, "num_lines": 483, "path": "/examples/rl_algos/gps/centauro_mdgps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn_gym_envs.pybullet import CentauroObstacleEnv\n\nfrom robolearn.algorithms.rl_algos import MDGPS\n\nfrom robolearn.algorithms.rl_algos import init_pd\nfrom robolearn.torch.policies.gaussian_policy import GaussianPolicy\n\nfrom robolearn.algorithms.rl_algos import CostSum\nfrom robolearn.algorithms.rl_algos import CostState\nfrom robolearn.algorithms.rl_algos import CostInitialState\nfrom robolearn.algorithms.rl_algos import CostSafeDistance\nfrom robolearn.algorithms.rl_algos import CostAction\nfrom robolearn.algorithms.rl_algos import RAMP_CONSTANT\nfrom robolearn.algorithms.rl_algos import RAMP_FINAL_ONLY\n\nfrom robolearn_gym_envs.utils.transformations import create_quat_pose\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nimport robolearn.torch.utils.pytorch_util as ptu\n\nimport argparse\n\n# np.set_printoptions(precision=3, suppress=True)\n\nTEND = 4.0\nSIM_TIMESTEP = 0.01\nFRAME_SKIP = 1\nTS = FRAME_SKIP * SIM_TIMESTEP\nT = int(TEND/TS)\n\nGPU = True\n# GPU = False\n\nSEED = 450\n\nptu.set_gpu_mode(GPU)\n\nnp.random.seed(SEED)\nptu.seed(SEED)\n\nnoise_hyperparams = dict(\n smooth_noise=True, # Apply Gaussian filter to noise generated\n smooth_noise_var=2.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. 
In Kumar (2016) paper, it is the std dev of 2 Ts\n smooth_noise_renormalize=True, # Renormalize smooth noise to have variance=1\n noise_var_scale=1.e-5*np.array([1., 1., 1., 1., .1, 0.1, 0.1]), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n)\n\nalgo_params = dict(\n seed=SEED,\n nepochs=100,\n num_samples=3,\n test_samples=1,\n noisy_samples=True,\n # noisy_samples=False,\n train_conds=[0, 1, 2],\n test_conds=[0, 1, 2],\n base_kl_step=0.05, # 0.01,\n # base_kl_step=5000000.0,\n global_opt_iters=5000,\n # global_opt_iters=50,\n global_opt_batch_size=128,\n global_opt_lr=1e-2,\n # TRAJ OPT\n # --------\n traj_opt_prev='nn_pol',\n # traj_opt_prev='traj',\n traj_opt_iters=1,\n traj_opt_min_eta=1e-8,\n traj_opt_max_eta=1e3, # 1e16\n)\n\nenv_params = dict(\n is_render=True,\n # is_render=False,\n obs_with_img=False,\n active_joints='RA',\n control_mode='tasktorque',\n # _control_mode='velocity',\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n obs_distances=True,\n goal_tolerance=0.02,\n max_time=None,\n)\n\npolicy_params = dict(\n global_pol_params=dict(\n hidden_sizes=(128, 128),\n hidden_activation='relu',\n # output_w_init='xavier_normal',\n output_w_init='xavier_normal_0.01',\n output_b_init_val=0.0,\n ),\n local_pol_params=dict(\n max_var=0.01,\n )\n)\n\n# Fixed initial conditions [tgtX, tgtY, tgtZ, tgtROLL, obstX, obstY]\ninitial_conds = [\n [1.12, -0.2, 0.99, 0.1, 1.08, -0.35],\n [1.13, -0.2, 0.99, 0.1, 1.08, -0.35],\n [1.125, -0.2, 0.95, 0.1, 1.07, -0.35],\n [1.12, -0.2, 0.94, 0.1, 1.08, -0.35],\n [1.13, -0.2, 1.0, 0.1, 1.08, -0.35],\n]\n\ncost_params = dict(\n # COSTS:\n # 0: action,\n # 1: target_state\n # 2: target_state_final\n # 3: safe_distance\n # 4: safe_distance_final\n # 5: safe_distance_final\n # 6: velocity\n costs_to_consider=[0, 1, 2, 6],\n cost_weights=[1.0e-1, 1.0e+0, 1.0e-2, 0.0e+0, 0.0e+0, 1.0e-4, 1.0e+0],\n)\n\nexpt_params = dict(\n # General Algo\n algo_params=algo_params,\n cost_params=cost_params,\n env_params=env_params,\n policy_params=policy_params,\n initial_conditions=initial_conds,\n)\n\n\ndef create_environment():\n print(\"\\nCreating Environment...\")\n\n env = CentauroObstacleEnv(**env_params)\n\n # env.set_tgt_cost_weights(tgt_weights)\n # env.set_tgt_pos(tgt_positions)\n\n print(\"Environment:%s OK!.\" % type(env).__name__)\n\n obst_height = 0.851\n\n for init_cond in initial_conds:\n robot_config = env._init_robot_config\n tgt_state = create_quat_pose(pos_x=init_cond[0],\n pos_y=init_cond[1],\n pos_z=init_cond[2],\n rot_roll=init_cond[3])\n\n obst_state = create_quat_pose(pos_x=init_cond[4],\n pos_y=init_cond[5],\n pos_z=obst_height,\n rot_yaw=0.)\n\n env.add_initial_condition(robot_config=robot_config,\n tgt_state=tgt_state,\n obst_state=obst_state)\n\n print(\"Conditions for Environment:%s OK!.\" % type(env).__name__)\n\n return env\n\n\ndef create_cost_fcn(env):\n\n # ########### #\n # ACTION COST #\n # ########### #\n cost_action = CostAction(\n wu=np.ones(env.action_dim),\n target=None,\n )\n\n # ################# #\n # TARGET STATE COST #\n # ################# #\n # TODO: ASSUMING ENV IS OBS_DISTANCES\n\n state_idxs = [\n # env.get_obs_info(name='position')['idx'], # MOVE ARM\n env.get_obs_info(name='target')['idx'], # Move to target\n ]\n target_states = [\n # [-0., 0.2, 0.8, 0.8, 0., 0.8, -0.], # Moving only 2nd joint\n [0., 0., 0., 0., 0., 0.], # Moving to target\n ]\n wps = [\n # np.array([1., 1., 1., 1., 1., 1., 1.]), # MOVE ARM\n np.array([1.3, 2., 1., 0.1, 0.1, 0.1]),\n ]\n 
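    # Hedged illustration (an assumption, not robolearn's actual CostState
    # code): with l1_weight=0 and l2_weight=1 as configured below, the
    # per-timestep state cost behaves like a weighted squared distance to
    # the target, and 'logl2' then squashes it with a log term controlled
    # by alpha. The helper below sketches only the l2 part and is never
    # called in this script.
    def _l2_state_cost_sketch(x, idx, target, wp):
        # x: (T, dX) state trajectory; idx: state indices; wp: weights.
        d = x[:, idx] - np.asarray(target)  # (T, len(idx)) error to target
        return 0.5 * np.sum(np.asarray(wp) * d ** 2, axis=1)  # (T,) cost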
cost_target_state = CostState(\n ramp_option=RAMP_CONSTANT,\n state_idxs=state_idxs,\n target_states=target_states,\n wps=wps,\n wp_final_multiplier=1.0e0,\n cost_type='logl2',\n l1_weight=0.,\n l2_weight=1.,\n alpha=1.e-2\n\n )\n cost_target_state_final = CostState(\n ramp_option=RAMP_FINAL_ONLY,\n state_idxs=state_idxs,\n target_states=target_states,\n wps=wps,\n wp_final_multiplier=1.0e0,\n cost_type='logl2',\n l1_weight=0.,\n l2_weight=1.,\n alpha=1.e-2\n\n )\n\n\n # ################## #\n # SAFE DISTANCE COST #\n # ################## #\n\n safe_x = 0.15\n safe_y = 0.15\n safe_z = 0.25\n state_idxs = [\n env.get_obs_info(name='obstacle')['idx'][:3],\n ]\n safe_distances = [\n [safe_x, safe_y, safe_z],\n ]\n wps = [\n np.array([1., 1., 1.]),\n ]\n inside_costs = [\n [1., 1., 1.],\n ]\n outside_costs = [\n [0., 1., 0.],\n ]\n\n cost_safe_distance = CostSafeDistance(\n ramp_option=RAMP_CONSTANT,\n state_idxs=state_idxs,\n safe_distances=safe_distances,\n wps=wps,\n inside_costs=inside_costs,\n outside_costs=outside_costs,\n wp_final_multiplier=1.0,\n )\n\n cost_safe_distance_final = CostSafeDistance(\n ramp_option=RAMP_FINAL_ONLY,\n state_idxs=state_idxs,\n safe_distances=safe_distances,\n wps=wps,\n inside_costs=inside_costs,\n outside_costs=outside_costs,\n wp_final_multiplier=1.0,\n )\n\n # ######### #\n # POSE COST #\n # ######### #\n state_idxs = [\n env.get_obs_info(name='position')['idx'], # MOVE ARM\n ]\n target_states = [\n [-0., 0.0, 0.0, 0.0, 0., 0.0, -0.], # Moving only 2nd joint\n ]\n wps = [\n np.array([1., 1., 1., 1., 1., 1., 1.]), # MOVE ARM\n ]\n cost_initial_state = CostInitialState(\n ramp_option=RAMP_CONSTANT,\n state_idxs=state_idxs,\n target_states=target_states,\n wps=wps,\n wp_final_multiplier=1.0e0,\n cost_type='logl2',\n l1_weight=0.,\n l2_weight=1.,\n alpha=1.e-2\n\n )\n\n # ######### #\n # POSE VELOCITY #\n # ######### #\n state_idxs = [\n env.get_obs_info(name='velocity')['idx'], # MOVE ARM\n ]\n target_states = [\n [0., 0.0, 0.0, 0.0, 0., 0.0, 0.],\n ]\n wps = [\n np.array([1., 1., 1., 1., 1., 1., 1.]), # MOVE ARM\n ]\n cost_velocity = CostState(\n ramp_option=RAMP_CONSTANT,\n state_idxs=state_idxs,\n target_states=target_states,\n wps=wps,\n wp_final_multiplier=1.0e0,\n cost_type='logl2',\n l1_weight=0.,\n l2_weight=1.,\n alpha=1.e-2\n\n )\n\n\n # ######### #\n # COST SUM #\n # ######### #\n\n all_costs = [\n cost_action,\n cost_target_state,\n cost_target_state_final,\n cost_safe_distance,\n cost_safe_distance_final,\n cost_initial_state,\n cost_velocity,\n ]\n\n cost_idxs = cost_params['costs_to_consider']\n weights = cost_params['cost_weights']\n\n cost_sum = CostSum(costs=[all_costs[idx] for idx in cost_idxs],\n weights=[weights[idx] for idx in cost_idxs])\n\n return cost_sum\n\n\ndef create_policies(env):\n train_conds = expt_params['algo_params']['train_conds']\n local_policies = list()\n for cc in train_conds:\n tvlgc_pol = init_pd(dU=env.action_dim,\n dX=env.obs_dim,\n T=T,\n x0=env.initial_obs_conditions[cc],\n state_idx=env.get_obs_info(name='position')['idx'],\n dstate_idx=None,\n pos_gains=0.00005, # For dU\n Kp=1,\n Kv=0.00001,\n init_var=np.array(policy_params['local_pol_params']['max_var']),\n )\n\n local_policies.append(tvlgc_pol)\n\n global_policy = GaussianPolicy(\n obs_dim=env.obs_dim,\n action_dim=env.action_dim,\n hidden_sizes=policy_params['global_pol_params']['hidden_sizes'],\n hidden_activation=policy_params['global_pol_params']['hidden_activation'],\n output_w_init=policy_params['global_pol_params']['output_w_init'],\n 
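        # (hedged reading of the name, not verified against the policy code:
        #  'xavier_normal_0.01' presumably scales the Xavier-normal output
        #  weights by 0.01 so the fresh global policy starts near zero)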
output_b_init_val=policy_params['global_pol_params']['output_b_init_val'],\n )\n\n return local_policies, global_policy\n\n\ndef create_algo(env, local_policies, global_policy, cost_fcn):\n train_conds = algo_params['train_conds']\n test_conds = algo_params['test_conds']\n noisy_samples = algo_params['noisy_samples']\n num_samples = expt_params['algo_params']['num_samples']\n test_samples = expt_params['algo_params']['test_samples']\n seed = expt_params['algo_params']['seed']\n\n\n algo = MDGPS(\n env=env,\n local_policies=local_policies,\n global_policy=global_policy,\n cost_fcn=cost_fcn,\n eval_env=env,\n num_epochs=algo_params['nepochs'],\n num_steps_per_epoch=int(len(local_policies)*len(train_conds)*num_samples*T),\n num_steps_per_eval=int(T*len(test_conds)),\n max_path_length=T,\n train_cond_idxs=algo_params['train_conds'],\n test_cond_idxs=algo_params['test_conds'],\n num_samples=num_samples,\n test_samples=test_samples,\n noisy_samples=noisy_samples,\n noise_hyperparams=noise_hyperparams,\n seed=seed,\n base_kl_step=algo_params['base_kl_step'],\n global_opt_iters=algo_params['global_opt_iters'],\n global_opt_batch_size=algo_params['global_opt_batch_size'],\n global_opt_lr=algo_params['global_opt_lr'],\n traj_opt_prev=algo_params['traj_opt_prev'],\n traj_opt_iters=algo_params['traj_opt_iters'],\n traj_opt_min_eta=algo_params['traj_opt_min_eta'],\n traj_opt_max_eta=algo_params['traj_opt_max_eta'],\n save_algorithm=False,\n save_environment=False,\n )\n\n return algo\n\n\n# ####################\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=25)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n # GPU arguments\n parser.add_argument('--gpu', action=\"store_true\")\n # Other arguments\n parser.add_argument('--render', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\nargs = parse_args()\n\n# Experiment name\nif args.expt_name is None:\n expt_name = 'centauro_mdgps'\nelse:\n expt_name = args.expt_name\n\nlog_dir = setup_logger(expt_name,\n variant=expt_params,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n\nprint('***********************\\n'*5)\nprint(\"Logging in directory: %s\" % log_dir)\nprint('***********************\\n'*5)\n\n\nenv = create_environment()\n#\n# for cc in range(env.n_init_conds):\n# env.reset(condition=cc)\n# print(cc)\n# input('wuuu')\n\ncost_fcn = create_cost_fcn(env)\n\nlocal_policies, global_policy = create_policies(env)\n\nmdgps_algo = create_algo(env, local_policies, global_policy, cost_fcn)\n\n# if ptu.gpu_enabled():\n# mdgps_algo.cuda()\nif ptu.gpu_enabled():\n global_policy.cuda()\n\nstart_epoch = 0\nmdgps_algo.train(start_epoch=start_epoch)\n\n# action_dim = env.action_dim\n# obs_dim = env.obs_dim\n# state_dim = env.state_dim\n#\n# print(action_dim, obs_dim, state_dim)\n#\n# fake_sample = dict(\n# actions=np.random.rand(10, action_dim),\n# observations=np.random.rand(10, obs_dim)\n# )\n# fake_sample['actions'][-1] = 0\n# fake_sample['observations'][-1, env.get_obs_info(name='position')['idx']] = \\\n# [-0., 0.2, 0.8, 0.8, 0., 0.8, -0.]\n#\n# l, lx, lu, lxx, luu, lux, cost_composition = 
cost_fcn.eval(fake_sample)\n# print('l', l)\n# for ii, cost_compo in enumerate(cost_composition):\n# print('cost_compo', ii, ': ', cost_compo)\n\n# input('MDGPS IS OVER')\n" }, { "alpha_fraction": 0.48121675848960876, "alphanum_fraction": 0.48753324151039124, "avg_line_length": 31.85792350769043, "blob_id": "f8ebf99fe0eb7ff0c87d32ac083d70b10cb82d49", "content_id": "b1086074318981c8ab0e665f26638a6cfc3c91de", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6016, "license_type": "permissive", "max_line_length": 97, "num_lines": 183, "path": "/robolearn/algorithms/traj_opt/iLQR.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\niLQR\nBased on C. Finn GPS\n\"\"\"\nimport numpy as np\nimport scipy as sp\nfrom numpy.linalg import LinAlgError\nimport copy\n\nfrom .base import TrajOpt\n\n\nDEFAULT_HYPERPARAMS = dict(\n # Dual variable updates for non-PD Q-function.\n del0=1e-4,\n eta_error_threshold=1e16,\n min_eta=1e-8,\n max_eta=1e16,\n cons_per_step=False, # Whether or not to enforce separate KL constraints at each time step.\n use_prev_distr=False, # Whether or not to measure expected KL under the previous traj distr.\n update_in_bwd_pass=True, # Whether or not to update the TVLG controller during the bwd pass.\n)\n\n\nclass iLQR(object):\n \"\"\" iterative LQR \"\"\"\n def __init__(self, horizon, state_dim, action_dim, cost_fcn, delta=1e-4):\n\n self._T = horizon\n self._dX = state_dim\n self._dU = action_dim\n self._cost_fcn = cost_fcn\n self._delta = delta\n\n def update(self, traj_distr, linear_dynamics, cost_fcn, x0mu, x0sigma):\n\n new_mu, new_sigma = self.forward(traj_distr, linear_dynamics,\n x0mu, x0sigma)\n\n def forward(self, traj_distr, linear_dynamics, x0mu, x0sigma):\n \"\"\"\n Perform LQR forward pass. 
Computes state-action marginals from dynamics\n and policy.\n Args:\n traj_distr:\n\n Returns:\n mu: T x dX mean state-action vector\n sigma: T x dX x dX state-action covariance matrix\n\n \"\"\"\n\n # Allocate space\n sigma = np.zeros((self._T, self._dX+self._dU, self._dX+self._dU))\n mu = np.zeros((self._T, self._dX+self._dU))\n\n # Get dynamics\n Fm = linear_dynamics.Fm\n fv = linear_dynamics.fv\n dyn_covar = linear_dynamics.dyn_covar\n\n # Get TVLGC params\n K = traj_distr.K\n k = traj_distr.k\n pol_covar = traj_distr.pol_covar\n\n # Indexes\n idx_x = slice(self._dX)\n\n # Set initial covariance (initial mu is always zero)\n sigma[0, idx_x, idx_x] = x0sigma\n mu[0, idx_x] = x0mu\n\n for t in range(self._T):\n # Update Covariance\n sigma[t, :, :] = np.vstack((\n # dX x dX+dU\n np.hstack((\n sigma[t, idx_x, idx_x],\n sigma[t, idx_x, idx_x].dot(K[t, :, :].T),\n )),\n # dU x dX+dU\n np.hstack((\n K[t, :, :].dot(sigma[t, idx_x, idx_x]),\n K[t, :, :].dot(sigma[t, idx_x, idx_x]).dot(K[t, :, :].T) +\n pol_covar[t, :, :],\n ))\n ))\n # Update Action mean\n mu[t, :] = np.hstack((\n mu[t, idx_x],\n K[t, :, :].dot(mu[t, idx_x]) + k[t, :]\n ))\n\n # Forward Dynamics\n if t < self._T - 1:\n sigma[t+1, idx_x, idx_x] = \\\n Fm[t, :, :].dot(sigma[t, :, :]).dot(Fm[t, :, :].T) + \\\n dyn_covar[t, :, :]\n mu[t+1, idx_x] = Fm[t, :, :].dot(mu[t, :]) + fv[t, :]\n\n return mu, sigma\n\n def backward(self, prev_traj_distr, linear_dynamics, costs):\n traj_distr = prev_traj_distr.copy()\n K = traj_distr.K\n k = traj_distr.k\n inv_pol_covar = traj_distr.inv_pol_covar\n pol_covar = traj_distr.pol_covar\n chol_pol_covar = traj_distr.chol_pol_covar\n\n # Indexes\n idx_x = slice(self._dX)\n idx_u = slice(self._dX, self._dX + self._dU)\n\n # Get Dynamics Matrix and Vector\n Fm = linear_dynamics.Fm\n fv = linear_dynamics.fv\n\n # Non-SPD correction terms\n delta = self._delta\n\n # Solve triangular Function\n solve_triangular = sp.linalg.solve_triangular\n\n # ################### #\n # Dynamic Programming #\n # ################### #\n\n # Allocate\n Vxx = np.zeros((self._T, self._dX, self._dX))\n Vx = np.zeros((self._T, self._dX))\n Qtt = np.zeros((self._T, self._dX+self._dU, self._dX+self._dU))\n Qt = np.zeros((self._T, self._dX+self._dU))\n\n # Gradient and Hessian of Reward\n fCm = costs.fCm\n fcv = costs.fcv\n\n # Compute state-action-state function at each time step\n for t in range(self._T - 1, -1, -1):\n # Add in the cost gradient and Hessian respectively\n Qtt[t] = fCm[t, :, :] # (X+U) x (X+U)\n Qt[t] = fcv[t, :, :] # (X+U) x 1\n\n # Add in the state value function from the next time step\n if t < self._T - 1:\n Qtt[t] += Fm[t, :, :].T.dot(Vxx[t+1, :, :]).dot(Fm[t, :, :])\n Qt[t] += Fm[t, :, :].T.dot(\n Vx[t+1, :] + Vxx[t+1, :, :].dot(fv[t, :])\n )\n # Symmetrize quadratic component\n Qtt[t] = 0.5 * (Qtt[t] + Qtt[t].T)\n\n inv_term = Qtt[t, idx_u, idx_u] # Quu\n k_term = Qt[t, idx_u] # Qu\n K_term = Qtt[t, idx_u, idx_u]\n\n # Compute Cholesky decomposition of Q-value action component\n U = sp.linalg.cholesky(inv_term) # chol(Quu)\n L = U.T\n\n # Conditional covariance, inverse, and Cholesky decomposition\n inv_pol_covar[t, :, :] = inv_term\n pol_covar[t, :, :] = solve_triangular(\n U, solve_triangular(L, np.eye(self._dU), lower=True)\n )\n chol_pol_covar[t, :, :] = sp.linalg.cholesky(pol_covar[t, :, :])\n\n # Compute mean terms\n k[t] = -solve_triangular(\n U, solve_triangular(L, k_term, lower=True)\n )\n K[t] = -solve_triangular(\n U, solve_triangular(L, K_term, lower=True)\n )\n\n # State value gradient\n 
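            # (equivalently: with u_t = K_t x_t + k_t substituted into the
            #  quadratic Q-function, the value function propagates backward
            #  as Vx_t = Qx_t + Qxu_t k_t and Vxx_t = Qxx_t + Qxu_t K_t,
            #  which is exactly what the two updates below compute)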
Vx[t] = Qt[t, idx_x] + Qtt[t, idx_x, idx_u].dot(k[t])\n # State value Hessian\n Vxx[t] = Qtt[t, idx_x, idx_x] + Qtt[t, idx_x, idx_u].dot(K[t])\n # Symmetrize quadratic component\n Vxx[t] = 0.5 * (Vxx[t] + Vxx[t].T)\n\n\n\n" }, { "alpha_fraction": 0.4889015257358551, "alphanum_fraction": 0.49630051851272583, "avg_line_length": 28.266666412353516, "blob_id": "08d31438f45fe1c78ce88b2560b4ef1a04bb5717", "content_id": "d4d3c9917ac13efcf6216e6f930e9efe92686d62", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1757, "license_type": "permissive", "max_line_length": 66, "num_lines": 60, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/dynamics/dynamics_lr_prior.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.algorithms.rl_algos import gauss_fit_joint_prior\n\n\nclass DynamicsLRPrior(object):\n def __init__(self, prior, sigma_regu=1e-6):\n self.Fm = None\n self.fv = None\n self.dyn_covar = None\n self._prior = prior\n self._sigma_regu = sigma_regu\n\n def update_prior(self, X, U, state_idx=None, action_idx=None):\n \"\"\" Update dynamics prior. \"\"\"\n if state_idx is not None:\n X = X[:, :, state_idx]\n\n if action_idx is not None:\n U = U[:, :, action_idx]\n\n self._prior.update(X, U)\n\n def get_prior(self):\n \"\"\" Return the dynamics prior. \"\"\"\n return self._prior\n\n def fit(self, X, U):\n \"\"\" Fit dynamics. \"\"\"\n N, T, dX = X.shape\n dU = U.shape[2]\n\n if N == 1:\n raise ValueError(\"Cannot fit dynamics on 1 path\")\n\n self.Fm = np.zeros([T, dX, dX+dU])\n self.fv = np.zeros([T, dX])\n self.dyn_covar = np.zeros([T, dX, dX])\n\n it = slice(dX+dU)\n ip = slice(dX+dU, dX+dU+dX)\n # Fit dynamics with least squares regression.\n dwts = (1.0 / N) * np.ones(N)\n\n for t in range(T - 1):\n Ys = np.c_[X[:, t, :], U[:, t, :], X[:, t+1, :]]\n\n # Obtain Normal-inverse-Wishart prior.\n mu0, Phi, mm, n0 = self._prior.eval(dX, dU, Ys)\n sig_reg = np.zeros((dX+dU+dX, dX+dU+dX))\n sig_reg[it, it] = self._sigma_regu\n\n Fm, fv, dyn_covar = gauss_fit_joint_prior(\n Ys, mu0, Phi, mm, n0, dwts, dX+dU, dX, sig_reg\n )\n\n self.Fm[t, :, :] = Fm\n self.fv[t, :] = fv\n self.dyn_covar[t, :, :] = dyn_covar\n\n return self.Fm, self.fv, self.dyn_covar\n\n" }, { "alpha_fraction": 0.5248091816902161, "alphanum_fraction": 0.5305343270301819, "avg_line_length": 19.959999084472656, "blob_id": "99a2833baeb5983bba4a0ed536cfd4d084daff7a", "content_id": "7973800e6026867b644b81fac1a74cbdafa544e9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "permissive", "max_line_length": 74, "num_lines": 25, "path": "/setup.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "# setup.py\nfrom setuptools import setup\n\nsetup(\n name='robolearn',\n version='0.2.0',\n description='A Robot-Learning package: Robot reinforcement learning.',\n maintainer=\"Domingo Esteban\",\n maintainer_email=\"[email protected]\",\n packages=['robolearn'],\n install_requires=[\n 'gym',\n 'numpy',\n 'torch',\n 'robolearn_gym_envs',\n 'tqdm',\n 'python-dateutil',\n 'joblib',\n 'matplotlib',\n 'pandas',\n 'gtimer',\n 'ipython',\n 'tensorboardX',\n ],\n)\n" }, { "alpha_fraction": 0.4968690574169159, "alphanum_fraction": 0.5213474035263062, "avg_line_length": 51.407344818115234, "blob_id": "0acca3f08760d8181432087e5a19ec6f24a0be49", "content_id": "fb1a907b1341a510f7bb5b9b25006c7773cfa867", 
"detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91346, "license_type": "permissive", "max_line_length": 241, "num_lines": 1743, "path": "/scenarios/tests/load_plot_good_bad_data.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import MaxNLocator\nimport pickle\nimport math\nimport os, sys\nfrom robolearn.old_utils.plot_utils import plot_sample_list, plot_sample_list_distribution, lqr_forward, plot_3d_gaussian\nfrom robolearn.old_algos.gps.gps_utils import IterationData\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.traj_opt.traj_opt_utils import traj_distr_kl, traj_distr_kl_alt\nimport scipy.stats\nfrom matplotlib.patches import Ellipse\n\n# gps_directory_name = 'GPS_2017-09-01_15:22:55' # Test MDGPS | Weekend\n# gps_directory_name = 'GPS_2017-09-04_10:45:00' # Test MDGPS | New cov_bad\n# gps_directory_name = 'GPS_2017-09-08_18:07:44' # Only Bad Friday 08/09\n# gps_directory_name = 'GPS_2017-09-09_15:49:05' # Normal Saturday 09/09\n# gps_directory_name = 'GPS_2017-09-10_15:30:24' # Normal Sunday 10/09 | new init_pos\n# gps_directory_name = 'GPS_2017-09-10_19:10:07' # G/B Sunday 10/09 | xi = 2.5, chi = 1.0\n# gps_directory_name = 'GPS_2017-09-11_15:25:19' # Bad | 2 hidden | xi = 5\n# gps_directory_name = 'GPS_2017-09-12_07:01:16' # Normal | 2 hidden | xi = 5\n# gps_directory_name = 'GPS_2017-09-12_17:30:19' # G/B | 2 hidden | xi=4, chi=1.0, tol_good=0.3\n# gps_directory_name = 'GPS_2017-09-12_20:08:37' # G/B | 2 hidden | xi=3, chi=1.0, tol_good=0.6\n# gps_directory_name = 'GPS_2017-09-13_07:24:42' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | Ts=0.02\n# gps_directory_name = 'GPS_2017-09-13_13:24:56' # NORMAL MDGPS PREV (New reset?)\n# gps_directory_name = 'GPS_2017-09-13_21:58:52' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 |always\n# gps_directory_name = 'GPS_2017-09-14_08:37:37' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=1e16\n# gps_directory_name = 'GPS_2017-09-14_09:59:56' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=1e2\n# gps_directory_name = 'GPS_2017-09-14_11:32:56' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=2e1\n# gps_directory_name = 'GPS_2017-09-14_12:35:39' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=2e1 | max_nu=5e0 | init_nu=0.4 init_omega=0.4\n# gps_directory_name = 'GPS_2017-09-14_13:10:46' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=2e0 | max_nu=2e0 | init_nu=0.2 init_omega=0.2\n# gps_directory_name = 'GPS_2017-09-14_15:46:29' # G/B | 2 hidden | xi=3, chi=1.5, tol_good=0.6 | only_traj | max_omega=5e-1 | max_nu=5e-1 | init_nu=0.001 init_omega=0.001\n# gps_directory_name = 'GPS_2017-09-14_17:05:37' # As before but MDGPS\n# gps_directory_name = 'GPS_2017-09-15_06:58:05' # As before but B-MDGPS | base_kl_bad=3 -> This reach at iter\n# gps_directory_name = 'GPS_2017-09-15_08:44:42' # As before but B-MDGPS | base_kl_bad=3.5\n# gps_directory_name = 'GPS_2017-09-15_12:33:18' # As before but B-MDGPS | base_kl_bad=4.0 -> The local are very noisy and very rude movs, but at itr=07 it lifts al lot the arm but lands almost tho the point. 
Itr 40 almost reaches the pose.\n# gps_directory_name = 'GPS_2017-09-15_18:22:46' # As before but D-MDGPS | bse_kl_good=1.5 | good_tol=0.6\n# gps_directory_name = 'GPS_2017-09-15_18:22:46' # As before but D-MDGPS | bse_kl_good=1.0 | good_tol=0.6 | base_kl_bad=4.0??-->\n# gps_directory_name = 'GPS_2017-09-15_18:22:46' # As before but always good and always bad\n# gps_directory_name = 'GPS_2017-09-16_02:15:49' # B-MDGPS | base_kl_bad=4.5 | only_traj??\n# gps_directory_name = 'GPS_2017-09-16_02:51:51' # B-MDGPS | base_kl_bad=4.2 bse_kl_good=1.5?? | only_traj??\n\n# MDGPS, bad-MDGPS, dual-MDGPS\n#gps_directory_names = ['GPS_2017-09-13_13:24:56', 'GPS_2017-09-13_21:58:52', 'GPS_2017-09-13_21:58:52']\n\n# PAPER\ngps_directory_names = ['GPS_2017-09-14_17:05:37', 'GPS_2017-09-15_12:33:18', 'GPS_2017-09-15_18:22:46']\n\ngps_directory_names = ['GPS_2017-09-14_17:05:37', 'GPS_2017-09-16_02:51:51', 'GPS_2017-09-15_18:22:46']\ngps_directory_names = ['GPS_2017-09-20_09:49:20', 'GPS_2017-09-20_19:23:28', 'GPS_2017-09-20_13:33:50']\n#gps_directory_names = ['GPS_2017-09-16_02:51:51', 'GPS_2017-09-16_02:51:51', 'GPS_2017-09-16_02:51:51']\n\ngps_models_labels = ['MDGPS', 'B-MDGPS', 'D-MDGPS']\n#gps_models_line_styles = [':', '--', '-']\ngps_models_line_styles = ['-', '-', '-']\ngps_models_colors = ['black', 'magenta', 'orange']\ngps_models_markers = ['^', 's', 'D']\n\nindeces_drill = np.array([27, 28, 29, 30, 31, 32])\n#failure_limits = np.array([0.1, 0.1, 0.1, 0.02, 0.02, 0.02])\n#failure_limits = np.array([0.01, 0.01, 0.01, 0.02, 0.02, 0.02])\nfailure_limits = np.array([0.1, 0.1, 0.1, 0.02, 0.02, 0.02])\nfailure_limits = np.array([0.8, 0.8, 0.8, 0.13, 0.13, 0.13])\nmax_t_acum_error = 10\n\ninit_itr = 0\nfinal_itr = 100\n#final_itr = 30\nsamples_idx = [-1] # List of samples / None: all samples\nmax_traj_plots = None # None, plot all\nlast_n_iters = None # 5 # None, plot all iterations\nitr_to_load = range(25)#[4, 9, 14, 19, 24] #range(6)#[5, 20, 29, 32, 42]#range(50) #None # range(17, 21)#[10, 11, 12, 13, 14]#[1, 5, 9, 12, 14]#, 5, 6]#, 20, 30, 40]\n#itr_to_load = [8, 10, 12, 14, 18] # Best dmdgps\n#itr_to_load = [17, 19, 21, 23, 24] # Best bmdgps\n#itr_to_load = [8, 10, 12, 14, 18] # mdgps\n\nsensed_joints = 'RA'\nmethod = 'MDGPS_MDREPS'\n\noptions = {\n 'plot_eta': False,\n 'plot_nu': False,\n 'plot_omega': False,\n 'plot_step_mult': False, # If linearized policy(then NN policy) is worse, epsilon is reduced.\n 'plot_cs': False,\n 'plot_sample_list_actions': False,\n 'plot_sample_list_states': False,\n 'plot_sample_list_obs': False,\n 'plot_sample_list_actions_dual': False,\n 'plot_policy_output': False,\n 'plot_policy_actions': False,\n 'plot_policy_states': False,\n 'plot_policy_obs': False,\n 'plot_traj_distr': False,\n 'plot_3d_traj': False,\n 'plot_3d_duality_traj': False,\n # Important\n 'plot_duality_influence': False,\n 'plot_duality_traj_distr': False,\n 'plot_policy_costs': True,\n 'plot_3d_pol_traj': False,\n 'plot_train_errors': True,\n #'plot_duality_influence': False,\n #'plot_duality_traj_distr': False,\n #'plot_policy_costs': False,\n #'plot_3d_pol_traj': True,\n #'plot_train_errors': False,\n}\n\ncs_color = 'red'\nstep_mult_color = 'red'\nsample_list_cols = 3\nplot_sample_list_max_min = False\nplot_joint_limits = True\ngps_num = 0\ntotal_gps = len(gps_directory_names)\n\n\nduality_data_options = ['plot_duality_traj_distr', 'plot_sample_list_actions_dual', 'plot_3d_duality_traj', 'plot_duality_influence']\npolicy_different_options = ['plot_policy_costs']\niteration_data_options = [key for 
key, value in options.items() if key not in duality_data_options+policy_different_options]\n\nload_iteration_data = any([options[key] for key in iteration_data_options])\nload_duality_data = any([options[key] for key in duality_data_options])\nload_policy_different_data = any([options[key] for key in policy_different_options])\n\n#iteration_data_options = [value for key, value in options.items() if key not in duality_data_options+policy_different_options]\n\n\niteration_data_list = [list() for _ in range(total_gps)]\ngood_duality_info_list = [list() for _ in range(total_gps)]\ngood_trajectories_info_list = [list() for _ in range(total_gps)]\nbad_duality_info_list = [list() for _ in range(total_gps)]\nbad_trajectories_info_list = [list() for _ in range(total_gps)]\niteration_ids = [list() for _ in range(total_gps)]\npol_sample_lists_costs = [list() for _ in range(total_gps)]\npol_sample_lists_cost_compositions = [list() for _ in range(total_gps)]\n\nmax_available_itr = None\n\nfor gps, gps_directory_name in enumerate(gps_directory_names):\n gps_path = '/home/desteban/workspace/robolearn/scenarios/robolearn_log/' + gps_directory_name\n for pp in range(init_itr, final_itr):\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n max_available_itr = pp\n\n if max_available_itr is not None:\n print(\"Max available iterations: %d\" % max_available_itr)\n\n if itr_to_load is None:\n if last_n_iters is not None:\n init_itr = max(max_available_itr - last_n_iters + 1, 0)\n\n if max_traj_plots is not None:\n if max_available_itr > max_traj_plots:\n itr_to_load = np.linspace(init_itr, max_available_itr, max_traj_plots, dtype=np.uint8)\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n print(\"Iterations to load in %s: %s\" % (gps_directory_name, itr_to_load))\n for pp in itr_to_load:\n if load_iteration_data or load_duality_data or load_policy_different_data:\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading GPS iteration_data from iteration %d' % pp)\n iteration_data_list[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + method.upper() +'_iteration_data_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n iteration_ids[gps].append(pp+1)\n if load_duality_data:\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num) + 'bad_duality_info_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading GPS good_data from iteration %d' % pp)\n bad_duality_info_list[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + 'bad_duality_info_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n good_duality_info_list[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + 'good_duality_info_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n bad_trajectories_info_list[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + 'bad_trajectories_info_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n good_trajectories_info_list[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num) + 'good_trajectories_info_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n if load_policy_different_data:\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num)+'pol_sample_cost_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading 
policy sample cost from iteration %d' % pp)\n pol_sample_lists_costs[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num)+'pol_sample_cost_itr_'+str('%02d' % pp)+'.pkl', 'rb')))\n if os.path.isfile(gps_path+'/' + str('gps%02d_' % gps_num)+'pol_sample_cost_composition_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading policy sample cost composition from iteration %d' % pp)\n pol_sample_lists_cost_compositions[gps].append(pickle.load(open(gps_path+'/' + str('gps%02d_' % gps_num)+'pol_sample_cost_composition_itr_'+str('%02d' % pp)+'.pkl', 'rb')))\n\n # if load_iteration_data or load_duality_data or load_policy_different_data:\n # iteration_ids[gps].append(pp) --> Now it is done before\n\n\nif load_iteration_data:\n data_list_with_data = iteration_data_list\n if not data_list_with_data:\n raise AttributeError(\"No data has been loaded. Check that files exist\")\n T = iteration_data_list[0][-1][-1].sample_list.get_actions(samples_idx).shape[1]\nelif load_duality_data:\n data_list_with_data = bad_duality_info_list\n if not data_list_with_data:\n raise AttributeError(\"No data has been loaded. Check that files exist\")\n T = iteration_data_list[0][-1][-1].sample_list.get_actions(samples_idx).shape[1]\nelif load_policy_different_data:\n data_list_with_data = pol_sample_lists_costs\nelse:\n raise ValueError(\"NO data has been loaded!\")\n\n# total_cond = len(pol_sample_lists_costs[0])\ntotal_itr = len(data_list_with_data[0])\ntotal_cond = len(data_list_with_data[0][0])\ncolormap = plt.cm.rainbow # nipy_spectral, Set1, Paired, winter\n\njoint_limits = [bigman_params['joints_limits'][ii] for ii in bigman_params['joint_ids'][sensed_joints]]\n\nif options['plot_eta']:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Eta values | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n colormap_list = [colormap(i) for i in np.linspace(0, 1, total_gps)]\n ax.set_prop_cycle('color', colormap_list)\n lines = [None for _ in range(total_gps)]\n labels = [None for _ in range(total_gps)]\n for gps in range(total_gps):\n etas = np.zeros(total_itr)\n for itr in range(total_itr):\n etas[itr] = iteration_data_list[gps][itr][cond].eta\n lines[gps] = ax.plot(range(1, total_itr+1), etas, color=colormap_list[gps],\n linestyle=gps_models_line_styles[gps])[0]\n labels[gps] = gps_models_labels[gps]\n ax.set_title('Eta values | Condition %d' % cond)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_nu']:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Nu values | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n colormap_list = [colormap(i) for i in np.linspace(0, 1, total_gps)]\n ax.set_prop_cycle('color', colormap_list)\n lines = [None for _ in range(total_gps)]\n labels = [None for _ in range(total_gps)]\n nus = np.zeros(total_itr)\n for gps in range(total_gps):\n for itr in range(total_itr):\n nus[itr] = iteration_data_list[gps][itr][cond].nu\n lines[gps] = ax.plot(range(1, total_itr+1), nus, color=colormap_list[gps],\n linestyle=gps_models_line_styles[gps])[0]\n labels[gps] = gps_models_labels[gps]\n ax.set_title('Nu values | Condition %d' % cond)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., 
borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_omega']:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Omega values | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n colormap_list = [colormap(i) for i in np.linspace(0, 1, total_gps)]\n ax.set_prop_cycle('color', colormap_list)\n lines = [None for _ in range(total_gps)]\n labels = [None for _ in range(total_gps)]\n omegas = np.zeros(total_itr)\n for gps in range(total_gps):\n for itr in range(total_itr):\n omegas[itr] = iteration_data_list[gps][itr][cond].omega\n lines[gps] = ax.plot(range(1, total_itr+1), omegas, color=colormap_list[gps],\n linestyle=gps_models_line_styles[gps])[0]\n labels[gps] = gps_models_labels[gps]\n ax.set_title('Omega values | Condition %d' % cond)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif options['plot_step_mult']:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Step multiplier | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n etas = np.zeros(total_itr)\n for itr in range(total_itr):\n etas[itr] = iteration_data_list[itr][cond].step_mult\n ax.set_title('Step multiplier | Condition %d' % cond)\n ax.plot(range(1, total_itr+1), etas, color=eta_color)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif options['plot_cs']:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Samples Costs | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n mean_costs = np.zeros(total_itr)\n max_costs = np.zeros(total_itr)\n min_costs = np.zeros(total_itr)\n std_costs = np.zeros(total_itr)\n for itr in range(total_itr):\n samples_cost_sum = iteration_data_list[itr][cond].cs.sum(axis=1)\n mean_costs[itr] = samples_cost_sum.mean()\n max_costs[itr] = samples_cost_sum.max()\n min_costs[itr] = samples_cost_sum.min()\n std_costs[itr] = samples_cost_sum.std()\n ax.set_title('Samples Costs | Condition %d' % cond)\n ax.plot(range(1, total_itr+1), mean_costs, color=cs_color)\n ax.fill_between(range(1, total_itr+1), min_costs, max_costs, alpha=0.5, color=cs_color)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif options['plot_sample_list_actions_dual']:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_actions(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].sample_list.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = 
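# --- Aside: subplot-grid indexing --------------------------------------------
# The grids in this script index a 2-D `axs` array with `ii/sample_list_cols`,
# which relies on Python 2 floor division of ints; `divmod` (or `//`) expresses
# the same row/column mapping and also works under Python 3. Self-contained
# sketch with hypothetical sizes:
import math
import matplotlib.pyplot as plt

n_plots, n_cols = 7, 3
fig, axs = plt.subplots(int(math.ceil(float(n_plots) / n_cols)), n_cols)
for ii in range(axs.size):
    row, col = divmod(ii, n_cols)             # == (ii // n_cols, ii % n_cols)
    axs[row, col].set_visible(ii < n_plots)   # hide the unused trailing axes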
ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_sample_list_actions']:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_actions(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].sample_list.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_sample_list_states']:\n for cond in range(total_cond):\n dData = iteration_data_list[0][0][cond].sample_list.get_states(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('States | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n states = iteration_data_list[0][itr][cond].sample_list.get_states(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"State %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[0][itr]\n line = ax.plot(states.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n\n if plot_sample_list_max_min:\n ax.fill_between(range(states.mean(axis=0).shape[0]), states.min(axis=0)[:, ii],\n states.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n\n if plot_joint_limits and ii < len(joint_limits):\n ax.plot(np.tile(joint_limits[ii][0], [T]), color='black', linestyle='--', linewidth=1)\n ax.plot(np.tile(joint_limits[ii][1], [T]), color='black', linestyle='--', linewidth=1)\n 
else:\n                    plt.setp(ax, visible=False)\n\n        # One legend for all figures\n        legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n        legend.get_frame().set_alpha(0.4)\n\nif options['plot_sample_list_obs']:\n    for cond in range(total_cond):\n        dData = iteration_data_list[0][cond].sample_list.get_obs(samples_idx).shape[-1]\n        fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n        fig.subplots_adjust(hspace=0)\n        fig.canvas.set_window_title('Observations | Condition %d' % cond)\n        fig.set_facecolor((1, 1, 1))\n        for ii in range(axs.size):\n            ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n        lines = list()\n        labels = list()\n        for itr in range(total_itr):\n            obs = iteration_data_list[itr][cond].sample_list.get_obs(samples_idx)\n            for ii in range(axs.size):\n                ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n                if ii < dData:\n                    ax.set_title(\"Observation %d\" % (ii+1))\n                    label = \"itr %d\" % iteration_ids[itr]\n                    line = ax.plot(obs.mean(axis=0)[:, ii], label=label)[0]\n\n                    if ii == 0:\n                        lines.append(line)\n                        labels.append(label)\n\n                    if itr == 0:\n                        ax.tick_params(axis='both', direction='in')\n\n                    if plot_sample_list_max_min:\n                        ax.fill_between(range(obs.mean(axis=0).shape[0]), obs.min(axis=0)[:, ii],\n                                        obs.max(axis=0)[:, ii], alpha=0.5)\n                    # # One legend for each ax\n                    # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                    # legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\n        # One legend for all figures\n        legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n        legend.get_frame().set_alpha(0.4)\n\nif options['plot_policy_output']:\n    pol_sample_to_vis = -1\n    pol_confidence = 0.95\n    plot_confidence_interval = False\n    for cond in range(total_cond):\n        dData = iteration_data_list[0][cond].pol_info.pol_mu.shape[-1]\n        fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n        fig.subplots_adjust(hspace=0)\n        fig.canvas.set_window_title(\"Policy's Actions | Condition %d\" % cond)\n        fig.set_facecolor((1, 1, 1))\n        for ii in range(axs.size):\n            ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n        for itr in range(total_itr):\n            mus = iteration_data_list[itr][cond].pol_info.pol_mu[pol_sample_to_vis]\n            sigs = iteration_data_list[itr][cond].pol_info.pol_sig[pol_sample_to_vis]\n            mins = np.zeros_like(mus)\n            maxs = np.zeros_like(mus)\n            for tt in range(mins.shape[0]):\n                for dd in range(mins.shape[1]):\n                    mins[tt, dd], maxs[tt, dd] = scipy.stats.norm.interval(pol_confidence,\n                                                                           loc=mus[tt, dd],\n                                                                           scale=sigs[tt, dd, dd])\n            for ii in range(axs.size):\n                ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n                if ii < dData:\n                    ax.set_title(\"Action %d\" % (ii+1))\n                    ax.plot(mus[:, ii], label=(\"itr %d\" % iteration_ids[itr]))\n                    if plot_confidence_interval:\n                        ax.fill_between(range(mus.shape[0]), mins[:, ii], maxs[:, ii], alpha=0.5)\n                    legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                    legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\nif options['plot_policy_actions']:\n    for cond in range(total_cond):\n        dData = iteration_data_list[0][cond].pol_info.policy_samples.get_actions(samples_idx).shape[-1]\n        fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n        
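# --- Aside: Gaussian confidence bands -----------------------------------------
# The policy-output block above builds per-dimension confidence intervals with
# scipy.stats.norm.interval. The call is vectorized, so the per-(t, d) double
# loop can be collapsed; sketch with synthetic mean/std standing in for the
# policy mean and the diagonal of the policy covariance:
import numpy as np
import scipy.stats

mus = np.linspace(0., 1., 50)          # hypothetical mean trajectory
stds = np.full_like(mus, 0.1)          # hypothetical per-step std. dev.
low, high = scipy.stats.norm.interval(0.95, loc=mus, scale=stds)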
fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].pol_info.policy_samples.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_policy_states']:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.policy_samples.get_states(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy States | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n states = iteration_data_list[itr][cond].pol_info.policy_samples.get_states(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"State %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(states.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n\n if plot_sample_list_max_min:\n ax.fill_between(range(states.mean(axis=0).shape[0]), states.min(axis=0)[:, ii],\n states.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n\n if plot_joint_limits and ii < len(joint_limits):\n ax.plot(np.tile(joint_limits[ii][0], [T]), color='black', linestyle='--', linewidth=1)\n ax.plot(np.tile(joint_limits[ii][1], [T]), color='black', linestyle='--', linewidth=1)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_policy_obs']:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.policy_samples.get_obs(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy 
Observations | Condition %d' % cond)\n        fig.set_facecolor((1, 1, 1))\n        for ii in range(axs.size):\n            ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n        lines = list()\n        labels = list()\n        for itr in range(total_itr):\n            obs = iteration_data_list[itr][cond].pol_info.policy_samples.get_obs(samples_idx)\n            for ii in range(axs.size):\n                ax = axs[ii // sample_list_cols, ii % sample_list_cols]\n                if ii < dData:\n                    ax.set_title(\"Observation %d\" % (ii+1))\n                    label = \"itr %d\" % iteration_ids[itr]\n                    line = ax.plot(obs.mean(axis=0)[:, ii], label=label)[0]\n\n                    if ii == 0:\n                        lines.append(line)\n                        labels.append(label)\n\n                    if itr == 0:\n                        ax.tick_params(axis='both', direction='in')\n\n                    if plot_sample_list_max_min:\n                        ax.fill_between(range(obs.mean(axis=0).shape[0]), obs.min(axis=0)[:, ii],\n                                        obs.max(axis=0)[:, ii], alpha=0.5)\n                    # # One legend for each ax\n                    # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                    # legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\n        # One legend for all figures\n        legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n        legend.get_frame().set_alpha(0.4)\n\nif options['plot_traj_distr']:\n    traj_distr_confidence = 0.95\n    plot_confidence_interval = False\n    plot_legend = True\n    for cond in range(total_cond):\n        dX = iteration_data_list[-1][cond].traj_distr.dX\n        dU = iteration_data_list[-1][cond].traj_distr.dU\n        fig_act, axs_act = plt.subplots(int(math.ceil(float(dU)/sample_list_cols)), sample_list_cols)\n        fig_act.subplots_adjust(hspace=0)\n        fig_act.canvas.set_window_title(\"Trajectory Distribution's Actions | Condition %d\" % cond)\n        fig_act.set_facecolor((1, 1, 1))\n        fig_state, axs_state = plt.subplots(int(math.ceil(float(dX)/sample_list_cols)), sample_list_cols)\n        fig_state.subplots_adjust(hspace=0)\n        fig_state.canvas.set_window_title(\"Trajectory Distribution's States | Condition %d\" % cond)\n        fig_state.set_facecolor((1, 1, 1))\n\n        for ii in range(axs_act.size):\n            ax = axs_act[ii // sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n        for ii in range(axs_state.size):\n            ax = axs_state[ii // sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n        for itr in range(total_itr):\n            traj_distr = iteration_data_list[itr][cond].traj_distr\n            traj_info = iteration_data_list[itr][cond].traj_info\n\n            mu, sigma = lqr_forward(traj_distr, traj_info)\n            T = traj_distr.T\n            dU = traj_distr.dU\n            dX = traj_distr.dX\n            x_idxs = range(dX)\n            u_idxs = range(dX, dX+dU)\n            mins = np.zeros_like(mu)\n            maxs = np.zeros_like(mu)\n            if plot_confidence_interval:\n                for tt in range(T):\n                    sigma_diag = np.diag(sigma[tt, :, :])\n                    mins[tt, :], maxs[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu[tt, :],\n                                                                         scale=sigma_diag[:])\n\n            for ii in range(axs_act.size):\n                ax = axs_act[ii // sample_list_cols, ii % sample_list_cols]\n                if ii < dU:\n                    ax.set_title(\"Action %d\" % (ii+1))\n                    ax.plot(mu[:, u_idxs[ii]], label=(\"itr %d\" % iteration_ids[itr]))\n                    if plot_confidence_interval:\n                        ax.fill_between(range(T), mins[:, u_idxs[ii]], maxs[:, u_idxs[ii]], alpha=0.5)\n                    if plot_legend:\n                        legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                        legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\n            for ii in range(axs_state.size):\n                ax = axs_state[ii // sample_list_cols, ii % sample_list_cols]\n                
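# --- Aside: what lqr_forward is assumed to compute ----------------------------
# The trajectory-distribution blocks call lqr_forward(traj_distr, traj_info)
# for the marginal means/covariances over [x_t; u_t]. Assuming GPS-style
# conventions (a time-varying linear-Gaussian controller
# u_t ~ N(K_t x_t + k_t, pol_covar_t) rolled through fitted dynamics
# x_{t+1} ~ N(Fm_t [x_t; u_t] + fv_t, dyn_covar_t)), a minimal
# re-implementation sketch:
import numpy as np

def lqr_forward_sketch(K, k, pol_covar, Fm, fv, dyn_covar, x0mu, x0sigma):
    T, dU, dX = K.shape
    mu = np.zeros((T, dX + dU))
    sigma = np.zeros((T, dX + dU, dX + dU))
    mu_x, sigma_x = x0mu, x0sigma
    for t in range(T):
        mu[t] = np.concatenate([mu_x, K[t].dot(mu_x) + k[t]])
        sigma[t, :dX, :dX] = sigma_x
        sigma[t, :dX, dX:] = sigma_x.dot(K[t].T)
        sigma[t, dX:, :dX] = K[t].dot(sigma_x)
        sigma[t, dX:, dX:] = K[t].dot(sigma_x).dot(K[t].T) + pol_covar[t]
        if t < T - 1:
            mu_x = Fm[t].dot(mu[t]) + fv[t]
            sigma_x = Fm[t].dot(sigma[t]).dot(Fm[t].T) + dyn_covar[t]
    return mu, sigma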
if ii < dX:\n                    ax.set_title(\"State %d\" % (ii+1))\n                    ax.plot(mu[:, x_idxs[ii]], label=(\"itr %d\" % iteration_ids[itr]))\n                    if plot_confidence_interval:\n                        ax.fill_between(range(T), mins[:, x_idxs[ii]], maxs[:, x_idxs[ii]], alpha=0.5)\n                    if plot_legend:\n                        legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                        legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\nif options['plot_duality_influence']:\n    linewidth = 3.0\n    state_to_plot_idx = 2\n    traj_distr_confidence = 0.95\n    plot_confidence_interval = False # True\n    alpha_conf_int = 0.3\n    plot_legend = True\n    max_var = .5 # None, do not fix variance\n    fig_act, ax = plt.subplots(1, 1)\n\n    fig_act.canvas.set_window_title(\"Dualist Constraints Influence\")\n    ii = state_to_plot_idx\n    cond = 0\n    dX = iteration_data_list[0][0][0].traj_distr.dX\n    T = iteration_data_list[0][0][0].pol_info.policy_samples.get_states().shape[1]\n    time = np.arange(0, 5, 0.02)\n    x_idxs = range(dX)\n    for gps in range(total_gps):\n        traj_distr = iteration_data_list[gps][1][cond].traj_distr\n        traj_info = iteration_data_list[gps][1][cond].traj_info\n\n        mu, sigma = lqr_forward(traj_distr, traj_info)\n\n        if gps == 0:\n            good_distr = good_duality_info_list[gps][0][cond].traj_dist\n            good_traj_info = good_trajectories_info_list[gps][0][cond]\n            bad_distr = bad_duality_info_list[gps][0][cond].traj_dist\n            bad_traj_info = bad_trajectories_info_list[gps][0][cond]\n\n            prev_traj_distr = iteration_data_list[gps][0][cond].traj_distr\n            prev_traj_info = iteration_data_list[gps][0][cond].traj_info\n\n            mu_good, sigma_good = lqr_forward(good_distr, good_traj_info)\n            mu_bad, sigma_bad = lqr_forward(bad_distr, bad_traj_info)\n            mu_prev, sigma_prev = lqr_forward(prev_traj_distr, prev_traj_info)\n\n            kl_div_good_bad = traj_distr_kl_alt(mu_good, sigma_good, good_distr, bad_distr, tot=True)\n            print(\"KL_div(g||b): %f\" % kl_div_good_bad)\n\n            ax.plot(time, mu_prev[:, x_idxs[ii]], label=\"Initial trajectory\", zorder=9, color='blue', linewidth=linewidth)\n            ax.plot(time, mu_good[:, x_idxs[ii]], label=\"Good trajectory\", zorder=7, color='green', linewidth=linewidth)\n            ax.plot(time, mu_bad[:, x_idxs[ii]], label=\"Bad trajectory\", zorder=8, color='red', linewidth=linewidth)\n\n        ax.plot(time, mu[:, x_idxs[ii]], label=(\"Opt. traj. 
- %s\" % gps_models_labels[gps]), zorder=10, color=gps_models_colors[gps],\n linestyle=gps_models_line_styles[gps], linewidth=linewidth)\n\n ax.set_xlabel(\"Time (s)\", fontsize=30, weight='bold')\n ax.set_ylabel(\"Shoulder Yaw Angle (rad)\", fontsize=30, weight='bold')\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n #ax.set_xticks(range(min_iteration, max_iteration+1, 5))\n ax.set_xlim(0, 5)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n if plot_legend:\n #legend = ax.legend(loc='lower right', ncol=1, fontsize=20)\n #legend = ax.legend(bbox_to_anchor=(1.00, 1), loc=2, borderaxespad=0., fontsize=20)\n legend = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0.5, fontsize=20)\n #legend = ax.legend(loc='best', fontsize='x-small', borderaxespad=0.)\n #legend.get_frame().set_alpha(0.4)\n\n\n\n\nif options['plot_duality_traj_distr']:\n traj_distr_confidence = 0.95\n plot_confidence_interval = False # True\n alpha_conf_int = 0.3\n plot_legend = True\n max_var = .5 # None, do not fix variance\n\n for cond in range(total_cond):\n dX = iteration_data_list[0][-1][cond].traj_distr.dX\n dU = iteration_data_list[0][-1][cond].traj_distr.dU\n\n for gps in range(total_gps):\n fig_act, axs_act = plt.subplots(int(math.ceil(float(dU)/sample_list_cols)), sample_list_cols)\n fig_act.subplots_adjust(hspace=0)\n fig_act.canvas.set_window_title(\"Trajectory Distribution Actions for %s Condition %d\" % (gps_models_labels[gps], cond))\n fig_act.set_facecolor((1, 1, 1))\n fig_state, axs_state = plt.subplots(int(math.ceil(float(dX)/sample_list_cols)), sample_list_cols)\n fig_state.subplots_adjust(hspace=0)\n fig_state.canvas.set_window_title(\"Trajectory Distribution States for %s Condition %d\" % (gps_models_labels[gps], cond))\n fig_state.set_facecolor((1, 1, 1))\n\n for ii in range(axs_act.size):\n ax = axs_act[ii/sample_list_cols, ii % sample_list_cols]\n #ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, 3*total_itr)])\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, 4)]) # prev, current, prev_good, prev_bad\n for ii in range(axs_state.size):\n ax = axs_state[ii/sample_list_cols, ii % sample_list_cols]\n #ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, 3*total_itr)])\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, 4)]) # prev, current, prev_good, prev_bad\n\n #for itr in range(total_itr):\n for itr in [1]:\n traj_distr = iteration_data_list[gps][itr][cond].traj_distr\n traj_info = iteration_data_list[gps][itr][cond].traj_info\n\n good_distr = good_duality_info_list[gps][itr-1][cond].traj_dist\n good_traj_info = good_trajectories_info_list[gps][itr-1][cond]\n bad_distr = bad_duality_info_list[gps][itr-1][cond].traj_dist\n bad_traj_info = bad_trajectories_info_list[gps][itr-1][cond]\n\n prev_traj_distr = iteration_data_list[gps][itr-1][cond].traj_distr\n prev_traj_info = iteration_data_list[gps][itr-1][cond].traj_info\n\n mu, sigma = lqr_forward(traj_distr, traj_info)\n mu_good, sigma_good = lqr_forward(good_distr, good_traj_info)\n mu_bad, sigma_bad = lqr_forward(bad_distr, bad_traj_info)\n mu_prev, sigma_prev = lqr_forward(prev_traj_distr, prev_traj_info)\n\n kl_div_good_bad = traj_distr_kl_alt(mu_good, sigma_good, good_distr, bad_distr, tot=True)\n print(\"KL_div(g||b): %f\" % kl_div_good_bad)\n\n T = traj_distr.T\n dU = traj_distr.dU\n dX = traj_distr.dX\n x_idxs = range(dX)\n u_idxs = range(dX, dX+dU)\n if 
plot_confidence_interval:\n mins = np.zeros_like(mu)\n maxs = np.zeros_like(mu)\n mins_good = np.zeros_like(mu_good)\n maxs_good = np.zeros_like(mu_good)\n mins_bad = np.zeros_like(mu_bad)\n maxs_bad = np.zeros_like(mu_bad)\n mins_prev = np.zeros_like(mu_prev)\n maxs_prev = np.zeros_like(mu_prev)\n for tt in range(T):\n sigma_diag = np.diag(sigma[tt, :, :])\n mins[tt, :], maxs[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu[tt, :],\n scale=sigma_diag[:])\n sigma_good_diag = np.diag(sigma_good[tt, :, :])\n if max_var is not None:\n sigma_good_diag = np.min(np.vstack((sigma_good_diag, np.ones_like(sigma_good_diag)*max_var)), axis=0)\n mins_good[tt, :], maxs_good[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu_good[tt, :],\n scale=sigma_good_diag[:])\n sigma_bad_diag = np.diag(sigma_bad[tt, :, :])\n if max_var is not None:\n sigma_bad_diag = np.min(np.vstack((sigma_bad_diag, np.ones_like(sigma_bad_diag)*max_var)), axis=0)\n mins_bad[tt, :], maxs_bad[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu_bad[tt, :],\n scale=sigma_bad_diag[:])\n sigma_prev_diag = np.diag(sigma_prev[tt, :, :])\n mins_prev[tt, :], maxs_prev[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu_prev[tt, :],\n scale=sigma_prev_diag[:])\n\n for ii in range(axs_act.size):\n ax = axs_act[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dU:\n ax.set_title(\"Action %d\" % (ii+1))\n ax.plot(mu[:, u_idxs[ii]], label=(\"itr %d\" % iteration_ids[gps][itr]), zorder=10)\n ax.plot(mu_prev[:, u_idxs[ii]], label=(\"Prev itr %d\" % iteration_ids[gps][itr]), zorder=9)\n ax.plot(mu_good[:, u_idxs[ii]], label=(\"Good itr %d\" % iteration_ids[gps][itr]), zorder=7)\n ax.plot(mu_bad[:, u_idxs[ii]], label=(\"Bad itr %d\" % iteration_ids[gps][itr]), zorder=8)\n if plot_confidence_interval:\n ax.fill_between(range(T), mins[:, u_idxs[ii]], maxs[:, u_idxs[ii]], alpha=alpha_conf_int, zorder=4)\n ax.fill_between(range(T), mins_prev[:, u_idxs[ii]], maxs_prev[:, u_idxs[ii]], alpha=alpha_conf_int, zorder=3)\n ax.fill_between(range(T), mins_good[:, u_idxs[ii]], maxs_good[:, u_idxs[ii]], alpha=alpha_conf_int, zorder=1)\n ax.fill_between(range(T), mins_bad[:, u_idxs[ii]], maxs_bad[:, u_idxs[ii]], alpha=alpha_conf_int, zorder=2)\n if plot_legend:\n legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n for ii in range(axs_state.size):\n ax = axs_state[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dX:\n ax.set_title(\"State %d\" % (ii+1))\n ax.plot(mu[:, x_idxs[ii]], label=(\"itr %d\" % iteration_ids[gps][itr]), zorder=10)\n ax.plot(mu_prev[:, x_idxs[ii]], label=(\"Prev itr %d\" % iteration_ids[gps][itr]), zorder=9)\n ax.plot(mu_good[:, x_idxs[ii]], label=(\"Good itr %d\" % iteration_ids[gps][itr]), zorder=7)\n ax.plot(mu_bad[:, x_idxs[ii]], label=(\"Bad itr %d\" % iteration_ids[gps][itr]), zorder=8)\n if plot_confidence_interval:\n ax.fill_between(range(T), mins[:, x_idxs[ii]], maxs[:, x_idxs[ii]], alpha=alpha_conf_int, zorder=4)\n ax.fill_between(range(T), mins_prev[:, x_idxs[ii]], maxs_prev[:, x_idxs[ii]], alpha=alpha_conf_int, zorder=3)\n ax.fill_between(range(T), mins_good[:, x_idxs[ii]], maxs_good[:, x_idxs[ii]], alpha=alpha_conf_int, zorder=1)\n ax.fill_between(range(T), mins_bad[:, x_idxs[ii]], maxs_bad[:, x_idxs[ii]], alpha=alpha_conf_int, zorder=2)\n if plot_legend:\n legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n 
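# --- Aside: Gaussian KL divergence --------------------------------------------
# traj_distr_kl_alt (printed as KL_div(g||b) above) is assumed to accumulate
# the closed-form KL between Gaussians over time steps. For one step, with
# N0 = (mu0, S0) and N1 = (mu1, S1) in d dimensions:
import numpy as np

def gaussian_kl(mu0, sig0, mu1, sig1):
    d = mu0.shape[0]
    sig1_inv = np.linalg.inv(sig1)
    diff = mu1 - mu0
    _, logdet0 = np.linalg.slogdet(sig0)
    _, logdet1 = np.linalg.slogdet(sig1)
    return 0.5 * (np.trace(sig1_inv.dot(sig0)) + diff.dot(sig1_inv).dot(diff)
                  - d + logdet1 - logdet0)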
legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\nif options['plot_3d_duality_traj']:\n distance_idxs = [24, 25, 26] # NOT TO USE -1, -2, etc because it will get the mu and variance of u !!!\n linestyle = '-'\n linewidth = 1.0\n marker = None\n markersize = 5.0\n markeredgewidth = 1.0\n alpha = 1.0\n\n gauss_linestyle = ':'\n gauss_linewidth = 0.2\n gauss_marker = None\n gauss_markersize = 2.0\n gauss_markeredgewidth = 0.2\n gauss_alpha = 0.3\n\n plot_gaussian = False\n\n views = ['XY', 'XZ']\n\n #des_colormap = [colormap(i) for i in np.linspace(0, 1, total_itr)]\n des_colormap = [colormap(i) for i in np.linspace(0, 1, 4)] # prev, cur, bad_prev, good_prev\n\n for cond in range(total_cond):\n fig_3d_traj = plt.figure()\n lines = list()\n labels = list()\n\n for vv, view in enumerate(views):\n ax_traj = fig_3d_traj.add_subplot(1, len(views), vv+1, projection='3d')\n ax_traj.set_prop_cycle('color', des_colormap)\n plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n plot = ax_traj.plot([0], [0], [0], color='green', marker='o', markersize=10)\n\n fig_3d_traj.canvas.set_window_title(\"Expected Trajectories | Condition %d\" % cond)\n ax_traj.set_xlabel('X')\n ax_traj.set_ylabel('Y')\n ax_traj.set_zlabel('Z')\n\n if view == 'XY':\n azim = 0.\n elev = 90.\n elif view == 'XZ':\n azim = 90.\n elev = 0.\n elif view == 'YZ':\n azim = 90.\n elev = 90.\n else:\n raise AttributeError(\"Wrong view %s\" % view)\n\n ax_traj.view_init(elev=elev, azim=azim)\n\n #for itr in range(total_itr):\n for itr in [-1]:\n traj_distr = iteration_data_list[itr][cond].traj_distr\n traj_info = iteration_data_list[itr][cond].traj_info\n\n prev_traj_distr = iteration_data_list[itr-1][cond].traj_distr\n prev_traj_info = iteration_data_list[itr-1][cond].traj_info\n\n good_traj_distr = good_duality_info_list[itr-1][cond].traj_dist\n good_traj_info = good_trajectories_info_list[itr-1][cond]\n bad_traj_distr = bad_duality_info_list[itr-1][cond].traj_dist\n bad_traj_info = bad_trajectories_info_list[itr-1][cond]\n\n mu, sigma = lqr_forward(traj_distr, traj_info)\n mu_prev, sigma_prev = lqr_forward(prev_traj_distr, prev_traj_info)\n mu_good, sigma_good = lqr_forward(good_traj_distr, good_traj_info)\n mu_bad, sigma_bad = lqr_forward(bad_traj_distr, bad_traj_info)\n\n label = \"itr %d\" % iteration_ids[itr]\n label_prev = \"Prev itr %d\" % iteration_ids[itr]\n label_good = \"Good itr %d\" % iteration_ids[itr]\n label_bad = \"Bad itr %d\" % iteration_ids[itr]\n\n xs = np.linspace(5, 0, 100)\n plot = ax_traj.plot(mu[:, distance_idxs[0]],\n mu[:, distance_idxs[1]],\n zs=mu[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[0],\n label=label)[0]\n plot_prev = ax_traj.plot(mu_prev[:, distance_idxs[0]],\n mu_prev[:, distance_idxs[1]],\n zs=mu_prev[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[1],\n label=label)[0]\n plot_good = ax_traj.plot(mu_good[:, distance_idxs[0]],\n mu_good[:, distance_idxs[1]],\n zs=mu_good[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[2],\n label=label)[0]\n plot_bad = ax_traj.plot(mu_bad[:, distance_idxs[0]],\n mu_bad[:, distance_idxs[1]],\n zs=mu_bad[:, distance_idxs[2]],\n linestyle=linestyle, 
linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[3],\n label=label)[0]\n\n if vv == 0:\n lines.append(plot)\n lines.append(plot_prev)\n lines.append(plot_good)\n lines.append(plot_bad)\n labels.append(label)\n labels.append(label_prev)\n labels.append(label_good)\n labels.append(label_bad)\n\n sigma_idx = np.ix_(distance_idxs, distance_idxs)\n plot_3d_gaussian(ax_traj, mu[:, distance_idxs], sigma[:, sigma_idx[0], sigma_idx[1]],\n sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n color=des_colormap[0], alpha=gauss_alpha, label='',\n markeredgewidth=gauss_markeredgewidth, marker=marker, markersize=markersize)\n\n # plot_3d_gaussian(ax_traj, mu_prev[:, distance_idxs], sigma_prev[:, sigma_idx[0], sigma_idx[1]],\n # sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n # color=des_colormap[1], alpha=gauss_alpha, label='',\n # markeredgewidth=gauss_markeredgewidth, marker=marker, markersize=markersize)\n\n if plot_gaussian:\n plot_3d_gaussian(ax_traj, mu_good[:, distance_idxs], sigma_good[:, sigma_idx[0], sigma_idx[1]],\n sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n color=des_colormap[2], alpha=gauss_alpha, label='',\n markeredgewidth=gauss_markeredgewidth, marker=marker, markersize=markersize)\n\n plot_3d_gaussian(ax_traj, mu_bad[:, distance_idxs], sigma_bad[:, sigma_idx[0], sigma_idx[1]],\n sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n color=des_colormap[3], alpha=gauss_alpha, label='',\n markeredgewidth=gauss_markeredgewidth, marker=marker, markersize=markersize)\n\n X = np.append(mu[:, distance_idxs[0]], 0)\n Y = np.append(mu[:, distance_idxs[1]], 0)\n Z = np.append(mu[:, distance_idxs[2]], 0)\n mid_x = (X.max() + X.min()) * 0.5\n mid_y = (Y.max() + Y.min()) * 0.5\n mid_z = (Z.max() + Z.min()) * 0.5\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n ax_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n ax_traj.set_zlim(mid_z - max_range, mid_z + max_range)\n\n X_prev = np.append(mu_prev[:, distance_idxs[0]], 0)\n Y_prev = np.append(mu_prev[:, distance_idxs[1]], 0)\n Z_prev = np.append(mu_prev[:, distance_idxs[2]], 0)\n mid_x_prev = (X_prev.max() + X_prev.min()) * 0.5\n mid_y_prev = (Y_prev.max() + Y_prev.min()) * 0.5\n mid_z_prev = (Z_prev.max() + Z_prev.min()) * 0.5\n max_range_prev = np.array([X_prev.max()-X_prev.min(), Y_prev.max()-Y_prev.min(), Z_prev.max()-Z_prev.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x_prev - max_range_prev, mid_x_prev + max_range_prev)\n ax_traj.set_ylim(mid_y_prev - max_range_prev, mid_y_prev + max_range_prev)\n ax_traj.set_zlim(mid_z_prev - max_range_prev, mid_z_prev + max_range_prev)\n\n X_good = np.append(mu_good[:, distance_idxs[0]], 0)\n Y_good = np.append(mu_good[:, distance_idxs[1]], 0)\n Z_good = np.append(mu_good[:, distance_idxs[2]], 0)\n mid_x_good = (X_good.max() + X_good.min()) * 0.5\n mid_y_good = (Y_good.max() + Y_good.min()) * 0.5\n mid_z_good = (Z_good.max() + Z_good.min()) * 0.5\n max_range_good = np.array([X_good.max()-X_good.min(), Y_good.max()-Y_good.min(), Z_good.max()-Z_good.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x_good - max_range_good, mid_x_good + max_range_good)\n ax_traj.set_ylim(mid_y_good - max_range_good, mid_y_good + max_range_good)\n ax_traj.set_zlim(mid_z_good - max_range_good, mid_z_good + 
max_range_good)\n\n X_bad = np.append(mu_bad[:, distance_idxs[0]], 0)\n Y_bad = np.append(mu_bad[:, distance_idxs[1]], 0)\n Z_bad = np.append(mu_bad[:, distance_idxs[2]], 0)\n mid_x_bad = (X_bad.max() + X_bad.min()) * 0.5\n mid_y_bad = (Y_bad.max() + Y_bad.min()) * 0.5\n mid_z_bad = (Z_bad.max() + Z_bad.min()) * 0.5\n max_range_bad = np.array([X_bad.max()-X_bad.min(), Y_bad.max()-Y_bad.min(), Z_bad.max()-Z_bad.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x_bad - max_range_bad, mid_x_bad + max_range_bad)\n ax_traj.set_ylim(mid_y_bad - max_range_bad, mid_y_bad + max_range_bad)\n ax_traj.set_zlim(mid_z_bad - max_range_bad, mid_z_bad + max_range_bad)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_3d_traj']:\n distance_idxs = [24, 25, 26] # NOT TO USE -1, -2, etc because it will get the mu and variance of u !!!\n linestyle = '-'\n linewidth = 2.5\n marker = None\n markersize = 5.0\n markeredgewidth = 1.0\n alpha = 1.0\n\n gauss_linestyle = ':'\n gauss_linewidth = 0.2\n gauss_marker = None\n gauss_markersize = 2.0\n gauss_markeredgewidth = 0.2\n gauss_alpha = 0.3\n\n views = ['XY', 'XZ']\n\n des_colormap = [colormap(i) for i in np.linspace(0, 1, total_itr)]\n\n for cond in range(total_cond):\n fig_3d_traj = plt.figure()\n lines = list()\n labels = list()\n\n for vv, view in enumerate(views):\n ax_traj = fig_3d_traj.add_subplot(1, len(views), vv+1, projection='3d')\n ax_traj.set_prop_cycle('color', des_colormap)\n plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n plot = ax_traj.plot([0], [0], [0], color='green', marker='o', markersize=10)\n\n fig_3d_traj.canvas.set_window_title(\"Expected Trajectories | Condition %d\" % cond)\n ax_traj.set_xlabel('X')\n ax_traj.set_ylabel('Y')\n ax_traj.set_zlabel('Z')\n\n if view == 'XY':\n azim = 0.\n elev = 90.\n elif view == 'XZ':\n azim = 90.\n elev = 0.\n elif view == 'YZ':\n azim = 90.\n elev = 90.\n else:\n raise AttributeError(\"Wrong view %s\" % view)\n\n ax_traj.view_init(elev=elev, azim=azim)\n\n for itr in range(total_itr):\n traj_distr = iteration_data_list[itr][cond].traj_distr\n traj_info = iteration_data_list[itr][cond].traj_info\n\n mu, sigma = lqr_forward(traj_distr, traj_info)\n\n label = \"itr %d\" % iteration_ids[itr]\n\n xs = np.linspace(5, 0, 100)\n plot = ax_traj.plot(mu[:, distance_idxs[0]],\n mu[:, distance_idxs[1]],\n zs=mu[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n if vv == 0:\n lines.append(plot)\n labels.append(label)\n\n sigma_idx = np.ix_(distance_idxs, distance_idxs)\n plot_3d_gaussian(ax_traj, mu[:, distance_idxs], sigma[:, sigma_idx[0], sigma_idx[1]],\n sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n color=des_colormap[itr], alpha=gauss_alpha, label='',\n markeredgewidth=gauss_markeredgewidth)\n\n X = np.append(mu[:, distance_idxs[0]], 0)\n Y = np.append(mu[:, distance_idxs[1]], 0)\n Z = np.append(mu[:, distance_idxs[2]], 0)\n mid_x = (X.max() + X.min()) * 0.5\n mid_y = (Y.max() + Y.min()) * 0.5\n mid_z = (Z.max() + Z.min()) * 0.5\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n ax_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n ax_traj.set_zlim(mid_z - max_range, mid_z + max_range)\n\n # One 
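# --- Aside: one bounding cube for several 3-D trajectories ---------------------
# In plot_3d_duality_traj the x/y/z limits are recomputed separately for the
# current, previous, good and bad trajectories, so only the last set_*lim
# triple takes effect. A sketch that fits a single equal-aspect cube around
# all of them (each trajectory a (T, 3) array; the origin is included, as in
# the np.append(..., 0) calls above):
import numpy as np

def set_equal_3d_limits(ax_3d, *trajs):
    pts = np.vstack([np.vstack((t, np.zeros((1, 3)))) for t in trajs])
    mid = 0.5 * (pts.max(axis=0) + pts.min(axis=0))
    half = 0.5 * (pts.max(axis=0) - pts.min(axis=0)).max()
    ax_3d.set_xlim(mid[0] - half, mid[0] + half)
    ax_3d.set_ylim(mid[1] - half, mid[1] + half)
    ax_3d.set_zlim(mid[2] - half, mid[2] + half)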
legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.)\n legend.get_frame().set_alpha(0.4)\n\nif options['plot_3d_pol_traj']:\n last_n_iters = 5\n\n distance_idxs = [24, 25, 26] # NOT TO USE -1, -2, etc because it will get the mu and variance of u !!!\n plot_type = '2d' # 3d or 2d\n linestyle = '-'\n linestyle_failure = ':'\n linewidth = 3.0\n marker = None\n markersize = 5.0\n markeredgewidth = 1.0\n alpha = 1.0\n\n #ax_mins = [-0.15, -0.2, -0.2]\n #ax_maxs = [0.15, 0.3, 0.4]\n ax_mins = [-0.0, -0.2, -0.2]\n ax_maxs = [0.0, 0.3, 0.4]\n ax_ticks = [[-0.1, 0.0, 0.1, 0.2],\n [-0.1, 0.0, 0.1, 0.2],\n [-0.1, 0.0, 0.1, 0.2, 0.3]]\n\n ellipse_size = [0.05, 0.02, 0.1]\n\n\n background_dir = 'drill_icons' # 'drill_icons' # 'drill_3d', 'drill_icons', 'drill_icons_svg'\n image_format = 'png' # 'png'\n fig_scale = 0.255/660.\n fig_size = np.array([700, 700, 700])*fig_scale\n\n size_x = 0.1\n size_y = 0.1\n size_z = 0.3\n relative_zero = np.array([0.00, -size_y/2-0.02, size_z/2+0.02])\n\n gauss_linestyle = ':'\n gauss_linewidth = 0.2\n gauss_marker = None\n gauss_markersize = 2.0\n gauss_markeredgewidth = 0.2\n gauss_alpha = 0.3\n\n views = ['YZ', 'XZ']\n #views = ['XZ']\n\n\n des_colormap = [colormap(i) for i in np.linspace(0, 1, total_itr)]\n if last_n_iters is not None:\n des_colormap[-last_n_iters:] = [colormap(i) for i in np.linspace(0, 1, last_n_iters)]\n\n samples_idx = -1\n\n #fig_3d_trajs = plt.figure()\n for cond in range(total_cond):\n for gps in range(total_gps):\n #fig_3d_traj = plt.figure()\n fig_3d_traj, ax_trajs = plt.subplots(1, len(views), sharex=False, sharey=True)\n\n #fig_3d_trajs = [plt.figure() for _ in range(len(views))]\n\n # ax_traj = fig_3d_traj.add_subplot(1, len(views), vv+1, projection='3d')\n lines = list()\n labels = list()\n\n fig_3d_traj.canvas.set_window_title(\"Expected Trajectories for %s Condition %d\" % (gps_models_labels[gps], cond))\n\n if plot_type == '3d':\n projection = '3d'\n else:\n projection = None\n\n for vv, view in enumerate(views):\n #if vv > 0:\n # ax_traj[vv] = fig_3d_traj.add_subplot(1, len(views), vv+1, projection=projection, sharey=ax_traj)\n #else:\n # ax_traj[vv] = fig_3d_traj.add_subplot(1, len(views), vv+1, projection=projection)\n #ax_traj = fig_3d_trajs[vv].add_subplot(1, 1, 1, projection=projection)\n\n ax_trajs[vv].set_prop_cycle('color', des_colormap)\n if plot_type == '3d':\n plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n plot = ax_traj.plot([0], [0], [0], color='pink', marker='o', markersize=30)\n else:\n #plot = ax_traj.plot([0], [0], color='green', marker='o', markersize=10)\n #plot = ax_trajs[vv].plot([0], [0], color='lightgreen', marker='o', markersize=30, markeredgecolor='limegreen')\n ax_names = ['X', 'Y', 'Z']\n x_index = ax_names.index(view[0])\n y_index = ax_names.index(view[1])\n e = Ellipse(xy=[0, 0], width=ellipse_size[x_index], height=ellipse_size[y_index], angle=0)\n ax_trajs[vv].add_artist(e)\n e.set_facecolor('lightgreen')\n e.set_edgecolor('limegreen')\n\n\n #fig_3d_trajs[vv].canvas.set_window_title(\"Expected Trajectories for %s| Condition %d | view %s\" % (gps_models_labels[gps], cond, view))\n if plot_type == '3d':\n ax_traj.set_xlabel('X')\n ax_traj.set_ylabel('Y')\n ax_traj.set_zlabel('Z')\n\n if view == 'XY':\n azim = 0.\n elev = 90.\n elif view == 'XZ':\n azim = 90.\n elev = 0.\n elif view == 'YZ':\n azim = 90.\n elev = 90.\n else:\n raise AttributeError(\"Wrong view %s\" % view)\n\n ax_traj.view_init(elev=elev, azim=azim)\n else:\n 
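# --- Aside: table-driven view selection ----------------------------------------
# The repeated if/elif chains that map view names to camera angles can be
# replaced by a lookup (angles copied from the chains above):
VIEW_ANGLES = {'XY': (90., 0.), 'XZ': (0., 90.), 'YZ': (90., 90.)}  # (elev, azim)
# elev, azim = VIEW_ANGLES[view]; ax_traj.view_init(elev=elev, azim=azim)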
ax_names = ['X', 'Y', 'Z']\n x_index = ax_names.index(view[0])\n y_index = ax_names.index(view[1])\n ax_trajs[vv].set_xlabel(ax_names[x_index] + ' (m)', fontsize=20, weight='bold')\n ax_trajs[vv].set_ylabel(ax_names[y_index] + ' (m)', fontsize=20, weight='bold')\n ax_trajs[vv].tick_params(axis='x', labelsize=15)\n ax_trajs[vv].tick_params(axis='y', labelsize=15)\n\n if vv > 0:\n ax_trajs[vv].yaxis.label.set_visible(False)\n\n # Image\n if background_dir is not None:\n print('Using background: /images/'+background_dir+'/'+view+'_plot.'+image_format)\n current_path = os.path.dirname(os.path.realpath(__file__))\n img = plt.imread(current_path+'/images/'+background_dir+'/'+view+'_plot.'+image_format)\n # Image\n # if view == 'YZ':\n # center_image = np.array([215, 680])\n # elif view == 'XZ':\n # center_image = np.array([315, 680])\n center_image = np.array([315, 215, 20])\n #center_image = np.array([315, 215, 680])\n\n if last_n_iters is None or last_n_iters > len(iteration_ids[gps]):\n iteration_range = range(len(iteration_ids[gps]))\n else:\n iteration_range = range(-last_n_iters, 0)\n\n for itr in iteration_range:\n # traj_distr = iteration_data_list[itr][cond].traj_distr\n # traj_info = iteration_data_list[itr][cond].traj_info\n # mu, sigma = lqr_forward(traj_distr, traj_info)\n\n mu = iteration_data_list[gps][itr][cond].pol_info.policy_samples.get_states()[samples_idx, :, :]\n\n if itr == iteration_range[0]:\n init = mu[0, distance_idxs]\n plot = ax_trajs[vv].plot([init[x_index]], [init[y_index]], color='pink', marker='o', markersize=10,\n markeredgecolor='mediumvioletred')\n\n\n T = mu.shape[0]\n dX = mu.shape[1]\n max_good = None\n for t in range(T):\n if np.any(np.abs(mu[t, indeces_drill]) > failure_limits):\n max_good = t\n break\n\n label = \"Itr. 
%d\" % iteration_ids[gps][itr]\n\n xs = np.linspace(5, 0, 100)\n if plot_type == '3d':\n #plot = ax_traj.plot(mu[:, distance_idxs[0]],\n plot = ax_trajs[vv].plot(mu[:, distance_idxs[0]],\n mu[:, distance_idxs[1]],\n zs=mu[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n else:\n #plot = ax_traj.plot(mu[:, distance_idxs[x_index]],\n #print('Plotting itr %d' % itr)\n plot = ax_trajs[vv].plot(mu[:max_good, distance_idxs[x_index]],\n mu[:max_good, distance_idxs[y_index]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n\n if max_good is not None:\n ax_trajs[vv].plot(mu[max_good:, distance_idxs[x_index]],\n mu[max_good:, distance_idxs[y_index]],\n linestyle=linestyle_failure, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n if background_dir is not None and itr == iteration_range[0]:\n # extent=[left, right, bottom, top]\n extent = [-relative_zero[x_index]-center_image[x_index]*fig_scale,\n -relative_zero[x_index]+fig_size[1]-center_image[x_index]*fig_scale,\n -relative_zero[y_index]-center_image[y_index]*fig_scale,\n -relative_zero[y_index]+fig_size[1]-center_image[y_index]*fig_scale]\n #ax_traj.imshow(img, zorder=0, extent=extent)\n ax_trajs[vv].imshow(img, zorder=0, extent=extent)\n\n plot2 = ax_trajs[vv].plot(mu[-1, distance_idxs[x_index]],\n mu[-1, distance_idxs[y_index]],\n linestyle=linestyle, linewidth=linewidth, marker='P', markersize=10,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n\n if vv == 0:\n lines.append(plot)\n labels.append(label)\n\n # sigma_idx = np.ix_(distance_idxs, distance_idxs)\n # plot_3d_gaussian(ax_traj, mu[:, distance_idxs], sigma[:, sigma_idx[0], sigma_idx[1]],\n # sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n # color=des_colormap[itr], alpha=gauss_alpha, label='',\n # markeredgewidth=gauss_markeredgewidth)\n\n # Set axis limits\n if plot_type == '3d':\n X = np.append(mu[:, distance_idxs[0]], 0)\n Y = np.append(mu[:, distance_idxs[1]], 0)\n Z = np.append(mu[:, distance_idxs[2]], 0)\n mid_x = (X.max() + X.min()) * 0.5\n mid_y = (Y.max() + Y.min()) * 0.5\n mid_z = (Z.max() + Z.min()) * 0.5\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n ax_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n ax_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n ax_traj.set_zlim(mid_z - max_range, mid_z + max_range)\n else:\n if background_dir is None:\n X = np.append(mu[:, distance_idxs[x_index]], 0)\n Y = np.append(mu[:, distance_idxs[y_index]], 0)\n mid_x = (X.max() + X.min()) * 0.5\n mid_y = (Y.max() + Y.min()) * 0.5\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min()]).max() / 2.0\n #ax_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n #ax_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n ax_trajs[vv].set_xlim(min(-relative_zero[x_index], mid_x - max_range), max(-relative_zero[x_index], mid_x + max_range))\n ax_trajs[vv].set_ylim(min(-relative_zero[y_index], mid_y - max_range), max(-relative_zero[y_index], mid_y + max_range))\n plt.axis('equal')\n plt.axis([min(-relative_zero[x_index], mid_x - max_range),\n max(-relative_zero[x_index], mid_x + max_range),\n 
min(-relative_zero[y_index], mid_y - max_range),\n max(-relative_zero[y_index], mid_y + max_range)])\n else:\n #ymin, ymax = ax_traj.get_ylim()np.\n #xmin, xmax = ax_traj.get_xlim()\n ax_trajs[vv].set_xlim(ax_mins[x_index], ax_maxs[x_index])\n ax_trajs[vv].set_ylim(ax_mins[y_index], ax_maxs[y_index])\n #print(np.arange(ax_mins[x_index], ax_maxs[x_index], 0.1))\n #print(np.arange(ax_mins[y_index], ax_maxs[y_index], 0.1))\n ax_trajs[vv].set_xticks(ax_ticks[x_index])\n ax_trajs[vv].set_yticks(ax_ticks[y_index])\n\n plt.tight_layout()\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=len(itr_to_load), labelspacing=0., fontsize=20)\n #legend = plt.figlegend(lines, labels, loc='center right', ncol=5, labelspacing=0.)\n #legend.get_frame().set_alpha(0.4)\n legend.get_frame().set_alpha(1.0)\n legend.get_frame().set_edgecolor('k')\n\n\nif options['plot_policy_costs']:\n plots_type = 'iteration' # 'iteration' or 'episode'\n include_last_T = False # Only in iteration\n iteration_to_plot = -1\n plot_cost_types = False\n colormap = plt.cm.rainbow # nipy_spectral, Set1, Paired, winter, rainbow\n\n total_cond = len(pol_sample_lists_costs[0][0])\n total_itr = len(pol_sample_lists_costs[0])\n\n temp_array = np.zeros(len(iteration_ids[-1]))\n\n if plots_type.lower() == 'iteration':\n #marker = 'o'\n marker = None\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy Costs Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n des_colormap = [colormap(i) for i in np.linspace(0, 1, total_gps)]\n\n lines = list()\n labels = list()\n\n min_iteration = np.inf\n max_iteration = -np.inf\n for gps in range(total_gps):\n mean_costs = np.zeros(len(iteration_ids[gps]))\n max_costs = np.zeros(len(iteration_ids[gps]))\n min_costs = np.zeros(len(iteration_ids[gps]))\n std_costs = np.zeros(len(iteration_ids[gps]))\n\n for itr in range(len(iteration_ids[gps])):\n total_samples = len(pol_sample_lists_costs[gps][itr][cond])\n samples_cost_sum = pol_sample_lists_costs[gps][itr][cond].sum(axis=1)\n\n temp_array[itr] = samples_cost_sum[-1]\n\n mean_costs[itr] = samples_cost_sum.mean()\n max_costs[itr] = samples_cost_sum.max()\n min_costs[itr] = samples_cost_sum.min()\n std_costs[itr] = samples_cost_sum.std()\n\n if iteration_ids[gps][itr] < min_iteration:\n min_iteration = iteration_ids[gps][itr]\n\n if iteration_ids[gps][itr] > max_iteration:\n max_iteration = iteration_ids[gps][itr]\n\n line = ax.plot(iteration_ids[gps], mean_costs, marker=gps_models_markers[gps],\n label=gps_models_labels[gps], linestyle=gps_models_line_styles[gps],\n color=gps_models_colors[gps])[0]\n ax.fill_between(iteration_ids[gps], min_costs, max_costs, alpha=0.5, color=gps_models_colors[gps])\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n lines.append(line)\n labels.append(gps_models_labels[gps])\n\n\n #ax.set_title('Policy Costs | Condition %d' % cond)\n\n # Composition cost\n if plot_cost_types:\n total_cost_types = len(pol_sample_lists_cost_compositions[-1][-1][-1])\n mean_cost_types = np.zeros([len(iteration_ids[gps]), total_cost_types])\n max_cost_types = np.zeros([len(iteration_ids[gps]), total_cost_types])\n min_cost_types = np.zeros([len(iteration_ids[gps]), total_cost_types])\n std_cost_types = np.zeros([len(iteration_ids[gps]), total_cost_types])\n for itr in range(total_itr):\n total_samples = len(pol_sample_lists_cost_compositions[itr][cond])\n for c in range(total_cost_types):\n cost_type_sum = 
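# --- Aside: mean curve with min/max band -----------------------------------------
# The policy-cost curves in this block all use the same pattern: plot the
# sample mean and shade between the sample min and max. Sketch with synthetic
# data standing in for an (n_samples, n_iterations) cost array:
import numpy as np
import matplotlib.pyplot as plt

data = np.random.RandomState(0).randn(5, 25).cumsum(axis=1)  # hypothetical costs
t = np.arange(data.shape[1])
fig, ax = plt.subplots()
ax.plot(t, data.mean(axis=0))
ax.fill_between(t, data.min(axis=0), data.max(axis=0), alpha=0.5)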
np.zeros(total_samples)\n for n in range(total_samples):\n cost_type_sum[n] = np.sum(pol_sample_lists_cost_compositions[itr][cond][n][c])\n mean_cost_types[itr, c] = cost_type_sum.mean()\n max_cost_types[itr, c] = cost_type_sum.max()\n min_cost_types[itr, c] = cost_type_sum.min()\n std_cost_types[itr, c] = cost_type_sum.std()\n\n for c in range(total_cost_types):\n label = 'Cost type %d' % c\n line = ax.plot(mean_cost_types[:, c], marker=marker, label=label)[0]\n ax.fill_between(range(total_itr), min_cost_types[:, c], max_cost_types[:, c], alpha=0.5)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n lines.append(line)\n labels.append(label)\n\n print('%'*10)\n print('%d' % gps)\n #least_best_index = np.argpartition(temp_array, len(iteration_ids[gps]))[:len(iteration_ids[gps])]\n best_index = np.argpartition(temp_array, len(iteration_ids[gps])-1)[:len(iteration_ids[gps])-1]\n best_index = best_index[np.argsort(temp_array[best_index])]\n worst_index = np.argpartition(temp_array, -(len(iteration_ids[gps])-1))[-(len(iteration_ids[gps])-1):]\n worst_index = worst_index[np.argsort(-temp_array[worst_index])]\n print(best_index)\n print('%'*10)\n\n #ax.set_xticks(range(min_iteration, max_iteration+1))\n ax.set_xlim(min_iteration, max_iteration)\n ax.set_xticks(range(0, 26, 5))\n ax.set_xlim(0, 25)\n ax.set_xlabel(\"Iterations\", fontsize=30, weight='bold')\n ax.set_ylabel(\"Average Cost\", fontsize=30, weight='bold')\n ax.tick_params(axis='x', labelsize=25)\n ax.tick_params(axis='y', labelsize=25)\n\n #legend = ax.legend(loc='best', ncol=1, fontsize=20)\n legend = ax.legend(ncol=1, fontsize=25)\n #legend = plt.figlegend(lines, labels, loc='center right', ncol=1, labelspacing=0., borderaxespad=1.)\n #legend.get_frame().set_alpha(0.4)\n\n else:\n T = pol_sample_lists_costs[0][0].shape[1]\n if include_last_T is False:\n T = T - 1\n\n if iteration_to_plot is not None:\n if iteration_to_plot == -1:\n iteration_to_plot = total_itr - 1\n itr_to_plot = [iteration_to_plot]\n else:\n itr_to_plot = range(total_itr)\n\n for cond in range(total_cond):\n lines = list()\n labels = list()\n\n total_cost_types = len(pol_sample_lists_cost_compositions[-1][-1][-1])\n\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Policy Costs | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n if plot_cost_types:\n colormap_list = [colormap(i) for i in np.linspace(0, 1, (len(itr_to_plot)*total_cost_types)+1)]\n else:\n colormap_list = [colormap(i) for i in np.linspace(0, 1, len(itr_to_plot))]\n ax.set_prop_cycle('color', colormap_list)\n ax.set_title('Policy Costs | Condition %d' % cond)\n\n mean_costs = np.zeros([total_itr, T])\n max_costs = np.zeros([total_itr, T])\n min_costs = np.zeros([total_itr, T])\n std_costs = np.zeros([total_itr, T])\n for itr in itr_to_plot:\n total_samples = len(pol_sample_lists_costs[itr][cond])\n samples_cost = pol_sample_lists_costs[itr][cond][:, :T]\n mean_costs[itr, :] = samples_cost.mean(axis=0)\n max_costs[itr, :] = samples_cost.max(axis=0)\n min_costs[itr, :] = samples_cost.min(axis=0)\n std_costs[itr, :] = samples_cost.std(axis=0)\n label = 'Total Cost (itr%d)' % itr\n line = ax.plot(mean_costs[itr, :], label=label)[0]\n ax.fill_between(range(T), min_costs[itr, :], max_costs[itr, :], alpha=0.5)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n lines.append(line)\n labels.append(label)\n\n # Composition cost\n if plot_cost_types:\n mean_cost_types = np.zeros([total_itr, total_cost_types, T])\n max_cost_types = np.zeros([total_itr, total_cost_types, T])\n 
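# --- Aside: k best/worst without a full sort --------------------------------------
# np.argpartition (used above to rank final-iteration costs) selects the k
# smallest/largest entries in O(n); the follow-up argsort orders only those k:
import numpy as np

costs = np.array([5., 1., 4., 2., 3.])
k = 3
best = np.argpartition(costs, k)[:k]
best = best[np.argsort(costs[best])]          # k lowest costs, ascending
worst = np.argpartition(costs, -k)[-k:]
worst = worst[np.argsort(-costs[worst])]      # k highest costs, descending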
min_cost_types = np.zeros([total_itr, total_cost_types, T])\n std_cost_types = np.zeros([total_itr, total_cost_types, T])\n for itr in itr_to_plot:\n total_samples = len(pol_sample_lists_cost_compositions[itr][cond])\n for c in range(total_cost_types):\n cost_type = np.zeros([total_samples, T])\n for n in range(total_samples):\n cost_type[n, :] = pol_sample_lists_cost_compositions[itr][cond][n][c][:T]\n mean_cost_types[itr, c, :] = cost_type.mean(axis=0)\n max_cost_types[itr, c, :] = cost_type.max(axis=0)\n min_cost_types[itr, c, :] = cost_type.min(axis=0)\n std_cost_types[itr, c, :] = cost_type.std(axis=0)\n\n for c in range(total_cost_types):\n label = 'Cost type %d (itr%d)' % (c, itr)\n line = ax.plot(mean_cost_types[itr, c, :], label=label)[0]\n ax.fill_between(range(T), min_cost_types[itr, c, :], max_cost_types[itr, c, :], alpha=0.5)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n lines.append(line)\n labels.append(label)\n\n plt.xlabel('Time')\n plt.ylabel('Cost')\n ax.set_xlim([0, T])\n ax.set_xticks(np.arange(0, T+2, 50))\n\n legend = plt.figlegend(lines, labels, loc='center right', ncol=1, labelspacing=0., borderaxespad=1.)\n legend.get_frame().set_alpha(0.4)\n\n\nif options['plot_train_errors']:\n linewidth = 2.5\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Errors per Iteration Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n #for ii in range(axs.size):\n # ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n # ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n des_colormap = [colormap(i) for i in np.linspace(0, 1, total_gps)]\n\n lines = list()\n labels = list()\n\n min_iteration = np.inf\n max_iteration = -np.inf\n for gps in range(total_gps):\n N = iteration_data_list[gps][0][cond].sample_list.get_states().shape[0]\n costs = np.zeros((len(iteration_ids[gps]), N))\n errors = np.zeros(len(iteration_ids[gps]))\n print('%'*20)\n for itr in range(len(iteration_ids[gps])):\n #print(\"%d | %d\" % (gps, itr))\n costs[itr, :] = iteration_data_list[gps][itr][cond].cs.sum(axis=1)\n worst_index = np.argsort(-costs[itr, :])\n print('itr%02d | %s | %f' % (itr, worst_index, costs[itr, worst_index[0]]))\n\n states = iteration_data_list[gps][itr][cond].sample_list.get_states()\n drill_distance = states[:, :, indeces_drill]\n N = drill_distance.shape[0]\n T = drill_distance.shape[1]\n\n error_count = 0\n for nn in range(N):\n # # max_t_errors:\n # acum_t_error = 0\n # for t in range(T):\n # if np.any(np.abs(drill_distance[nn, t, :]) > failure_limits):\n # acum_t_error += 1\n # if acum_t_error > max_t_acum_error:\n # error_count += 1\n # break\n\n # only last error\n if np.any(np.abs(drill_distance[nn, -1, :]) > failure_limits):\n error_count += 1\n\n\n errors[itr] = float(error_count)/N\n\n if iteration_ids[gps][itr] < min_iteration:\n min_iteration = iteration_ids[gps][itr]\n\n if iteration_ids[gps][itr] > max_iteration:\n max_iteration = iteration_ids[gps][itr]\n\n #line = ax.plot(iteration_ids[gps], errors, color=des_colormap[gps], label=gps_models_labels[gps],\n line = ax.plot(iteration_ids[gps], errors, label=gps_models_labels[gps], color=gps_models_colors[gps],\n linestyle=gps_models_line_styles[gps], linewidth=linewidth, marker=gps_models_markers[gps])[0]\n\n lines.append(line)\n labels.append(gps_models_labels[gps])\n\n worst_ranking = np.dstack(np.unravel_index(np.argsort(-costs.ravel()), costs.shape))\n print('&'*10)\n print('%s' % worst_ranking[0, :15, 
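# --- Aside: failure rate without the sample loop ------------------------------------
# plot_train_errors flags a sample as failed when its final state leaves the
# allowed box; the per-iteration rate reduces to one vectorized mean
# (states: (N, T, dX) array, idxs/limits as in this script):
import numpy as np

def failure_rate(states, idxs, limits):
    final = states[:, -1, idxs]
    return float(np.mean(np.any(np.abs(final) > limits, axis=1)))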
:])\n for ww in range(15):\n print('%s' % costs[worst_ranking[0, ww, 0], worst_ranking[0, ww, 1]])\n\n ax.set_xlabel(\"Iterations\", fontsize=30, weight='bold')\n ax.set_ylabel(\"Bad Samples / Total Samples\", fontsize=30, weight='bold')\n ax.tick_params(axis='x', labelsize=25)\n ax.tick_params(axis='y', labelsize=25)\n #ax.set_xticks(range(min_iteration, max_iteration+1, 5))\n ax.set_xlim(min_iteration, max_iteration)\n ax.set_xticks(range(0, 26, 5))\n ax.set_xlim(0, 25)\n #ax_trajs[vv].set_ylim(ax_mins[y_index], ax_maxs[y_index])\n\n\n #legend = plt.figlegend(lines, labels, loc='center right', ncol=1, labelspacing=0., #borderaxespad=1.,\n # fontsize=20, bbox_to_anchor=(5.0, 0.0))\n #legend = ax.legend(lines, labels, loc='center right', ncol=1, labelspacing=0., #borderaxespad=1.,\n # fontsize=20, bbox_to_anchor=(-5.0, 0.0))\n #legend = ax.legend(loc='center right', bbox_to_anchor=(1.5, 0), ncol=1, fontsize=20)\n legend = ax.legend(ncol=1, fontsize=25)\n legend = ax.legend(borderaxespad=0.5, fontsize=20)\n box = ax.get_position()\n #ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n #legend.get_frame().set_alpha(0.4)\n #legend.get_frame().set_edgecolor('k')\n\nplt.show(block=False)\n\nraw_input('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.5448552966117859, "alphanum_fraction": 0.5984806418418884, "avg_line_length": 32.520599365234375, "blob_id": "2094293d03fcbbab66017a20efb1126373c723e9", "content_id": "a72e56b464d6dadbfa48d7aef415f7a135199ffa", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8951, "license_type": "permissive", "max_line_length": 119, "num_lines": 267, "path": "/scenarios/tests/cost-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_fk_relative import CostFKRelative\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_CONSTANT, RAMP_FINAL_ONLY\nfrom robolearn.old_costs.cost_utils import evall1l2term\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.robot_model import RobotModel\nfrom robolearn.old_utils.sample import Sample\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import create_hand_relative_pose\n\nnp.set_printoptions(precision=8, suppress=True, linewidth=1000)\n\n# ########## #\n# Parameters #\n# ########## #\n\nT = 100\ndX = 54\ndU = 14\ndO = 54\n# BOX\nbox_x = 0.75-0.05\nbox_y = 0.00\nbox_z = 0.0184\nbox_yaw = 0 # Degrees\nbox_size = [0.4, 0.5, 0.3]\nbox_relative_pose = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw)\n\n\n# ########### #\n# Environment #\n# ########### #\nclass FakeEnv(object):\n def __init__(self, dim_o, dim_x, dim_u):\n self.dO = dim_o\n self.dX = dim_x\n self.dU = dim_u\n\n def get_state_dim(self):\n return self.dX\n\n def get_action_dim(self):\n return self.dU\n\n def get_obs_dim(self):\n return self.dO\n\n @staticmethod\n def get_env_info():\n return ''\n\nbigman_env = FakeEnv(dO, dX, dU)\n\n\n# ########### #\n# Robot Model #\n# ########### #\n\n# Robot Model (It is used to calculate the IK cost)\nrobot_urdf_file = 
os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\n# robot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf_file)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\n\n\n# ###### #\n# Sample #\n# ###### #\nsample = Sample(bigman_env, T)\n\n# Robot Configuration\n# qLA = np.array([0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633])\nqLA = np.deg2rad([0, 50, 0, -75, 0, 0, 0])\n# qRA = np.array([0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\nqRA = np.deg2rad([0, -50, 0, -75, 0, 0, 0])\n\ndLA = [0.1, 0.1, 0.1, -0.1, 0.1, 0.1]\ndRA = [0.1, 0.1, 0.1, -0.1, 0.1, 0.1]\n\nall_actions = np.ones((T, dU))*1\nall_obs = np.zeros((T, dO))\nall_states = np.zeros((T, dX))\nall_states[:, :7] = np.tile(qLA, (T, 1))\nall_states[:, 7:14] = np.tile(qRA, (T, 1))\n#all_states[:, -7:] = np.tile(box_relative_pose[[3, 4, 5, 6, 0, 1, 2]], (T, 1))\nall_states[:, 42:48] = np.tile(dLA, (T, 1))\nall_states[:, 48:54] = np.tile(dLA, (T, 1))\n\nsample.set_acts(all_actions) # Set all actions at the same time\nsample.set_obs(all_obs) # Set all obs at the same time\nsample.set_states(all_states) # Set all states at the same time\n\n# #### #\n# Cost #\n# #### #\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(dU) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\ntarget_state = box_relative_pose[[3, 4, 5, 6, 0, 1, 2]]\nstate_cost = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'l1': 0.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 5.0, # Weight multiplier on final time step.\n 'data_types': {\n 'optitrack': {\n 'wp': np.ones_like(target_state), # State weights - must be set.\n 'target_state': target_state, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': range(28, 35)\n },\n },\n}\n\n# FK Cost\nleft_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 0, 0, 0, 1],\n hand_x=0.0, hand_y=box_size[1]/2-0.02, hand_z=0.0, hand_yaw=0)\nleft_hand_rel_pose[:] = left_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'\nprint(left_hand_rel_pose)\nLAfk_cost = {\n 'type': CostFKRelative,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_rel_pose': left_hand_rel_pose,\n 'rel_data_type': 'state', # 'state' or 'observation'\n #'rel_data_name': 'optitrack', # Name of the state/observation\n 'rel_idx': range(28, 35),\n 'data_idx': range(0, 14),\n 'op_point_name': LH_name,\n 'op_point_offset': l_soft_hand_offset,\n 'joint_ids': bigman_params['joint_ids']['BA'],\n 'robot_model': robot_model,\n #'wp': np.array([1.2, 0, 0.8, 1, 1.2, 0.8]), # one dim less because 'quat' error | 1)orient 2)pos\n #'wp': np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'l1': 0.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1,\n}\n\n\ntarget_distance_left_arm = np.zeros(6)\nLAfk_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_left_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': range(42, 48),\n 'op_point_name': LH_name,\n 'op_point_offset': l_soft_hand_offset,\n 'joints_idx': range(7),\n 'joint_ids': bigman_params['joint_ids']['LA'],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10,\n}\n\n\n\nright_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 0, 0, 0, 1],\n hand_x=0.0, hand_y=-box_size[1]/2+0.02, hand_z=0.0, hand_yaw=0)\nright_hand_rel_pose[:] = right_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'\nRAfk_cost = {\n 'type': CostFKRelative,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT,LINEAR, QUADRATIC, FINAL_ONLY\n 'target_rel_pose': right_hand_rel_pose,\n 'rel_data_type': 'state', # 'state' or 'observation'\n #'rel_data_name': 'optitrack', # Name of the state/observation\n 'rel_idx': range(28, 35),\n 'data_idx': range(0, 14),\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joint_ids': bigman_params['joint_ids']['BA'],\n 'robot_model': robot_model,\n #'wp': np.array([1.2, 0, 0.8, 1, 1.2, 0.8]), # one dim less because 'quat' error | 1)orient 2)pos\n #'wp': np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'l1': 0.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 5,\n}\n\n\n\n\ncost_sum = {\n 'type': CostSum,\n #'costs': [act_cost, state_cost, LAfk_cost, RAfk_cost],\n #'weights': [0.1, 5.0, 8.0, 8.0],\n # 'costs': [act_cost, state_cost],#, LAfk_cost, RAfk_cost],\n # 'weights': [0.1, 5.0],\n 'costs': [LAfk_final_cost],\n 'weights': [1.0],\n}\n\ncost1 = LAfk_cost['type'](LAfk_cost)\ncost2 = RAfk_cost['type'](RAfk_cost)\ncost3 = act_cost['type'](act_cost)\ncost4 = state_cost['type'](state_cost)\ncost5 = LAfk_final_cost['type'](LAfk_final_cost)\n#cost = cost_sum['type'](cost_sum)\n\n\nprint(\"Evaluating sample's cost...\")\nl, lx, lu, lxx, luu, lux = cost5.eval(sample)\nprint('----')\nprint(\"l[1]: %f\" % l[1])\nprint(\"l[-1]: %f\" % l[-1])\n# print(lx[1, :])\n# print(lx[-1, :])\n# print(lu[1, :])\n# print(lu[-1, :])\n\n# print(\"l %s\" % str(l.shape))\n# print(\"lx %s\" % str(lx.shape))\n# print(\"lu %s\" % str(lu.shape))\n# print(\"lxx %s\" % str(lxx.shape))\n# print(\"luu %s\" % str(luu.shape))\n# print(\"lux %s\" % str(lux.shape))\n\n\n# l, lx, lu, lxx, luu, lux = cost2.eval(sample)\n# print('%%%%')\n# print(l[1])\n# print(l[-1])\n# print(lx[1, :])\n# print(lx[-1, :])\n# print(lu[1, :])\n# print(lu[-1, :])\n\n# l, lx, lu, lxx, luu, lux = cost3.eval(sample)\n# print('%%%%')\n# print(l[1])\n# print(l[-1])\n# print(lx[1, :])\n# print(lx[-1, :])\n# print(lu[1, :])\n# print(lu[-1, :])\n\nplt.plot(l)\n#plt.plot(lx)\n#plt.plot(lu)\n#plt.plot(lxx)\n#plt.plot(lux)\nplt.show()\n\n" }, { "alpha_fraction": 0.5305535793304443, "alphanum_fraction": 0.548166811466217, "avg_line_length": 32.51807403564453, "blob_id": "33ff9a54753d4dc94e029a9a40496e4c78819e91", "content_id": "76c88a58d34b79bc59b13041abe31c6697f3dc9c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2782, "license_type": "permissive", "max_line_length": 89, "num_lines": 83, "path": "/scripts/sim_centauro_haarnoja.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import argparse\n\nimport joblib\nimport tensorflow as tf\n\nfrom rllab.sampler.utils import rollout\n\nfrom rllab.envs.normalized_env import normalize\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='Path to the snapshot file.')\n parser.add_argument('--max-path-length', '-l', type=int, default=500)\n parser.add_argument('--speedup', '-s', type=float, default=1)\n parser.add_argument('--deterministic', '-d', dest='deterministic',\n action='store_true')\n parser.add_argument('--no-deterministic', '-nd', dest='deterministic',\n action='store_false')\n 
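# The paired store_true/store_false flags above both write to the same\n # 'deterministic' destination; set_defaults(deterministic=True) below\n # supplies the value used when neither flag is passed.\n 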
parser.add_argument('--policy_h', type=int)\n parser.set_defaults(deterministic=True)\n\n args = parser.parse_args()\n\n return args\n\ndef simulate_policy(args):\n with tf.Session() as sess:\n data = joblib.load(args.file)\n if 'algo' in data.keys():\n policy = data['algo'].policy\n # env = data['algo'].env\n else:\n policy = data['policy']\n # env = data['env']\n\n SIM_TIMESTEP = 0.01\n FRAME_SKIP = 1\n DT = SIM_TIMESTEP * FRAME_SKIP\n env_params = dict(\n is_render=True,\n obs_with_img=False,\n active_joints='RA',\n control_mode='tasktorque',\n # _control_mode='torque',\n # _control_mode='velocity',\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n obs_distances=False,\n balance_cost_weight=2.0,\n fall_cost_weight=2.0,\n tgt_cost_weight=2.0,\n balance_done_cost=2.0,#*PATH_LENGTH, # TODO: dont forget same balance weight\n tgt_done_reward=2.0,\n # tgt_cost_weight=5.0,\n # balance_cost_weight=0.0,\n # fall_cost_weight=0.0,\n # tgt_cost_weight=0.0,\n # balance_cost_weight=5.0,\n # fall_cost_weight=7.0,\n ctrl_cost_weight=1.0e-1,\n use_log_distances=True,\n log_alpha_pos=1e-4,\n log_alpha_ori=1e-4,\n goal_tolerance=0.05,\n min_obj_height=0.60,\n max_obj_height=1.20,\n max_obj_distance=0.20,\n max_time=None,\n )\n\n env = normalize(CentauroTrayEnv(**env_params))\n\n with policy.deterministic(args.deterministic):\n while True:\n path = rollout(env, policy,\n max_path_length=args.max_path_length,\n animated=True, speedup=args.speedup)\n input(\"Press a key to re-sample...\")\nif __name__ == \"__main__\":\n args = parse_args()\n simulate_policy(args)\n" }, { "alpha_fraction": 0.6156893968582153, "alphanum_fraction": 0.6160855889320374, "avg_line_length": 42.517242431640625, "blob_id": "188529fb0dbd729ac43830eb4e0ca46e6b0fad6a", "content_id": "6579ab2bdda9aaa2117fdcbaab143a8dce51aafd", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2524, "license_type": "permissive", "max_line_length": 82, "num_lines": 58, "path": "/scripts/plot_experiment.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import plot_process_iu_returns\nfrom robolearn.utils.plots import plot_process_iu_avg_rewards\nfrom robolearn.utils.plots import plot_process_iu_policies\nfrom robolearn.utils.plots import plot_process_iu_values_errors\nfrom robolearn.utils.plots import plot_process_iu_alphas\nfrom robolearn.utils.plots import plot_process_general_data\nfrom robolearn.utils.plots.learning_process_plots import plot_process_haarnoja\nimport json\n\n\ndef main(args):\n # Load environment\n dirname = os.path.dirname(args.file)\n with open(os.path.join(dirname, 'variant.json')) as json_data:\n algo_name = json.load(json_data)['algo_name']\n\n # Plot according to RL algorithm\n if algo_name in ['HIUSAC', 'HIUSACNEW', 'SAC', 'HIUSACEpisodic']:\n plot_process_iu_values_errors(csv_file=args.file, n_unintentional=args.un,\n block=False)\n plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,\n block=False, plot_intentional=args.no_in,\n deterministic=False)\n plot_process_iu_alphas(csv_file=args.file, n_unintentional=args.un,\n block=False)\n plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,\n block=False)\n plot_process_iu_avg_rewards(csv_file=args.file,\n n_unintentional=args.un,\n block=False)\n\n elif algo_name in ['HIUDDPG']:\n plot_process_iu_policies(csv_file=args.file, 
n_unintentional=args.un,\n block=False, plot_intentional=args.no_in,\n deterministic=True)\n plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,\n block=False)\n else:\n plot_process_general_data(csv_file=args.file, block=False)\n\n # plot_process_haarnoja(csv_file=args.file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./progress.csv',\n help='path to the progress.csv file')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n parser.add_argument('--no_in', action='store_false')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.5119675993919373, "alphanum_fraction": 0.5148715376853943, "avg_line_length": 34.291927337646484, "blob_id": "d40197f39e50f559849cda7426ef8769502c620d", "content_id": "3c04179240fba207918e03ea1b4b9b08a8366733", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11364, "license_type": "permissive", "max_line_length": 79, "num_lines": 322, "path": "/robolearn/torch/policies/tanh_gaussian_multi_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom robolearn.torch.utils.nn import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom robolearn.torch.utils.nn import identity\nfrom robolearn.torch.utils.distributions import TanhNormal\n\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n\nclass TanhGaussianMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianMultiPolicy(...)\n action, mean, log_std, _ = policy(obs)\n action, mean, log_std, _ = policy(obs, deterministic=True)\n action, mean, log_std, log_prob = policy(obs, return_log_prob=True)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes,\n unshared_hidden_sizes=None,\n stds=None,\n hidden_activation=F.relu,\n output_activation=identity,\n hidden_w_init=ptu.xavier_initOLD,\n hidden_b_init_val=0,\n output_w_init=ptu.xavier_initOLD,\n output_b_init_val=0,\n shared_layer_norm=False,\n unshared_layer_norm=False,\n layer_norm_kwargs=None,\n **kwargs\n ):\n self.save_init_params(locals())\n super(TanhGaussianMultiPolicy, self).__init__()\n ExplorationPolicy.__init__(self, action_dim)\n\n if layer_norm_kwargs is None:\n layer_norm_kwargs = dict()\n\n self.input_size = obs_dim\n self.output_sizes = action_dim\n self._n_policies = n_policies\n self.hidden_activation = hidden_activation\n self.output_activation = output_activation\n self.shared_layer_norm = shared_layer_norm\n self.unshared_layer_norm = unshared_layer_norm\n self.fcs = []\n self.shared_layer_norms = []\n self.ufcs = [list() for _ in range(self._n_policies)]\n self.unshared_layer_norms = [list() for _ in range(self._n_policies)]\n self.last_fcs = []\n in_size = self.input_size\n\n # Shared Layers\n for i, next_size in enumerate(shared_hidden_sizes):\n fc = 
nn.Linear(in_size, next_size)\n in_size = next_size\n hidden_w_init(fc.weight)\n ptu.fill(fc.bias, hidden_b_init_val)\n self.__setattr__(\"fc{}\".format(i), fc)\n self.fcs.append(fc)\n\n if self.shared_layer_norm:\n ln = LayerNorm(next_size)\n self.__setattr__(\"shared_layer_norm{}\".format(i), ln)\n self.shared_layer_norms.append(ln)\n\n # Unshared Layers\n if unshared_hidden_sizes is not None:\n for i, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_policies):\n ufc = nn.Linear(in_size, next_size)\n hidden_w_init(ufc.weight)\n ptu.fill(ufc.bias, hidden_b_init_val)\n self.__setattr__(\"ufc{}_{}\".format(pol_idx, i), ufc)\n self.ufcs[pol_idx].append(ufc)\n\n if self.unshared_layer_norm:\n ln = LayerNorm(next_size)\n tmp_txt = \"unshared_layer_norm{}_{}\".format(pol_idx, i)\n self.__setattr__(tmp_txt, ln)\n self.unshared_layer_norms[pol_idx].append(ln)\n in_size = next_size\n\n for pol_idx in range(self._n_policies):\n last_fc = nn.Linear(in_size, self._action_dim)\n output_w_init(last_fc.weight)\n ptu.fill(last_fc.bias, output_b_init_val)\n self.__setattr__(\"last_fc{}\".format(pol_idx), last_fc)\n self.last_fcs.append(last_fc)\n\n self.stds = stds\n self.log_std = list()\n if stds is None:\n self.last_fc_log_stds = list()\n for pol_idx in range(self._n_policies):\n last_hidden_size = obs_dim\n if unshared_hidden_sizes is None:\n if len(shared_hidden_sizes) > 0:\n last_hidden_size = shared_hidden_sizes[-1]\n else:\n last_hidden_size = unshared_hidden_sizes[-1]\n last_fc_log_std = nn.Linear(last_hidden_size,\n action_dim)\n hidden_w_init(last_fc_log_std.weight)\n ptu.fill(last_fc_log_std.bias, hidden_b_init_val)\n self.__setattr__(\"last_fc_log_std{}\".format(pol_idx),\n last_fc_log_std)\n self.last_fc_log_stds.append(last_fc_log_std)\n\n else:\n for std in stds:\n self.log_std.append(np.log(stds))\n assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX\n\n def get_action(self, obs_np, **kwargs):\n pol_idxs = kwargs['pol_idxs']\n\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n if len(pol_idxs) > 1:\n actions = [action[0, :] for action in actions]\n else:\n actions = actions[0, :]\n\n for key, vals in info_dict.items():\n if len(pol_idxs) > 1:\n info_dict[key] = [val[0, :] if isinstance(val, np.ndarray)\n else None for val in vals]\n else:\n info_dict[key] = vals[0, :] if isinstance(vals, np.ndarray) \\\n else None\n\n return actions, info_dict\n\n def get_actions(self, obs_np, **kwargs):\n pol_idxs = kwargs['pol_idxs']\n\n actions, info_dict = self.eval_np(obs_np, **kwargs)\n\n if len(pol_idxs) > 1:\n actions = [np_ify(tensor) for tensor in actions]\n else:\n actions = np_ify(actions)\n\n for key, vals in info_dict.items():\n if len(pol_idxs) > 1:\n info_dict[key] = [np_ify(val) for val in vals]\n else:\n info_dict[key] = np_ify(vals)\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n pol_idxs=None,\n deterministic=False,\n return_log_prob=False,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n pol_idxs (iterator):\n deterministic (bool):\n return_log_prob (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n if pol_idxs is None:\n pol_idxs = list(range(self._n_policies))\n\n h = obs\n # Shared Layers\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n\n hs = [h for _ in pol_idxs]\n\n # Unshared Layers\n if len(self.ufcs) > 0:\n for ii, idx in enumerate(pol_idxs):\n for i, fc in enumerate(self.ufcs[idx]):\n hs[ii] = self.hidden_activation(fc(hs[ii]))\n\n means = 
[self.last_fcs[idx](hs[ii])\n for ii, idx in enumerate(pol_idxs)]\n\n if self.stds is None:\n log_stds = [None for _ in pol_idxs]\n stds = [None for _ in pol_idxs]\n for ii, idx in enumerate(pol_idxs):\n log_stds[ii] = self.last_fc_log_stds[idx](hs[ii])\n log_stds[ii] = torch.clamp(log_stds[ii],\n min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n stds[ii] = torch.exp(log_stds[ii])\n else:\n stds = self.stds\n log_stds = self.log_std\n\n log_probs = [None for _ in pol_idxs]\n expected_log_probs = [None for _ in pol_idxs]\n mean_action_log_probs = [None for _ in pol_idxs]\n pre_tanh_values = [None for _ in pol_idxs]\n\n if deterministic:\n actions = [torch.tanh(mean) for mean in means]\n else:\n actions = [None for _ in means]\n for ii in range(len(pol_idxs)):\n mean = means[ii]\n std = stds[ii]\n tanh_normal = TanhNormal(mean, std)\n if return_log_prob:\n actions[ii], pre_tanh_values[ii] = tanh_normal.rsample(\n return_pretanh_value=True\n )\n log_probs[ii] = tanh_normal.log_prob(\n actions[ii],\n pre_tanh_value=pre_tanh_values[ii]\n )\n log_probs[ii] = log_probs[ii].sum(dim=-1, keepdim=True)\n else:\n actions[ii], pre_tanh_values[ii] = \\\n tanh_normal.rsample(return_pretanh_value=True)\n\n if len(pol_idxs) == 1:\n actions = actions[0]\n means = means[0]\n log_stds = log_stds[0]\n log_probs = log_probs[0]\n expected_log_probs = expected_log_probs[0]\n stds = stds[0]\n mean_action_log_probs = mean_action_log_probs[0]\n pre_tanh_values = pre_tanh_values[0]\n\n info_dict = dict(\n mean=means,\n log_std=log_stds,\n log_prob=log_probs,\n expected_log_prob=expected_log_probs,\n std=stds,\n mean_action_log_prob=mean_action_log_probs,\n pre_tanh_value=pre_tanh_values,\n )\n\n return actions, info_dict\n\n def log_action(self, actions, obs, pol_idxs=None):\n if pol_idxs is None:\n pol_idxs = list(range(self._n_policies))\n assert len(pol_idxs) == len(actions)\n\n h = obs\n # Shared Layers\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n\n hs = [h for _ in pol_idxs]\n\n # Unshared Layers\n if len(self.ufcs) > 0:\n for ii, idx in enumerate(pol_idxs):\n for i, fc in enumerate(self.ufcs[idx]):\n hs[ii] = self.hidden_activation(fc(hs[ii]))\n\n means = [self.last_fcs[idx](hs[ii])\n for ii, idx in enumerate(pol_idxs)]\n\n if self.stds is None:\n log_stds = [None for _ in pol_idxs]\n stds = [None for _ in pol_idxs]\n for ii, idx in enumerate(pol_idxs):\n log_stds[ii] = self.last_fc_log_stds[idx](hs[ii])\n log_stds[ii] = torch.clamp(log_stds[ii],\n min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n stds[ii] = torch.exp(log_stds[ii])\n else:\n stds = self.stds\n\n log_probs = [None for _ in pol_idxs]\n\n for ii in range(len(pol_idxs)):\n mean = means[ii]\n std = stds[ii]\n tanh_normal = TanhNormal(mean, std)\n log_probs[ii] = torch.sum(tanh_normal.log_prob(actions),\n dim=-1, keepdim=True)\n\n return log_probs\n\n # z = (actions - mean)/stds\n # return -0.5 * torch.sum(torch.mul(z, z), dim=-1, keepdim=True)\n\n @property\n def n_heads(self):\n return self._n_policies\n" }, { "alpha_fraction": 0.5212877988815308, "alphanum_fraction": 0.5322231650352478, "avg_line_length": 36.844539642333984, "blob_id": "7bfed937bea33492b6e11f944c027eba4947a867", "content_id": "3708f5303b400b909bf82da0672a9fff31f0548b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18015, "license_type": "permissive", "max_line_length": 135, "num_lines": 476, "path": "/scenarios/bigman-ex.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from 
__future__ import print_function\n\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom robolearn.old_utils.iit.iit_robots_params import *\nfrom robolearn.old_envs import BigmanEnv\nfrom robolearn.old_agents import GPSAgent\n\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_model_example import tf_network\n\nfrom robolearn.old_utils.sample import Sample\nfrom robolearn.old_utils.sample_list import SampleList\n\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_QUADRATIC\n\nfrom robolearn.old_utils.algos_utils import IterationData\nfrom robolearn.old_utils.algos_utils import TrajectoryInfo\nfrom robolearn.old_algos.gps.gps import GPS\nfrom robolearn.old_policies.lin_gauss_init import init_lqr, init_pd\nfrom robolearn.old_policies.policy_prior import PolicyPrior # For MDGPS\n\nimport rospy\nfrom robolearn.old_utils.print_utils import *\n\nimport time\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\n# Task parameters\n#update_frequency = 5\nTs = 0.01\nEndTime = 5 # Using final time to define the horizon\n\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\n\nprint(\"\\nCreating Bigman environment...\")\n\n# Robot configuration\ninterface = 'ros'\nbody_part_active = 'LA'\ncommand_type = 'velocity'\nfile_save_restore = \"models/bigman_agent_vars.ckpt\"\n\n\nobservation_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'ros_topic': '/xbotcore/bigman/joint_states',\n 'fields': ['link_position', 'link_velocity', 'effort'],\n 'joints': bigman_params['joint_ids']['UB']}, # Value that can be gotten from robot_params['joints_names']['UB']\n\n {'name': 'ft_left_arm',\n 'type': 'ft_sensor',\n 'ros_topic': '/xbotcore/bigman/ft/l_arm_ft',\n 'fields': ['force', 'torque']},\n\n {'name': 'ft_right_arm',\n 'type': 'ft_sensor',\n 'ros_topic': '/xbotcore/bigman/ft/r_arm_ft',\n 'fields': ['force', 'torque']},\n\n {'name': 'ft_left_leg',\n 'type': 'ft_sensor',\n 'ros_topic': '/xbotcore/bigman/ft/l_leg_ft',\n 'fields': ['force', 'torque']},\n\n {'name': 'ft_right_leg',\n 'type': 'ft_sensor',\n 'ros_topic': '/xbotcore/bigman/ft/r_leg_ft',\n 'fields': ['force', 'torque']},\n\n {'name': 'imu1',\n 'type': 'imu',\n 'ros_topic': '/xbotcore/bigman/imu/imu_link',\n 'fields': ['orientation', 'angular_velocity', 'linear_acceleration']},\n\n {'name': 'optitrack',\n 'type': 'optitrack',\n 'ros_topic': '/optitrack/relative_poses',\n 'fields': ['position', 'orientation'],\n 'bodies': ['LSoftHand', 'RSoftHand', 'box']},\n ]\n\n#observation_active = [{'name': 'imu1',\n# 'type': 'imu',\n# 'ros_topic': '/xbotcore/bigman/imu/imu_link',\n# 'fields': ['orientation', 'angular_velocity', 'linear_acceleration']}]\n\n#state_active = [{'name': 'joint_state',\n# 'type': 'joint_state',\n# 'fields': ['link_position', 'link_velocity'],\n# 'joints': bigman_params['joint_ids']['LA']}] # Value that can be gotten from robot_params['joints_ids']['LA']\n\nstate_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'fields': ['link_position', 'link_velocity'],\n 'joints': bigman_params['joint_ids']['LA']},\n\n {'name': 'optitrack',\n 'type': 'optitrack',\n 'fields': ['position', 'orientation'],\n 'bodies': ['box']}] # check if it is better 
relative position with EE(EEs)\n\n\n\n\n# Create a Bigman robot ROS EnvInterface\nbigman_env = BigmanEnv(interface=interface, mode='simulation',\n body_part_active=body_part_active, command_type=command_type,\n observation_active=observation_active,\n state_active=state_active,\n cmd_freq=int(1/Ts))\n\n# TODO: DOMINGOOOO\n# TODO: Temporally using current state to set one initial condition\ncurrent_state = bigman_env.get_state()\nbigman_env.set_initial_conditions([current_state])\n\naction_dim = bigman_env.action_dim\nstate_dim = bigman_env.state_dim\nobservation_dim = bigman_env.obs_dim\n\nprint(\"Bigman Environment OK. body_part_active:%s (action_dim=%d). Command_type:%s\" % (body_part_active, action_dim, command_type))\n\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nprint(\"\\nCreating Bigman Agent...\")\n\n# Create an Agent\n# Agent option\n#policy_params = {\n# 'network_params': {\n# 'obs_include': [JOINT_ANGLES, JOINT_VELOCITIES],\n# 'obs_vector_data': [JOINT_ANGLES, JOINT_VELOCITIES],\n# 'sensor_dims': SENSOR_DIMS,\n# },\n# 'network_model': tf_network,\n# 'iterations': 1000,\n# 'weights_file_prefix': EXP_DIR + 'policy',\n#}\npolicy_params = {\n 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'iterations': 500, # Inner iteration (Default:5000). Reccomended: 1000?\n 'network_params': {\n 'n_layers': 1, # Hidden layers??\n 'dim_hidden': [40], # Dictionary of size per n_layers\n 'obs_names': bigman_env.get_obs_info()['names'],\n 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n 'batch_size': 15, # TODO: Check if this value is OK (same than name_samples)\n #'num_filters': [5, 10],\n #'obs_include': [JOINT_ANGLES, JOINT_VELOCITIES, RGB_IMAGE], # Deprecated from original GPS code\n #'obs_vector_data': [JOINT_ANGLES, JOINT_VELOCITIES], # Deprecated from original GPS code\n #'obs_image_data': [RGB_IMAGE], # Deprecated from original GPS code\n #'sensor_dims': SENSOR_DIMS, # Deprecated from original GPS code\n #'image_width': IMAGE_WIDTH (80), # For multi_modal_network\n #'image_height': IMAGE_HEIGHT (64), # For multi_modal_network\n #'image_channels': IMAGE_CHANNELS (3), # For multi_modal_network\n }\n}\npolicy = PolicyOptTf(policy_params, observation_dim, action_dim)\n#policy = None\nbigman_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy=policy)\n# Load previous learned variables\n#bigman_agent.load(file_save_restore)\nprint(\"Bigman Agent:%s OK\\n\" % type(bigman_agent))\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n\n# Action Cost #TODO: I think it doesn't have sense if the control is joint position\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n #'l1': 1e-3,\n #'alpha': 1e-2,\n 'target': None, # Target action value\n}\n\n# State Cost\nbox_pose = [-0.7500, # pos x\n 0.0000, # pos y\n 0.0184, # pos z\n 0.0000, # orient x\n 0.0000, # orient y\n 0.0000, # orient z\n 1.0000] # orient w\n\nbox_size = [0.4, 0.5, 0.3]\n\nleft_ee_pose = box_pose\nleft_ee_pose[0] += box_size[0]/2 - 0.05 #\n\ntarget_state = left_ee_pose + box_pose\n# 'B' pose\nstate_cost = {\n 'type': CostState,\n 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. 
RAMP_* :CONSTANT,LINEAR, QUADRATIC, FINAL_ONLY\n 'l1': 0.0,\n 'l2': 1.0,\n 'wp_final_multiplier': 5.0, # Weight multiplier on final time step.\n 'data_types': {\n 'optitrack': {\n 'wp': np.ones_like(target_state), # State weights - must be set.\n 'target_state': target_state, # Target state - must be set.\n 'average': None, #(12, 3),\n 'data_idx': bigman_env.get_state_info(name='optitrack')['idx']\n }\n },\n}\n#state_cost = {\n# 'type': CostState,\n# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT,LINEAR, QUADRATIC, FINAL_ONLY\n# 'l1': 0.0,\n# 'l2': 1.0,\n# 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n# 'data_types': {\n# 'link_position': {\n# 'wp': np.ones_like(target_pos), # State weights - must be set.\n# 'target_state': target_pos, # Target state - must be set.\n# 'average': None, #(12, 3),\n# 'data_idx': bigman_env.get_state_info(name='link_position')['idx']\n# },\n# 'link_velocity': {\n# 'wp': np.ones_like(target_vel), # State weights - must be set.\n# 'target_state': target_vel, # Target state - must be set.\n# 'average': None, #(12, 3),\n# 'data_idx': bigman_env.get_state_info(name='link_velocity')['idx']\n# },\n# },\n#}\n\n# Sum of costs\ncost_sum = {\n 'type': CostSum,\n 'costs': [act_cost, state_cost],\n 'weights': [0.1, 5.0],\n}\n\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\n\n# Learning params\ntotal_episodes = 5\nnum_samples = 5 # Samples for exploration trajs\nresume_training_itr = None # Resume from previous training iteration\nT = int(EndTime/Ts) # Total points\nconditions = 1 # Number of initial conditions\nsample_on_policy = False\ntest_policy_after_iter = False\nkl_step = 0.2\n# init_traj_distr is a list of dict\ninit_traj_distr = {'type': init_lqr,\n 'init_var': 1.0,\n 'stiffness': 1.0,\n 'stiffness_vel': 0.5,\n 'final_weight': 1.0,\n # Parameters for guessing dynamics\n 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.\n 'init_gains': 1*np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n }\n#init_traj_distr = [{'type': init_pd,\n# 'init_var': 0.00001, # initial variance (Default:10)\n# 'pos_gains': 0.001, # position gains (Default:10)\n# 'vel_gains_mult': 0.01, # velocity gains multiplier on pos_gains\n# 'init_action_offset': None,\n# }]\n\n#gps_algo = 'pigps'\n## PIGPS hyperparams\n#gps_algo_hyperparams = {'init_pol_wt': 0.01,\n# 'policy_sample_mode': 'add'\n# }\ngps_algo = 'mdgps'\n# MDGPS hyperparams\ngps_algo_hyperparams = {'init_pol_wt': 0.01,\n 'policy_sample_mode': 'add',\n # Whether to use 'laplace' or 'mc' cost in step adjusment\n 'step_rule': 'laplace',\n 'policy_prior': {'type': PolicyPrior},\n }\nlearn_algo = GPS(agent=bigman_agent, env=bigman_env,\n iterations=total_episodes, num_samples=num_samples,\n T=T, dt=Ts,\n cost=cost_sum,\n conditions=conditions,\n sample_on_policy=sample_on_policy,\n test_after_iter=test_policy_after_iter,\n init_traj_distr=init_traj_distr,\n kl_step=kl_step,\n gps_algo=gps_algo,\n gps_algo_hyperparams=gps_algo_hyperparams\n )\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# Learn using learning algorithm\nprint(\"Running Learning Algorithm!!!\")\nlearn_algo.run(resume_training_itr)\nprint(\"Learning Algorithm has finished!\")\nsys.exit()\n\n# ######################### #\n# EXAMPLE OF AN EXPLORATION #\n# ######################### #\nros_rate = rospy.Rate(int(1/Ts)) # hz\ntry:\n episode = 0\n 
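# Hand-rolled exploration loop: each episode collects num_samples\n # rollouts of length T into a SampleList, then evaluates the\n # quadratized cost terms (l, lx, lu, lxx, luu, lux) over all of them.\n 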
sample_list = SampleList()\n\n print(\"Starting Training...\")\n # Learn First\n for episode in range(total_episodes):\n print(\"\")\n print(\"#\"*15)\n print(\"Episode %d/%d\" % (episode+1, total_episodes))\n print(\"#\"*15)\n\n for n_sample in range(num_samples):\n print(\"\")\n print(\"New Sample: Sample %d/%d\" % (n_sample+1, num_samples))\n i = 0\n\n # Create a sample class\n sample = Sample(bigman_env, T)\n history = [None] * T\n obs_hist = [None] * T\n\n # Collect history\n for i in range(T):\n obs = bigman_env.get_observation()\n state = bigman_env.get_state()\n action = bigman_agent.act(obs=obs)\n bigman_env.send_action(action)\n print(\"Episode %d/%d | Sample:%d/%d | t=%d/%d\" % (episode+1, total_episodes,\n n_sample+1, num_samples,\n i+1, T))\n obs_hist[i] = (obs, action)\n history[i] = (state, action)\n #print(obs)\n #print(\"..\")\n #print(state)\n #print(\"--\")\n #print(\"obs_shape:(%s)\" % obs.shape)\n #print(\"state_shape:(%s)\" % state.shape)\n #print(\"obs active names: %s\" % bigman_env.get_obs_info()['names'])\n #print(\"obs active dims: %s\" % bigman_env.get_obs_info()['dimensions'])\n #print(\"state active names: %s\" % bigman_env.get_state_info()['names'])\n #print(\"state active dims: %s\" % bigman_env.get_state_info()['dimensions'])\n #print(\"\")\n\n #sample.set_acts(action, t=i) # Set action One by one\n #sample.set_obs(obs[:42], obs_name='joint_state', t=i) # Set action One by one\n #sample.set_states(state[:7], state_name='link_position', t=i) # Set action One by one\n\n ros_rate.sleep()\n\n all_actions = np.array([hist[1] for hist in history])\n all_states = np.array([hist[0] for hist in history])\n all_obs = np.array([hist[0] for hist in obs_hist])\n sample.set_acts(all_actions) # Set all actions at the same time\n sample.set_obs(all_obs) # Set all obs at the same time\n sample.set_states(all_states) # Set all states at the same time\n\n # Add sample to sample list\n print(\"Sample added to sample_list!\")\n sample_list.add_sample(sample)\n\n print(\"Resetting environment!\")\n bigman_env.reset(time=1)\n #rospy.sleep(5) # Because I need to find a good way to reset\n\n print(\"\")\n print(\"Exploration finished. 
%d samples were generated\" % sample_list.num_samples())\n\n print(\"\")\n print(\"Evaluating samples' costs...\")\n #Evaluate costs for all samples for a condition.\n # Important classes\n #cost = act_cost['type'](act_cost)\n #cost = state_cost['type'](state_cost)\n cost = cost_sum['type'](cost_sum)\n iteration_data = IterationData()\n iteration_data.traj_info = TrajectoryInfo() # Cast it directly in gps algo, with M variable\n # Constants.\n N_samples = len(sample_list)\n\n # Compute cost.\n cs = np.zeros((N_samples, T)) # Sample costs of the current iteration.\n cc = np.zeros((N_samples, T)) # Cost estimate constant term.\n cv = np.zeros((N_samples, T, state_dim+action_dim)) # Cost estimate vector term.\n Cm = np.zeros((N_samples, T, state_dim+action_dim, state_dim+action_dim)) # Cost estimate matrix term.\n\n for n in range(N_samples):\n sample = sample_list[n]\n # Get costs.\n l, lx, lu, lxx, luu, lux = cost.eval(sample)\n cc[n, :] = l\n cs[n, :] = l\n\n # Assemble matrix and vector.\n cv[n, :, :] = np.c_[lx, lu]\n Cm[n, :, :, :] = np.concatenate(\n (np.c_[lxx, np.transpose(lux, [0, 2, 1])], np.c_[lux, luu]),\n axis=1\n )\n\n #TODO: Check this part better, and understand it\n # Adjust for expanding cost around a sample.\n X = sample.get_states()\n U = sample.get_acts()\n yhat = np.c_[X, U]\n rdiff = -yhat\n rdiff_expand = np.expand_dims(rdiff, axis=2)\n cv_update = np.sum(Cm[n, :, :, :] * rdiff_expand, axis=1)\n cc[n, :] += np.sum(rdiff * cv[n, :, :], axis=1) + 0.5 * np.sum(rdiff * cv_update, axis=1)\n cv[n, :, :] += cv_update\n\n # Fill in cost estimate.\n iteration_data.traj_info.cc = np.mean(cc, 0) # Constant term (scalar).\n iteration_data.traj_info.cv = np.mean(cv, 0) # Linear term (vector).\n iteration_data.traj_info.Cm = np.mean(Cm, 0) # Quadratic term (matrix).\n\n iteration_data.cs = cs # True value of cost.\n print(\"Mean cost for iteration %d: %f\" % (episode+1, np.sum(np.mean(cs, 0))))\n\n print(\"The episode has finished!\")\n\n\n #print(\"Training the agent...\")\n #bigman_agent.train(history=history)\n #bigman_agent.save(file_save_restore)\n #print(\"Training ready!\")\n\n\n\n #all_samples_obs = sample_list.get_obs(idx=range(2, 4), obs_name='joint_state')\n #print(all_samples_obs.shape)\n\n #for samp in all_samples_obs:\n # plt.plot(samp[:, 0])\n #plt.show()\n #plt.plot(sample.get_acts()[:, 0], 'k')\n #plt.plot(sample.get_obs('joint_state')[:, 0], 'b')\n #plt.plot(sample.get_states('link_position')[:, 0], 'r')\n #plt.show()\n\n print(\"Training finished!\")\n sys.exit()\n\nexcept KeyboardInterrupt:\n print('Training interrupted!')\n\n" }, { "alpha_fraction": 0.5402799844741821, "alphanum_fraction": 0.543358325958252, "avg_line_length": 32.75882339477539, "blob_id": "03f87eb1f3b517743a30b2c30bb53868ba6e6cfe", "content_id": "1f19ef1744a261ca4148e754b4974011e32bef39", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17217, "license_type": "permissive", "max_line_length": 83, "num_lines": 510, "path": "/robolearn/torch/algorithms/rl_algos/ddpg/ddpg.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis has been adapted from Vitchyr Pong's Deep Deterministic Policy Gradient\nhttps://github.com/vitchyr/rlkit\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nimport torch.optim as optim\n\nfrom collections import OrderedDict\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.eval_util import create_stats_ordered_dict\nfrom 
robolearn.models.policies import RandomPolicy\nfrom robolearn.utils.samplers import rollout\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.torch.utils.data_management import TorchFixedNormalizer\n\n\nclass DDPG(RLAlgorithm, TorchAlgorithm):\n \"\"\"\n Deep Deterministic Policy Gradient (DDPG)\n \"\"\"\n def __init__(\n self,\n explo_env,\n qf,\n policy,\n explo_policy,\n\n replay_buffer,\n batch_size=1024,\n eval_env=None,\n\n target_hard_update_period=1000,\n tau=1e-2,\n use_soft_update=False,\n qf_criterion=None,\n residual_gradient_weight=0,\n epoch_discount_schedule=None,\n eval_with_target_policy=False,\n\n policy_pre_activation_weight=0.,\n\n policy_lr=1e-4,\n qf_lr=1e-3,\n\n policy_weight_decay=0.,\n qf_weight_decay=0,\n\n optimizer='adam',\n # optimizer='rmsprop',\n # optimizer='sgd',\n optimizer_kwargs=None,\n\n obs_normalizer: TorchFixedNormalizer=None,\n action_normalizer: TorchFixedNormalizer=None,\n num_paths_for_normalization=0,\n\n reward_scale=1.,\n\n min_q_value=-np.inf,\n max_q_value=np.inf,\n\n save_replay_buffer=False,\n **kwargs\n ):\n \"\"\"\n\n :param explo_env:\n :param qf:\n :param policy:\n :param explo_policy:\n :param policy_lr:\n :param qf_lr:\n :param qf_weight_decay:\n :param target_hard_update_period:\n :param tau:\n :param use_soft_update:\n :param qf_criterion: Loss function to use for the q function. Should\n be a function that takes in two inputs (y_predicted, y_target).\n :param residual_gradient_weight: c, float between 0 and 1. The gradient\n used for training the Q function is then\n (1-c) * normal td gradient + c * residual gradient\n :param epoch_discount_schedule: A schedule for the discount factor\n that varies with the epoch.\n :param kwargs:\n \"\"\"\n self._target_policy = policy.copy()\n if eval_with_target_policy:\n eval_policy = self._target_policy\n else:\n eval_policy = policy\n RLAlgorithm.__init__(\n self,\n explo_env=explo_env,\n explo_policy=explo_policy,\n eval_env=eval_env,\n eval_policy=eval_policy,\n **kwargs\n )\n self.policy = policy\n self.target_hard_update_period = target_hard_update_period\n self.tau = tau\n self.use_soft_update = use_soft_update\n self.residual_gradient_weight = residual_gradient_weight\n self.policy_pre_activation_weight = policy_pre_activation_weight\n self.epoch_discount_schedule = epoch_discount_schedule\n self.obs_normalizer = obs_normalizer\n self.action_normalizer = action_normalizer\n self.num_paths_for_normalization = num_paths_for_normalization\n self.reward_scale = reward_scale\n\n # Q-function\n self._qf = qf\n self._target_qf = self._qf.copy()\n self.min_q_value = min_q_value\n self.max_q_value = max_q_value\n if qf_criterion is None:\n qf_criterion = nn.MSELoss()\n self.qf_criterion = qf_criterion\n\n # Replay Buffer\n self.replay_buffer = replay_buffer\n self.batch_size = batch_size\n self.save_replay_buffer = save_replay_buffer\n\n # ########## #\n # Optimizers #\n # ########## #\n if optimizer.lower() == 'adam':\n optimizer_class = optim.Adam\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n amsgrad=True,\n # amsgrad=False,\n )\n elif optimizer.lower() == 'rmsprop':\n optimizer_class = optim.RMSprop\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n\n )\n else:\n raise ValueError('Wrong optimizer')\n self._qf_lr = qf_lr\n self._policy_lr = policy_lr\n self._qf_weight_decay = qf_weight_decay\n self._policy_weight_decay = qf_weight_decay\n\n # Q-function optimizer\n 
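# Both optimizers below share the optimizer_class and optimizer_kwargs\n # selected above ('adam' with amsgrad enabled by default, or 'rmsprop').\n 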
self._qf_optimizer = optimizer_class(\n self._qf.parameters(),\n lr=qf_lr,\n weight_decay=qf_weight_decay,\n **optimizer_kwargs\n )\n\n # Policy optimizer\n self._policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n weight_decay=policy_weight_decay,\n **optimizer_kwargs\n )\n\n # Useful Variables for logging\n self.log_data = dict()\n self.log_data['Raw Pol Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Pol Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Qf Loss'] = np.zeros(self.num_train_steps_per_epoch)\n self.log_data['Q pred'] = np.zeros(\n (self.num_train_steps_per_epoch, batch_size)\n )\n self.log_data['Q target'] = np.zeros(\n (self.num_train_steps_per_epoch, batch_size)\n )\n self.log_data['Bellman Error'] = np.zeros(\n (self.num_train_steps_per_epoch, batch_size)\n )\n self.log_data['Policy Actions'] = np.zeros(\n (self.num_train_steps_per_epoch, batch_size, self.explo_env.action_dim)\n )\n\n def pretrain(self, n_pretrain_samples):\n if (\n self.num_paths_for_normalization == 0\n or (self.obs_normalizer is None and self.action_normalizer is None)\n ):\n observation = self.explo_env.reset()\n for ii in range(n_pretrain_samples):\n action = self.explo_env.action_space.sample()\n # Interact with environment\n next_ob, reward, terminal, env_info = (\n self.explo_env.step(action)\n )\n agent_info = None\n\n # Increase counter\n self._n_env_steps_total += 1\n # Create np.array of obtained terminal and reward\n terminal = np.array([terminal])\n reward = np.array([reward])\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_ob,\n agent_info=agent_info,\n env_info=env_info,\n )\n observation = next_ob\n\n if self.obs_normalizer is not None:\n self.obs_normalizer.update(np.array([observation]))\n\n if terminal:\n self.explo_env.reset()\n else:\n pretrain_paths = []\n random_policy = RandomPolicy(self.explo_env.action_space)\n while len(pretrain_paths) < self.num_paths_for_normalization:\n path = rollout(self.explo_env, random_policy, self.max_path_length)\n pretrain_paths.append(path)\n ob_mean, ob_std, ac_mean, ac_std = (\n compute_normalization(pretrain_paths)\n )\n if self.obs_normalizer is not None:\n self.obs_normalizer.set_mean(ob_mean)\n self.obs_normalizer.set_std(ob_std)\n self._target_qf.obs_normalizer = self.obs_normalizer\n self._target_policy.obs_normalizer = self.obs_normalizer\n if self.action_normalizer is not None:\n self.action_normalizer.set_mean(ac_mean)\n self.action_normalizer.set_std(ac_std)\n self._target_qf.action_normalizer = self.action_normalizer\n self._target_policy.action_normalizer = self.action_normalizer\n\n def _do_training(self):\n # Get batch of samples\n batch = self.get_batch()\n\n # Get common data from batch\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n rewards = batch['rewards'] * self.reward_scale\n terminals = batch['terminals']\n\n \"\"\"\n Policy operations.\n \"\"\"\n if self.policy_pre_activation_weight > 0:\n policy_actions, policy_info = self.policy(\n obs, return_preactivations=True,\n )\n pre_tanh_value = policy_info['pre_tanh_value']\n pre_activation_policy_loss = (\n (pre_tanh_value**2).sum(dim=1).mean()\n )\n q_output = self._qf(obs, policy_actions)[0]\n raw_policy_loss = - q_output.mean()\n policy_loss = (\n raw_policy_loss +\n pre_activation_policy_loss * self.policy_pre_activation_weight\n )\n else:\n policy_actions = self.policy(obs)[0]\n q_output = self._qf(obs, policy_actions)[0]\n raw_policy_loss = policy_loss = - q_output.mean()\n\n \"\"\"\n Critic operations.\n \"\"\"\n\n next_actions = self._target_policy(next_obs)[0]\n # speed up computation by not backpropping these gradients\n next_actions = next_actions.detach()\n target_q_values = self._target_qf(\n next_obs,\n next_actions,\n )[0]\n q_target = rewards + (1. - terminals) * self.discount * target_q_values\n q_target = q_target.detach()\n q_target = torch.clamp(q_target, self.min_q_value, self.max_q_value)\n # Hack for ICLR rebuttal\n if hasattr(self, 'reward_type') and self.reward_type == 'indicator':\n q_target = \\\n torch.clamp(q_target, -self.reward_scale/(1-self.discount), 0)\n q_pred = self._qf(obs, actions)[0]\n bellman_errors = (q_pred - q_target) ** 2\n qf_loss = self.qf_criterion(q_pred, q_target)\n\n if self.residual_gradient_weight > 0:\n residual_next_actions = self.policy(next_obs)[0]\n # speed up computation by not backpropping these gradients\n residual_next_actions = residual_next_actions.detach()\n residual_target_q_values = self._qf(\n next_obs,\n residual_next_actions,\n )[0]\n residual_q_target = (\n rewards\n + (1. - terminals) * self.discount * residual_target_q_values\n )\n residual_bellman_errors = (q_pred - residual_q_target) ** 2\n # noinspection PyUnresolvedReferences\n residual_qf_loss = residual_bellman_errors.mean()\n # Blend the losses as (1 - c) * td_loss + c * residual_loss,\n # matching the residual_gradient_weight docstring above.\n qf_loss = (\n self.residual_gradient_weight * residual_qf_loss\n + (1 - self.residual_gradient_weight) * qf_loss\n )\n\n \"\"\"\n Update Networks\n \"\"\"\n\n self._policy_optimizer.zero_grad()\n policy_loss.backward()\n self._policy_optimizer.step()\n\n self._qf_optimizer.zero_grad()\n qf_loss.backward()\n self._qf_optimizer.step()\n\n # ###################### #\n # Update Target Networks #\n # ###################### #\n if self.use_soft_update:\n ptu.soft_update_from_to(self.policy, self._target_policy, self.tau)\n ptu.soft_update_from_to(self._qf, self._target_qf, self.tau)\n else:\n if self._n_env_steps_total % self.target_hard_update_period == 0:\n ptu.copy_model_params_from_to(self._qf, self._target_qf)\n ptu.copy_model_params_from_to(self.policy, self._target_policy)\n\n # ############### #\n # LOG Useful Data #\n # ############### #\n step_idx = self._n_epoch_train_steps\n self.log_data['Qf Loss'][step_idx] = ptu.get_numpy(qf_loss)\n self.log_data['Pol Loss'][step_idx] = ptu.get_numpy(policy_loss)\n self.log_data['Raw Pol Loss'][step_idx] = ptu.get_numpy(raw_policy_loss)\n self.log_data['Q pred'][step_idx] = ptu.get_numpy(q_pred).squeeze(-1)\n self.log_data['Q target'][step_idx] = ptu.get_numpy(q_target).squeeze(-1)\n self.log_data['Bellman Error'][step_idx] = \\\n ptu.get_numpy(bellman_errors).squeeze(-1)\n self.log_data['Policy Actions'][step_idx] = ptu.get_numpy(policy_actions)\n\n def _not_do_training(self):\n return\n\n @property\n def torch_models(self):\n networks_list = [\n self.policy,\n self._qf,\n self._target_policy,\n self._target_qf,\n ]\n\n return networks_list\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n snapshot = RLAlgorithm.get_epoch_snapshot(self, epoch)\n\n snapshot.update(\n policy=self.eval_policy,\n trained_policy=self.policy,\n target_policy=self._target_policy,\n exploration_policy=self.explo_policy,\n qf=self._qf,\n target_qf=self._target_qf,\n )\n\n # Replay Buffer\n if self.save_replay_buffer:\n snapshot.update(\n replay_buffer=self.replay_buffer,\n )\n\n return snapshot\n\n def 
_update_logging_data(self):\n max_step = max(self._n_epoch_train_steps, 1)\n\n if self.eval_statistics is None:\n self.eval_statistics = OrderedDict()\n\n self.eval_statistics['QF Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step]\n ))\n self.eval_statistics['Policy Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol Loss'][:max_step]\n ))\n self.eval_statistics['Raw Policy Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Raw Pol Loss'][:max_step]\n ))\n self.eval_statistics['Preactivation Policy Loss'] = (\n self.eval_statistics['Policy Loss'] -\n self.eval_statistics['Raw Policy Loss']\n )\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Predictions',\n np.nan_to_num(np.mean(self.log_data['Q pred'][:max_step]))\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Targets',\n np.nan_to_num(np.mean(\n self.log_data['Q target'][:max_step]\n ))\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Bellman Errors',\n np.nan_to_num(np.mean(\n self.log_data['Bellman Error'][:max_step]\n ))\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy Action',\n np.nan_to_num(np.mean(\n self.log_data['Policy Actions'][:max_step]\n ))\n ))\n\n def evaluate(self, epoch):\n self._update_logging_data()\n RLAlgorithm.evaluate(self, epoch)\n\n # Reset log_data\n for key in self.log_data.keys():\n self.log_data[key].fill(0)\n\n def get_batch(self):\n batch = self.replay_buffer.random_batch(self.batch_size)\n\n return batch\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n RLAlgorithm._handle_step(\n self,\n observation=observation,\n action=action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _end_rollout(self):\n \"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n\n self.replay_buffer.terminate_episode()\n\n RLAlgorithm._end_rollout(self)\n\n\ndef compute_normalization(paths):\n obs = np.vstack([path[\"observations\"] for path in paths])\n ob_mean = np.mean(obs, axis=0)\n ob_std = np.std(obs, axis=0)\n actions = np.vstack([path[\"actions\"] for path in paths])\n ac_mean = np.mean(actions, axis=0)\n ac_std = np.std(actions, axis=0)\n return ob_mean, ob_std, ac_mean, ac_std\n" }, { "alpha_fraction": 0.6068302989006042, "alphanum_fraction": 0.6200446486473083, "avg_line_length": 29.036083221435547, "blob_id": "3665e7877a99e53d539059a540d31ffcfa181545", "content_id": "591a8a620e431180fe36db643709ed27725e0df3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5827, "license_type": "permissive", "max_line_length": 98, "num_lines": 194, "path": "/examples/rl_algos/sql/manipulator_iusql.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Soft Q-learning on TwoGoalEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import 
setup_logger\nfrom robolearn.utils.data_management.multigoal_replay_buffer import MultiGoalReplayBuffer\n\nfrom robolearn_gym_envs.pybullet import Pusher2D3DofGoalCompoEnv\n\nfrom robolearn.torch.sql.iu_sql import IUSQL\n\nfrom robolearn.torch.sql.value_functions import NNQFunction\nfrom robolearn.torch.sql.value_functions import AvgNNQFunction\n# from robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.torch.sql.policies import StochasticPolicy\n\nimport argparse\n\n\ndef experiment(variant):\n ptu.set_gpu_mode(variant['gpu'])\n\n goal = variant['env_params'].get('goal')\n variant['env_params']['goal_poses'] = \\\n [goal, (goal[0], 'any'), ('any', goal[1])]\n variant['env_params'].pop('goal')\n\n env = NormalizedBoxEnv(\n Pusher2D3DofGoalCompoEnv(**variant['env_params'])\n )\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n n_unintentional = 2\n\n net_size = variant['net_size']\n u_qfs = [NNQFunction(obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=(net_size, net_size))\n for _ in range(n_unintentional)]\n i_qf = AvgNNQFunction(obs_dim=obs_dim,\n action_dim=action_dim,\n q_functions=u_qfs)\n\n # _i_policy = TanhGaussianPolicy(\n u_policies = [StochasticPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,\n ) for _ in range(n_unintentional)]\n i_policy = StochasticPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,)\n\n replay_buffer = MultiGoalReplayBuffer(\n variant['algo_params']['replay_buffer_size'],\n np.prod(env.observation_space.shape),\n np.prod(env.action_space.shape),\n n_unintentional\n )\n variant['algo_params']['replay_buffer'] = replay_buffer\n\n # QF Plot\n variant['algo_params']['_epoch_plotter'] = None\n\n algorithm = IUSQL(\n env=env,\n training_env=env,\n save_environment=False,\n u_qfs=u_qfs,\n u_policies=u_policies,\n i_policy=i_policy,\n i_qf=i_qf,\n algo_interface='torch',\n min_buffer_size=variant['algo_params']['batch_size'],\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n return algorithm\n\n\npath_length = 500\npaths_per_epoch = 5\npaths_per_eval = 1\npaths_per_hard_update = 12\n\nexpt_params = dict(\n algo_params=dict(\n # Common RLAlgo params\n num_steps_per_epoch=paths_per_epoch * path_length,\n num_epochs=1000, # n_epochs\n num_updates_per_env_step=1, # Like n_train_repeat??\n num_steps_per_eval=paths_per_eval * path_length,\n # EnvSampler params\n max_path_length=path_length, # max_path_length\n render=False,\n # ReplayBuffer params\n batch_size=64, # batch_size\n replay_buffer_size=1e4,\n # SoftQLearning params\n # TODO: _epoch_plotter\n policy_lr=3e-4,\n qf_lr=3e-4,\n value_n_particles=16,\n use_hard_updates=True, # Hard update for target Q-fcn\n hard_update_period=paths_per_hard_update*path_length, # td_target_update_interval (steps)\n soft_target_tau=0.001, # Not used if use_hard_updates=True\n # TODO:kernel_fn\n kernel_n_particles=32,\n kernel_update_ratio=0.5,\n discount=0.99,\n reward_scale=0.1,\n ),\n net_size=64,\n)\n\nenv_params = dict(\n is_render=False,\n obs_with_img=False,\n goal_poses=None,\n rdn_goal_pose=True,\n tgt_pose=None,\n rdn_tgt_object_pose=True,\n sim_timestep=0.001,\n frame_skip=10,\n obs_distances=False,\n tgt_cost_weight=1.5,\n goal_cost_weight=1.0,\n ctrl_cost_weight=1.0e-4,\n)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, default=None)\n 
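# --net_size overrides the default hidden-layer width in expt_params;\n # the remaining flags control experiment naming, snapshot logging and\n # runtime options (render, gpu).\n 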
parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=50)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n expt_variant = expt_params\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'pusher_compo'\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n expt_variant['env_params'] = env_params\n expt_variant['env_params']['is_render'] = args.render\n\n # TODO: MAKE THIS A SCRIPT ARGUMENT\n expt_variant['env_params']['goal'] = (0.75, 0.75)\n expt_variant['env_params']['tgt_pose'] = (0.6, 0.25, 1.4660)\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algorithm = experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.5829318761825562, "alphanum_fraction": 0.5894700884819031, "avg_line_length": 38.80821990966797, "blob_id": "a2bf0f6e98e50a643a629b89dfb99f0702c3587e", "content_id": "4ceaa721f42be5bf4e45141e63324cbada52d45c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2906, "license_type": "permissive", "max_line_length": 81, "num_lines": 73, "path": "/robolearn/utils/data_management/multigoal_replay_buffer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfrom robolearn.utils.data_management.replay_buffer import ReplayBuffer\n\n\nclass MultiGoalReplayBuffer(ReplayBuffer):\n def __init__(self, max_replay_buffer_size, obs_dim, action_dim,\n reward_vector_size):\n if not max_replay_buffer_size > 1:\n raise ValueError(\"Invalid Maximum Replay Buffer Size: {}\".format(\n max_replay_buffer_size)\n )\n if not reward_vector_size > 0:\n raise ValueError(\"Invalid Reward Vector Size: {}\".format(\n reward_vector_size)\n )\n\n max_size = int(max_replay_buffer_size)\n multi_size = int(reward_vector_size)\n\n self._obs_buffer = np.zeros((max_size, obs_dim), dtype=np.float32)\n self._next_obs_buffer = np.zeros((max_size, obs_dim), dtype=np.float32)\n self._acts_buffer = np.zeros((max_size, action_dim), dtype=np.float32)\n self._rewards_buffer = np.zeros((max_size, 1), dtype=np.float32)\n self._terminals_buffer = np.zeros((max_size, 1), dtype='uint8')\n self._rew_vects_buffer = np.zeros((max_size, multi_size))\n self._term_vects_buffer = np.zeros((max_size, multi_size), dtype='uint8')\n\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n self._max_size = max_size\n self._top = 0\n self._size = 0\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n self._obs_buffer[self._top] = observation\n self._acts_buffer[self._top] = action\n self._rewards_buffer[self._top] = reward\n self._terminals_buffer[self._top] = terminal\n self._next_obs_buffer[self._top] = next_observation\n self._rew_vects_buffer[self._top] = \\\n 
kwargs['env_info']['reward_multigoal']\n self._term_vects_buffer[self._top] = \\\n kwargs['env_info']['terminal_multigoal']\n self._advance()\n\n def terminate_episode(self):\n pass\n\n def _advance(self):\n self._top = (self._top + 1) % self._max_size\n if self._size < self._max_size:\n self._size += 1\n\n def random_batch(self, batch_size):\n if batch_size > self._size:\n raise AttributeError('Not enough samples to get. %d bigger than '\n 'current %d!' % (batch_size, self._size))\n\n indices = np.random.randint(0, self._size, batch_size)\n return dict(\n observations=self._obs_buffer[indices],\n actions=self._acts_buffer[indices],\n rewards=self._rewards_buffer[indices],\n terminals=self._terminals_buffer[indices],\n next_observations=self._next_obs_buffer[indices],\n reward_vectors=self._rew_vects_buffer[indices],\n terminal_vectors=self._term_vects_buffer[indices],\n )\n\n def available_samples(self):\n return self._size\n" }, { "alpha_fraction": 0.6079733967781067, "alphanum_fraction": 0.6096345782279968, "avg_line_length": 21.5, "blob_id": "c44df039891fda3315e76ec6b3dd4e774e62e5df", "content_id": "a03c53221fc6716294d6bdf3575b1310e506f03f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "permissive", "max_line_length": 54, "num_lines": 28, "path": "/robolearn/models/transitions/transition.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\n\n\nclass Transition(with_metaclass(abc.ABCMeta, object)):\n \"\"\"\n General transition function interface.\n :math:`s_{t+1} = T(s_t, a_t)`\n \"\"\"\n\n def __init__(self, obs_dim, action_dim):\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n\n @abc.abstractmethod\n def get_next(self, observation, action):\n pass\n\n def get_nexts(self, observations, actions):\n pass\n\n @property\n def obs_dim(self):\n return self._obs_dim\n\n @property\n def action_dim(self):\n return self._action_dim\n" }, { "alpha_fraction": 0.491012305021286, "alphanum_fraction": 0.49668875336647034, "avg_line_length": 30.53731346130371, "blob_id": "65dd7ff60b53fefb0bc2c9edbf6d2d4d2c2bba7c", "content_id": "2b2908ffe06c2aa8ae8854d75e83811848e2a290", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2114, "license_type": "permissive", "max_line_length": 75, "num_lines": 67, "path": "/robolearn/torch/policies/mlp_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.torch.utils.nn import Mlp\nfrom robolearn.models.policies import Policy\nfrom robolearn.utils.serializable import Serializable\n\n\nclass MlpPolicy(Mlp, Serializable, Policy):\n def __init__(self,\n obs_dim,\n action_dim,\n hidden_sizes=(100, 100),\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_w_init='xavier_normal',\n output_b_init_val=0,\n **kwargs\n ):\n\n Policy.__init__(self,\n action_dim=action_dim)\n\n # self._serializable_initialized = False\n # Serializable.quick_init(self, locals())\n self.save_init_params(locals())\n Mlp.__init__(self,\n hidden_sizes=hidden_sizes,\n input_size=obs_dim,\n output_size=action_dim,\n hidden_w_init=hidden_w_init,\n hidden_b_init_val=hidden_b_init_val,\n output_w_init=output_w_init,\n output_b_init_val=output_b_init_val,\n **kwargs\n )\n\n def get_action(self, obs_np, **kwargs):\n values, info_dict = \\\n self.get_actions(obs_np[None],
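# A hedged usage sketch of the MultiGoalReplayBuffer record above; names
# and dimensions are illustrative, but the calls follow the signatures
# defined in that file:
#
#   buf = MultiGoalReplayBuffer(max_replay_buffer_size=1e4, obs_dim=6,
#                               action_dim=2, reward_vector_size=2)
#   buf.add_sample(obs, act, rew, done, next_obs,
#                  env_info={'reward_multigoal': rew_vec,
#                            'terminal_multigoal': term_vec})
#   batch = buf.random_batch(64)  # dict with 'reward_vectors' et al.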
**kwargs)\n\n for key, val in info_dict.items():\n if isinstance(val, np.ndarray):\n info_dict[key] = val[0, :]\n\n return values[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n return self.eval_np(obs_np, **kwargs)\n\n def forward(\n self,\n obs,\n return_preactivations=False,\n ):\n nn_ouput = Mlp.forward(self, obs,\n return_preactivations=return_preactivations)\n\n if return_preactivations:\n action = nn_ouput[0]\n pre_activations = nn_ouput[1]\n info_dict = dict(\n pre_activations=pre_activations,\n )\n else:\n action = nn_ouput\n info_dict = dict()\n\n return action, info_dict\n\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 27, "blob_id": "5725949d436bdc449d0abfe5afacd400ae20f427", "content_id": "ade72ef725c784de7cdb7299cee30ba5f19d64b0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "permissive", "max_line_length": 37, "num_lines": 3, "path": "/robolearn/utils/launchers/config.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "# Change this\n# LOCAL_LOG_DIR = '/tmp/rlkit/'\nLOCAL_LOG_DIR = '/home/desteban/logs'\n" }, { "alpha_fraction": 0.5642478466033936, "alphanum_fraction": 0.5816121101379395, "avg_line_length": 24.328702926635742, "blob_id": "a38b2238107c8508c71a571dfd044f5ff61cdaa9", "content_id": "7c15fbfcb8c9bbcae6c617b6cc8c9a331a70c3bb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5471, "license_type": "permissive", "max_line_length": 96, "num_lines": 216, "path": "/examples/miscellaneous/test_lin_gauss_pol.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.policies import LinearGaussianPolicy\nimport matplotlib.pyplot as plt\n\nbatch_size = 5\nobs_dim = 3\naction_dim = 3\nn_policies = 2\n\nTend = 5\nTs = 0.01\nT = int(Tend/Ts)\n\ntime = torch.linspace(0, Tend-Ts, T)\ndes_obs_x = torch.sin(time)\ndes_obs_y = torch.cos(time)\ndes_obs_z = torch.cos(time*2)\nobs = torch.cat([\n des_obs_x.unsqueeze(-1),\n des_obs_y.unsqueeze(-1),\n des_obs_z.unsqueeze(-1),\n ],\n dim=-1,\n)\n\nnn_pol = LinearGaussianPolicy(\n obs_dim=obs_dim,\n action_dim=action_dim,\n T=T,\n )\n\nnn_pol.K.data.uniform_(-1, 1)\nnn_pol.k.data.uniform_(-1, 1)\n\nprint('##'*10)\nprint(nn_pol)\nprint('##'*10)\nprint('MODULE PARAMETERS:')\nfor name, p in nn_pol.named_parameters():\n print(name, p.shape, 'grad?', p.requires_grad)\nprint('##'*10)\n\nfor name, p in nn_pol.named_parameters():\n print(name, '\\n', p)\nprint('##'*10)\n# print('SHARED PARAMETERS:')\n# for name, p in nn_pol.named_shared_parameters():\n# print(name, p.shape)\n# print(p.data)\n# print('.')\n# print('##'*10)\n# print('MIXING PARAMETERS:')\n# for name, p in nn_pol.named_mixing_parameters():\n# print(name, p.shape)\n# print(p.data)\n# print('.')\n# print('##'*10)\n# print('ALL POLICIES PARAMETERS:')\n# for name, p in nn_pol.named_policies_parameters():\n# print(name, p.shape)\n# print(p.data)\n# print('.')\n# print('##'*10)\n# print('SPECIFIC POLICY PARAMETERS:')\n# for pol_idx in range(nn_pol.n_heads):\n# print('--- POLICY ', pol_idx, ' ---')\n# for name, p in nn_pol.named_policies_parameters(idx=pol_idx):\n# print(name, p.shape)\n# print(p.data)\n# print('.')\n# print('##\\n'*5)\n# for param in nn_pol.parameters():\n# print(param.shape)\n\nprint('##\\n'*5)\n# input(\"Press a key to start training...\")\n\n\n# 
obs = torch.rand((T, obs_dim))\n\n\nact_des = torch.rand((T, action_dim))\n# act_des = torch.tensor([[0.1],\n# [0.1],\n# [0.3]])\nact_des = act_des.uniform_(-1, 1)\n\n# o = nn_pol(a, _val_idxs=[0], deterministic=True)\n# error = torch.sum(b0 - o[0][0])\n\nloss_fn = torch.nn.MSELoss(size_average=False)\nlearning_rate = 1e-2\noptimizer_pol = torch.optim.Adam(nn_pol.parameters(), learning_rate)\n\nprint('obs shape:', obs.shape)\nprint('action shape:', act_des.shape)\n\n\nparams_initial = list()\nfor param in nn_pol.parameters():\n params_initial.append(param.data.clone())\n\noutput_initial = nn_pol(obs)\n\n\nFdyn = torch.rand((T, obs_dim, action_dim))\nfdyn = torch.rand((T, obs_dim))\n\n\ndef sim_dynamics(obs0, policy):\n # next_obs = torch.zeros((T, obs_dim), requires_grad=True)\n # next_obs.data[0, :] = obs0\n obs_list = []\n obs_list.append(obs0.unsqueeze(0))\n for t in range(T-1):\n obs_t = obs_list[-1]\n act = policy(obs_t, t=t)[0]\n # next_obs.data[t+1, :] = next_obs.data[t+1, :] + \\\n # (torch.sum(Fdyn[t, :, :]*act.unsqueeze(dim=-2),\n # dim=-1) + fdyn[t, :])\n dobs_t = torch.sum(Fdyn[t, :, :]*act.unsqueeze(dim=-1), dim=-1) \\\n + fdyn[t, :]\n obs_tp1 = obs_t + dobs_t*Ts\n obs_list.append(obs_tp1)\n # next_obs[t+1, :] = next_obs[t, :] + act*Ts\n # print(next_obs.data[t, :])\n # print(act)\n # print(next_obs.data[t+1, :])\n # input('wuuu')\n next_obs = torch.cat(obs_list, dim=0)\n\n return next_obs\n\n\nnext_obs_initial = sim_dynamics(obs[0, :], nn_pol)\n\nfor tt in range(2500):\n # act_pred, policy_info = nn_pol(obs)\n\n next_obs = sim_dynamics(obs[0, :], nn_pol)\n\n # loss = loss_fn(act_pred, act_des)\n loss = loss_fn(next_obs[:-1, :], obs[1:, :])\n # loss = loss_fn(policy_mean, act_des)\n # loss = loss_fn(policy_log_std, act_des)\n # loss = loss_fn(pre_tanh_value, act_des)\n\n print('t=', tt, '| loss=', loss.item())\n\n optimizer_pol.zero_grad()\n loss.backward()\n\n if tt == 0:\n print('Showing the gradients')\n for name, param in nn_pol.named_parameters():\n print('----')\n print(name, '\\n', param.grad)\n # input('Press a key to continue training...')\n\n optimizer_pol.step()\n\n# error.backward()\n\nnext_obs = sim_dynamics(obs[0, :], nn_pol)\n\nfig, axs = plt.subplots(obs_dim, 1)\n\nfor ii in range(obs_dim):\n axs[ii].plot(time[1:].data.numpy(), obs[1:, ii].data.numpy(), label='des')\n axs[ii].plot(time[1:].data.numpy(), next_obs[:-1, ii].data.numpy(), label='obtained')\n axs[ii].plot(time[1:].data.numpy(), next_obs_initial[:-1, ii].data.numpy(), label='initial')\n axs[ii].legend()\nplt.show()\n\n# print('='*10)\n# print('='*10)\n# output = nn_pol(obs)\n# print('Initial output')\n# for key, val in output_initial[1].items():\n# print(key, '\\n', val)\n# print('==')\n# print('Final output')\n# for key, val in output[1].items():\n# print(key, '\\n', val)\n# print('action_des', act_des)\n# print('action_pred_initial', output_initial[0])\n# print('action_pred', output[0])\n# print('action_one_by_one')\n# for ii in range(batch_size):\n# print(ii, '-->', nn_pol(obs[ii])[0])\n#\n# print('_______ DEBUG___')\n# nn_pol(obs)\n\n\ninput('Show parameters...')\n\nprint('##\\n'*2)\n\nparams_final = list()\nfor param in nn_pol.shared_parameters():\n params_final.append(param.data.clone())\n\nprint('##\\n'*2)\nprint('LOSS', loss)\nfor name, param in nn_pol.named_parameters():\n print('--')\n print('NAME', name)\n print('DATA', param.data)\n print('GRAD', param.grad)\n\nprint('init_shared')\nprint(params_initial)\nprint('final_shared')\nprint(params_final)\ninput('wuuu')\n" }, { "alpha_fraction": 
0.5632209181785583, "alphanum_fraction": 0.5884562730789185, "avg_line_length": 42.93397903442383, "blob_id": "39767823a218e8f4ad29b71e18cad830f8456229", "content_id": "f6257e913eae0ccd06cf336feb4fa8c224f46035", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22627, "license_type": "permissive", "max_line_length": 161, "num_lines": 515, "path": "/scenarios/gym-cartpole.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\n\nimport numpy as np\nfrom robolearn.old_utils.sampler import Sampler\n\nfrom robolearn.old_agents import GPSAgent\nfrom robolearn.old_algos.gps.mdgps import MDGPS\nfrom robolearn.old_algos.gps.pigps import PIGPS\nfrom robolearn.old_algos.trajopt.dreps import DREPS\nfrom robolearn.old_algos.trajopt.ilqr import ILQR\nfrom robolearn.old_algos.trajopt.mdreps import MDREPS\nfrom robolearn.old_algos.trajopt.pi2 import PI2\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_CONSTANT\nfrom robolearn.old_envs import GymEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.traj_opt.traj_opt_dreps import TrajOptDREPS\nfrom robolearn.old_utils.traj_opt.traj_opt_lqr import TrajOptLQR\nfrom robolearn.old_utils.traj_opt.traj_opt_mdreps import TrajOptMDREPS\nfrom robolearn.old_utils.traj_opt.traj_opt_pi2 import TrajOptPI2\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n print(\"\\n\\033[1;31mThe script has been kill by the user!!\")\n os._exit(1)\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\nlearning_algorithm = 'MDREPS'\n#env_name = 'Pendulum-v0'\nenv_name = 'Reacher-v1'\n# Task parameters\nTs = 0.01\nTreach = 1\nTlift = 0 # 3.8\nTinter = 0 # 0.5\nTend = 0 # 0.7\n# EndTime = 4 # Using final time to define the horizon\nEndTime = Treach + Tinter + Tlift + Tend # Using final time to define the horizon\ninit_with_demos = False\ngenerate_dual_sets = True\ndemos_dir = None # 'TASKSPACE_TORQUE_CTRL_DEMO_2017-07-21_16:32:39'\ndual_dir = 'DUAL_DEMOS_2017-08-23_07:10:35'\nseed = 6\n\nrandom.seed(seed)\nnp.random.seed(seed)\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\nchange_print_color.change('BLUE')\nprint(\"\\nCreating GYM environment...\")\n\n# reset_condition_bigman_drill_gazebo_fcn = Reset_condition_bigman_drill_gazebo()\n\n\n# Create a BIGMAN ROS EnvInterface\ngym_env = GymEnv(name=env_name, render=False, seed=seed)\n\naction_dim = gym_env.action_dim\nstate_dim = gym_env.state_dim\nobservation_dim = gym_env.obs_dim\n\nprint(\"Gym Environment OK. 
name:%s (action_dim=%d), (state_dim=%d)\" % (env_name, action_dim, state_dim))\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nchange_print_color.change('CYAN')\nprint(\"\\nCreating Gym Agent...\")\n\npolicy_params = {\n 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'network_params': {\n 'n_layers': 1, # Hidden layers??\n 'dim_hidden': [40], # List of size per n_layers\n 'obs_names': gym_env.get_obs_info()['names'],\n 'obs_dof': gym_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n },\n # Initialization.\n 'init_var': 0.1, # Initial policy variance.\n 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # Solver hyperparameters.\n 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n 'batch_size': 15,\n 'lr': 0.001, # Base learning rate (by default it's fixed).\n 'lr_policy': 'fixed', # Learning rate policy.\n 'momentum': 0.9, # Momentum.\n 'weight_decay': 0.005, # Weight decay.\n 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # set gpu usage.\n 'gpu_mem_percentage': 0.05,\n 'use_gpu': 1, # Whether or not to use the GPU for training.\n 'gpu_id': 0,\n 'random_seed': 1,\n 'fc_only_iterations': 0, # TODO: Only forwardcontrol? if it is CNN??\n # 'weights_file_prefix': EXP_DIR + 'policy',\n}\npolicy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': policy_params\n }\n\ngym_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt)\nprint(\"Bigman Agent:%s OK\\n\" % type(gym_agent))\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n# Action Cost\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\ntarget_distance_state = np.array([1.0, 0.0, 0.0]) # [cos(0), sin(0), 0] / [cost_theta, sin_theta, theta_dot]\nstate_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'l1': 0.1, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10.0, # Weight multiplier on final time step.\n 'data_types': {\n 'gym_state': {\n # 'wp': np.ones_like(target_state), # State weights - must be set.\n 'wp': np.array([1.0, 1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_state, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': gym_env.get_state_info(name='gym_state')['idx']\n },\n },\n}\n\ncost_sum = {\n 'type': CostSum,\n 'costs': [act_cost, state_cost_distance],\n 'weights': [1.0e-1, 1.0e-0],\n}\n\n\n# ########## #\n# ########## #\n# Conditions #\n# ########## #\n# ########## #\ndrill_relative_poses = [] # Used only in dual demos\ncondition0 = 2 # Seed number\ngym_env.add_condition(condition0)\n\n# #################### #\n# #################### #\n# ## DEMONSTRATIONS ## #\n# #################### #\n# #################### #\ndemos_samples = None\n\n# DUAL SAMPLES\ngood_trajs = None\nbad_trajs = None\n\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\nchange_print_color.change('YELLOW')\nprint(\"\\nConfiguring learning algorithm...\\n\")\n\n# Learning params\nresume_training_itr = None # Resume from previous training iteration\n# data_files_dir = 'GPS_2017-09-01_15:22:55' # None # In case we want to resume from previous training\ndata_files_dir = None # In case we want to resume from previous training\n\nif demos_samples is None:\n # # init_traj_distr values can be lists if they are different for each condition\n # init_traj_distr = {'type': init_lqr,\n # # Parameters to calculate initial COST function based on stiffness\n # 'init_var': 3.0e-1, # Initial Variance\n # 'stiffness': 5.0e-1, # Stiffness (multiplies q)\n # 'stiffness_vel': 0.01, # 0.5, # Stiffness_vel*stiffness (multiplies qdot)\n # 'final_weight': 10.0, # Multiplies cost at T\n # # Parameters for guessing dynamics\n # 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.\n # #'init_gains': 1.0*np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # #'init_gains': 1.0/np.array([5000.0, 8000.0, 5000.0, 5000.0, 300.0, 2000.0, 300.0]), # dU vector(np.array) of gains, default ones.\n # 'init_gains': np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # }\n init_traj_distr = {'type': init_pd,\n 'init_var': np.array([5.0e-2])*1.0e-00,\n 'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': 1, # Total joints in state\n 'state_to_pd': 'joints', # Joints\n 'dDistance': None,\n }\nelse:\n init_traj_distr = {'type': init_demos,\n 'sample_lists': demos_samples\n }\n\n# Trajectory Optimization Options\ntraj_opt_lqr = {'type': TrajOptLQR,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 
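# The min_eta/max_eta entries above bound the dual variable of the KL
# constraint during the backward pass; a minimal sketch of the clipping
# they imply, assuming a scalar dual variable (illustrative only):
#
#   eta = min(max(eta, 1e-8), 1e16)  # keep eta within [min_eta, max_eta]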
'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n\ntraj_opt_pi2 = {'type': TrajOptPI2,\n 'del0': 1e-4, # Dual variable updates for non-PD Q-function.\n 'kl_threshold': 1.0, # KL-divergence threshold between old and new policies.\n 'covariance_damping': 10.0, # 2.0, # If greater than zero, covariance is computed as a multiple of the old\n # covariance. Multiplier is taken to the power (1 / covariance_damping).\n # If greater than one, slows down convergence and keeps exploration noise high for more iterations.\n 'min_temperature': 0.001, # Minimum bound of the temperature optimization for the soft-max\n # probabilities of the policy samples.\n 'use_sumexp': False,\n 'pi2_use_dgd_eta': True, # False,\n 'pi2_cons_per_step': True,\n }\n\ntraj_opt_dreps = {'type': TrajOptDREPS,\n 'epsilon': 1.0, # KL-divergence threshold between old and new policies.\n 'xi': 5.0,\n 'chi': 2.0,\n 'dreps_cons_per_step': True,\n 'min_eta': 0.001, # Minimum bound of the temperature optimization for the soft-max\n 'covariance_damping': 2.0,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n }\n\ntraj_opt_mdreps = {'type': TrajOptMDREPS,\n 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'min_omega': 0,#1e-8, # At min_omega, kl_div > kl_step\n 'max_omega': 0,#1e16, # At max_omega, kl_div < kl_step\n 'min_nu': 0,#1e-8, # At min_nu, kl_div > kl_step\n 'max_nu': 0,#2.0e1, # At max_nu, kl_div < kl_step,\n 'step_tol': 0.1,\n 'bad_tol': 0.2,\n 'good_tol': 0.3,\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n\n# Dynamics\nlearned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. 
number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\n# GPS algo hyperparameters\nmdgps_hyperparams = {'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n }\n\npigps_hyperparams = {'init_pol_wt': 0.01,\n 'policy_sample_mode': 'add'\n }\n\nilqr_hyperparams = {'inner_iterations': 1,\n }\n\npi2_hyperparams = {'inner_iterations': 1,\n 'fit_dynamics': False, # Dynamics fitting is not required for PI2.\n }\n\ndreps_hyperparams = {'inner_iterations': 1,\n 'good_samples': good_trajs,\n 'bad_samples': bad_trajs,\n }\n\nmdreps_hyperparams = {'inner_iterations': 1,\n 'good_samples': good_trajs,\n 'bad_samples': bad_trajs,\n 'n_bad_samples': 2, # Number of bad samples per each trajectory\n 'n_good_samples': 2, # Number of bad samples per each trajectory\n 'base_kl_bad': 2.5, # (chi) to be used with multiplier | kl_div_b >= kl_bad\n 'base_kl_good': 1.0, # (xi) to be used with multiplier | kl_div_g <= kl_good\n 'bad_traj_selection_type': 'always', # 'always', 'only_traj'\n 'good_traj_selection_type': 'always', # 'always', 'only_traj'\n 'init_eta': 4.62,\n 'init_nu': 0,#0.5,\n 'init_omega': 0,#1.0,\n 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good in LQR)\n 'max_good_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_good in LQR)\n 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'max_bad_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'min_good_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n 'min_bad_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n }\n\n\nif learning_algorithm.upper() == 'MDGPS':\n gps_algo_hyperparams = mdgps_hyperparams\n traj_opt_method = traj_opt_lqr\n test_after_iter = True\n sample_on_policy = False\n use_global_policy = True\n\nelif learning_algorithm.upper() == 'PIGPS':\n mdgps_hyperparams.update(pigps_hyperparams)\n gps_algo_hyperparams = mdgps_hyperparams\n traj_opt_method = traj_opt_pi2\n test_after_iter = True\n sample_on_policy = False\n use_global_policy = True\n\nelif learning_algorithm.upper() == 'ILQR':\n gps_algo_hyperparams = ilqr_hyperparams\n traj_opt_method = traj_opt_lqr\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'PI2':\n gps_algo_hyperparams = pi2_hyperparams\n traj_opt_method = traj_opt_pi2\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'DREPS':\n gps_algo_hyperparams = dreps_hyperparams\n traj_opt_method = traj_opt_dreps\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'MDREPS':\n gps_algo_hyperparams = mdreps_hyperparams\n traj_opt_method = traj_opt_mdreps\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\nelse:\n raise AttributeError(\"Wrong learning algorithm %s\" % learning_algorithm.upper())\n\n\ngps_hyperparams = {\n 'T': int(EndTime/Ts), # Total points\n 'dt': Ts,\n 'iterations': 100, # 100 # 2000 # GPS episodes, \"inner iterations\" --> K iterations\n 'test_after_iter': 
test_after_iter, # If test the learned policy after an iteration in the RL algorithm\n 'test_samples': 2, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)\n # Samples\n 'num_samples': 6, # 20 # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'sample_on_policy': sample_on_policy, # Whether generate on-policy samples or off-policy samples\n #'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n #'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n 'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': np.ones(action_dim), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n 'cost': cost_sum,\n # Conditions\n 'conditions': len(gym_env.get_conditions()), # Total number of initial conditions\n 'train_conditions': range(len(gym_env.get_conditions())), # Indexes of conditions used for training\n 'test_conditions': range(len(gym_env.get_conditions())), # Indexes of conditions used for testing\n # KL step (epsilon)\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)\n 'max_step_mult': 10.0, # Previous 23/08 -> 1.0 #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)\n # Others\n 'gps_algo_hyperparams': gps_algo_hyperparams,\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, #1e-2,# 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization # CHECK THIS VALUE!!!, I AM USING ZERO!!\n 'use_global_policy': use_global_policy,\n 'data_files_dir': data_files_dir,\n}\n\n\nif learning_algorithm.upper() == 'MDGPS':\n learn_algo = MDGPS(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'PIGPS':\n learn_algo = PIGPS(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'ILQR':\n learn_algo = ILQR(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'PI2':\n learn_algo = PI2(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'DREPS':\n learn_algo = DREPS(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'MDREPS':\n learn_algo = MDREPS(agent=gym_agent, env=gym_env, **gps_hyperparams)\n\nelse:\n raise AttributeError(\"Wrong learning algorithm %s\" % learning_algorithm.upper())\n\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# import numpy as np\n# dX = gym_env.state_dim\n# dU = gym_env.action_dim\n# dO = gym_env.obs_dim\n# T = gps_hyperparams['T']\n# all_actions = np.zeros((T, dU))\n# all_states = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dX))\n# all_obs = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dO))\n# sample = Sample(gym_env, T)\n# sample.set_acts(all_actions) # Set all actions at the same time\n# 
sample.set_obs(all_obs) # Set all obs at the same time\n# sample.set_states(all_states) # Set all states at the same time\n# costs = learn_algo._eval_conditions_sample_list_cost([SampleList([sample])])\n# raw_input('zacataaaaaaaaa')\n\n\n# Optimize policy using learning algorithm\nprint(\"Running Learning Algorithm!!!\")\ntraining_successful = learn_algo.run(resume_training_itr)\nif training_successful:\n print(\"Learning Algorithm has finished SUCCESSFULLY!\")\nelse:\n print(\"Learning Algorithm has finished WITH ERRORS!\")\n\n\n# ############################## #\n# ############################## #\n# ## SAMPLE FROM FINAL POLICY ## #\n# ############################## #\n# ############################## #\nif training_successful:\n conditions_to_sample = gps_hyperparams['test_conditions']\n change_print_color.change('GREEN')\n n_samples = 1\n noisy = False\n sampler_hyperparams = {\n 'noisy': noisy,\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n 'T': int(EndTime/Ts)*1, # Total points\n 'dt': Ts\n }\n #sampler = Sampler(gym_agent.policy, gym_env, **sampler_hyperparams)\n sampler = Sampler(learn_algo.cur[0].traj_distr, gym_env, **sampler_hyperparams)\n print(\"Sampling from final policy!!!\")\n sample_lists = list()\n for cond_idx in conditions_to_sample:\n raw_input(\"\\nSampling %d times from condition %d and with policy:%s (noisy:%s). \\n Press a key to continue...\" %\n (n_samples, cond_idx, type(gym_agent.policy), noisy))\n sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)\n # costs = learn_algo._eval_conditions_sample_list_cost([sample_list])\n # # print(costs)\n # # raw_input('pppp')\n # sample_lists.append(sample_list)\n\n gym_env.reset(time=1, cond=0)\n\n\n\n\nprint(\"The script has finished!\")\nos._exit(0)\n\n" }, { "alpha_fraction": 0.43780067563056946, "alphanum_fraction": 0.4927835166454315, "avg_line_length": 33.64285659790039, "blob_id": "cfd74e51193cab0432c73c3a01657bd434704ace", "content_id": "e72f9e15f587698c07da73d02981ae30ee5a59ee", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "permissive", "max_line_length": 92, "num_lines": 42, "path": "/scenarios/humanoids2018/plots/others/dual_2dtraj_updates.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom builtins import input\nfrom robolearn.old_utils.plots.dual_2dtraj_updates import plot_dual_2dtraj_updates\n\nmethod = 'gps' # 'gps' or 'trajopt'\ngps_directory_names = ['gps_log1', 'gps_log2', 'gps_log3']#, 'reacher_log2', 'reacher_log3']\ngps_models_labels = ['gps1', 'gps2', 'gps3']\nidx_to_plot = [6, 7] # dX + dU\ntgt_positions = [(0.70, -0.15),\n (0.63, -0.09),\n (0.51, 0.05),\n (0.51, -0.05),\n (0.51, 0.05)]\nobst_positions = [(0.74, 0.10),\n (0.73, -0.36),\n (0.65, -0.10),\n (0.61, 0.10),\n (0.62, -0.08)]\nsafe_distance = 0.15\n# itr_to_load = None # list(range(8))\nitr_to_load = None # list(range(8))\nblock = False\n\n# ---\nitr_to_plot = [1]\nitr_to_load = list(range(2))\n# ---\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\nplot_dual_2dtraj_updates(dir_names, idx_to_plot,\n 
itr_to_load=itr_to_load,\n itr_to_plot=itr_to_plot,\n method=method,\n gps_models_labels=gps_models_labels,\n tgt_positions=tgt_positions,\n obst_positions=obst_positions,\n safe_distance=safe_distance,\n block=block)\n\ninput('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.6194563508033752, "alphanum_fraction": 0.6266094446182251, "avg_line_length": 30.772727966308594, "blob_id": "7e3f4cfe414f641a21932a756dcd5925a0fb46be", "content_id": "8ae1b6ff8127bb68592508bc539efac4e12bd0d1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "permissive", "max_line_length": 70, "num_lines": 22, "path": "/robolearn/torch/utils/ops/size_splits.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\n\n\ndef size_splits(tensor, split_sizes, dim=0):\n \"\"\"Splits the tensor according to chunks of split_sizes.\n\n Arguments:\n tensor (Tensor): tensor to split.\n split_sizes (list(int)): sizes of chunks\n dim (int): dimension along which to split the tensor.\n \"\"\"\n if dim < 0:\n dim += tensor.dim()\n\n dim_size = tensor.size(dim)\n if dim_size != torch.sum(torch.Tensor(split_sizes)):\n raise KeyError(\"Sum of split sizes exceeds tensor dim\")\n\n splits = torch.cumsum(torch.Tensor([0] + split_sizes), dim=0)[:-1]\n\n return tuple(tensor.narrow(int(dim), int(start), int(length))\n for start, length in zip(splits, split_sizes))\n" }, { "alpha_fraction": 0.5801066756248474, "alphanum_fraction": 0.5995826721191406, "avg_line_length": 27.375, "blob_id": "710b4bd8847efb4185f51cd5cbb3179861559cb9", "content_id": "2cfe873aa0ae4df48674bfc451092c2c6b64210c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4313, "license_type": "permissive", "max_line_length": 90, "num_lines": 152, "path": "/examples/rl_algos/spinningup/centauro/load_sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import time\nimport joblib\nimport os\nimport os.path as osp\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom spinup import EpochLogger\nfrom spinup.utils.logx import restore_tf_graph\n\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.01\nFRAME_SKIP = 1\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 128\n\nSEED = 1010\n# NP_THREADS = 6\n\nSUBTASK = None\n\n\ndef load_policy(fpath, itr='last', deterministic=False):\n # handle which epoch to load from\n if itr == 'last':\n saves = [int(x[11:])\n for x in os.listdir(fpath)\n if 'simple_save' in x and len(x) > 11]\n itr = '%d' % max(saves) if len(saves) > 0 else ''\n else:\n itr = '%d' % itr\n\n # load the things!\n sess = tf.Session()\n model = restore_tf_graph(sess, osp.join(fpath, 'simple_save'+itr))\n\n # get the correct op for executing actions\n if deterministic and 'mu' in model.keys():\n # 'deterministic' is only a valid option for SAC policies\n print('Using deterministic action op.')\n action_op = model['mu']\n else:\n print('Using default action op.')\n action_op = model['pi']\n\n # make function for producing an action given a single state\n get_action = lambda x: \\\n sess.run(action_op, feed_dict={model['x']: x[None, :]})[0]\n\n return get_action\n\n\ndef run_policy(env, 
policy, max_ep_len=None, num_episodes=100, render=True):\n\n logger = EpochLogger()\n obs, reward, done, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0\n while n < num_episodes:\n if render:\n env.render()\n time.sleep(1e-3)\n\n action = policy(obs)\n obs, reward, done, _ = env.step(action)\n ep_ret += reward\n ep_len += 1\n\n if done or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n print('Episode %d \\t EpRet %.3f \\t EpLen %d' % (n, ep_ret, ep_len))\n obs, reward, done, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n n += 1\n\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.dump_tabular()\n\n\ndef load_env(render=True):\n\n env_params = dict(\n is_render=True,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n active_joints='RA',\n control_mode='joint_tasktorque',\n # _control_mode='torque',\n balance_cost_weight=1.0,\n fall_cost_weight=1.0,\n tgt_cost_weight=3.0,\n # tgt_cost_weight=50.0,\n balance_done_cost=0., # 2.0*PATH_LENGTH, # TODO: dont forget same balance weight\n tgt_done_reward=0., # 20.0,\n ctrl_cost_weight=1.0e-1,\n use_log_distances=True,\n log_alpha_pos=1e-4,\n log_alpha_ori=1e-4,\n goal_tolerance=0.05,\n min_obj_height=0.60,\n max_obj_height=1.20,\n max_obj_distance=0.20,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n subtask=SUBTASK,\n random_init=True,\n seed=SEED,\n )\n\n env = NormalizedBoxEnv(\n CentauroTrayEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n return env\n\n\ndef main(args):\n policy = load_policy(args.dir, deterministic=args.deterministic)\n env = load_env(render=not args.norender)\n\n run_policy(env, policy, args.horizon, args.episodes, not args.norender)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', type=str, default='.',\n help='path to the tf directory')\n parser.add_argument('--horizon', '-H', type=int, default=1000)\n parser.add_argument('--episodes', '-n', type=int, default=100)\n parser.add_argument('--deterministic', '-d', action='store_true')\n parser.add_argument('--norender', '-nr', action='store_true')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.6334026455879211, "alphanum_fraction": 0.6583506464958191, "avg_line_length": 26.226415634155273, "blob_id": "17a7ade4ef9a705138aa18f2b2858e9922d48a14", "content_id": "294695bfd74efd953c7eb7bc4c35dc23e02db9c1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "permissive", "max_line_length": 105, "num_lines": 53, "path": "/robolearn/envs/simple_envs/frozen_lake/test_script.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "# Some basic imports and setup\nimport time\nimport numpy as np, numpy.random as nr, gym\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=3)\nfrom robolearn.old_envs.frozen_lake import FrozenLakeEnv\nfrom robolearn.old_envs.frozen_lake import FrozenLakeMultiEnv\n\n\n\nrender = 'human'\n# render = 'rgb_array'\nenv = FrozenLakeEnv(map_name='4x4', is_slippery=False, reward_dict={'G': 1, 'S': 0, 'H': 0, 'F': 0})\n\nenv.seed(0); from gym.spaces import prng; prng.seed(10)\nif render != 'human':\n img_width = 240\n img_height = 240\n fig, ax = plt.subplots(1, 1)\n my_image = 
ax.imshow(0.5*np.ones((img_width, img_height, 3)), interpolation='nearest', animated=True)\n fig.canvas.draw()\n plt.ion()\n fig.show()\n\n# Generate the episode\nenv.reset()\nfor t in range(100):\n print('Iter %d' % t)\n time.sleep(0.5)\n rgb_image = env.render(mode=render)\n if render != 'human':\n my_image.set_data(rgb_image)\n ax.draw_artist(my_image)\n fig.canvas.blit(ax.bbox) # redraw the axes rectangle\n plt.pause(0.0001)\n\n a = env.action_space.sample()\n ob, rew, done, _ = env.step(a)\n if done:\n break\nassert done\n\nrgb_image = env.render(mode=render)\n\nif render != 'human':\n print(rgb_image.shape)\n my_image.set_data(rgb_image)\n ax.draw_artist(my_image)\n fig.canvas.blit(ax.bbox) # redraw the axes rectangle\n plt.pause(0.0001)\n\ninput('Press a key to close')\nenv.render(close=True)\n" }, { "alpha_fraction": 0.5412536263465881, "alphanum_fraction": 0.5444007515907288, "avg_line_length": 31.20270347595215, "blob_id": "1fc9e5cd95419d8d4d59a6a5879ed41d34a82836", "content_id": "e951c4b90b5bba2848af1c9f0e78b672ace89fa4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19065, "license_type": "permissive", "max_line_length": 78, "num_lines": 592, "path": "/robolearn/algorithms/rl_algos/rl_algorithm.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\nimport pickle\nimport time\n\nimport gtimer as gt\nimport numpy as np\n\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils.data_management.path_builder import PathBuilder\nfrom robolearn.utils.samplers.in_place_path_sampler import InPlacePathSampler\nfrom robolearn.utils.samplers.finite_path_sampler import FinitePathSampler\n\nfrom collections import OrderedDict\nfrom robolearn.utils import eval_util\n\n\nclass RLAlgorithm(with_metaclass(abc.ABCMeta, object)):\n \"\"\"\n Base Reinforcement Learning algorithm interface.\n \"\"\"\n\n def __init__(\n self,\n explo_env,\n explo_policy,\n eval_env=None,\n eval_policy=None,\n finite_horizon_eval=False,\n obs_normalizer=None,\n\n algo_mode='online',\n num_epochs=100,\n num_steps_per_epoch=10000,\n num_steps_per_eval=1000,\n max_path_length=1000,\n\n min_steps_start_train=0,\n min_start_eval=0,\n\n num_updates_per_train_call=1,\n\n discount=0.99,\n\n render=False,\n\n save_algorithm=False,\n save_environment=True,\n\n epoch_plotter=None,\n ):\n # type: # (gym.Env, ExplorationPolicy) -> None\n \"\"\"\n Base class for RL Algorithms\n :param explo_env: Environment used for training.\n :param explo_policy: Policy used to explore during training\n (Behavior policy).\n :param eval_env: Environment used for evaluation. 
By default, a\n copy of `env` will be made.\n :param eval_policy: Policy to evaluate with.\n :param num_epochs: Number of training episodes.\n :param num_steps_per_epoch: Number of timesteps per epoch.\n :param num_steps_per_eval: Number of timesteps per evaluation\n :param num_updates_per_train_call: Used by online training mode.\n :param num_updates_per_epoch: Used by batch training mode.\n :param max_path_length: Max length of sampled path (rollout) from env.\n :param min_steps_start_train: Min steps to start training.\n :param min_start_eval: Min steps to start evaluating.\n :param discount: discount factor (gamma).\n :param render: Visualize or not the environment.\n :param save_algorithm: Save or not the algorithm after iterations.\n :param save_environment: Save or not the environment after iterations\n \"\"\"\n # Training environment, policy and state-action spaces\n self.explo_env = explo_env\n self.explo_policy = explo_policy\n self.action_space = explo_env.action_space\n self.obs_space = explo_env.observation_space\n self._obs_normalizer = obs_normalizer\n\n # Evaluation environment, policy and sampler\n self.eval_env = eval_env or pickle.loads(pickle.dumps(explo_env))\n if eval_policy is None:\n eval_policy = explo_policy\n self.eval_policy = eval_policy\n\n if finite_horizon_eval:\n self.eval_sampler = FinitePathSampler(\n env=eval_env,\n policy=eval_policy,\n total_paths=int(num_steps_per_eval/max_path_length),\n max_path_length=max_path_length,\n obs_normalizer=self._obs_normalizer,\n )\n else:\n self.eval_sampler = InPlacePathSampler(\n env=eval_env,\n policy=eval_policy,\n total_samples=num_steps_per_eval,\n max_path_length=max_path_length,\n obs_normalizer=self._obs_normalizer,\n )\n\n # RL algorithm hyperparameters\n self.num_epochs = num_epochs\n self.num_train_steps_per_epoch = num_steps_per_epoch\n self.max_path_length = max_path_length\n self.num_updates_per_train_call = num_updates_per_train_call\n self.num_steps_per_eval = num_steps_per_eval\n\n self._min_steps_start_train = min_steps_start_train\n self._min_steps_start_eval = min_start_eval\n\n # Reward related\n self.discount = discount\n\n # Flag to render while sampling\n self._render = render\n\n # Save flags\n self.save_algorithm = save_algorithm\n self.save_environment = save_environment\n\n # Internal variables\n self._n_epochs = 0\n self._n_env_steps_total = 0 # Accumulated interactions with the env\n self._n_total_train_steps = 0 # Accumulated total training steps\n self._n_epoch_train_steps = 0 # Accumulated epoch's training steps\n self._n_rollouts_total = 0 # Accumulated rollouts\n self._epoch_start_time = None # Wall time\n self._current_path = PathBuilder() # Current path\n self._exploration_paths = [] # All paths in current epoch\n self.post_epoch_funcs = [] # Fcns to call at the end of episode\n\n if algo_mode not in ['online', 'episode']:\n raise TypeError(\"Invalid algo_mode: %s\" % self.algo_mode)\n self._algo_mode = algo_mode\n\n # Logger variables\n self.eval_statistics = None # Dict of stats from evaluation\n self._old_table_keys = None # Previous table keys of the logger\n self._epoch_plotter = epoch_plotter\n\n \"\"\"\n ############################\n ############################\n Methods related to Training.\n ############################\n ############################\n \"\"\"\n def pretrain(self, *args, **kwargs):\n \"\"\"\n Do anything before the training phase.\n \"\"\"\n pass\n\n def train(self, start_epoch=0):\n # Get snapshot of initial algo state\n if start_epoch == 0:\n 
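# Resumption bookkeeping, sketched with illustrative numbers: a few lines
# below, _n_env_steps_total restarts at
# start_epoch * num_train_steps_per_epoch, e.g. 10 * 10000 = 100000 when
# resuming from epoch 10 with 10000 steps per epoch, so the logged step
# counters continue where the interrupted run stopped.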
self._log_initial_data()\n\n self.training_mode(False)\n self._n_env_steps_total = start_epoch * self.num_train_steps_per_epoch\n\n gt.reset()\n gt.set_def_unique(False)\n\n self._current_path = PathBuilder()\n for epoch in gt.timed_for(\n range(start_epoch, self.num_epochs),\n save_itrs=True,\n ):\n self._start_epoch(epoch)\n obs = self._start_new_rollout()\n for ss in range(self.num_train_steps_per_epoch):\n obs = self._take_step_in_env(obs)\n gt.stamp('sample')\n\n if self._algo_mode == 'online':\n self._try_to_train()\n gt.stamp('train')\n\n if self._algo_mode == 'episode':\n self._try_to_train()\n gt.stamp('train')\n\n self._try_to_eval(epoch)\n gt.stamp('eval')\n self._end_epoch(epoch)\n\n def training_mode(self, mode):\n \"\"\"\n Set training mode to `mode`.\n :param mode: If True, training will happen (e.g. set the dropout\n probabilities to not all ones).\n \"\"\"\n pass\n\n def _take_step_in_env(self, observation):\n # Get policy action\n if self._obs_normalizer is None:\n policy_input = observation\n else:\n policy_input = self._obs_normalizer.normalize(observation)\n action, pol_info = self.explo_policy.get_action(\n policy_input,\n )\n\n if self._render:\n self.explo_env.render()\n\n # Interact with environment\n next_obs, reward, terminal, env_info = (\n self.explo_env.step(action)\n )\n\n # Increase counter\n self._n_env_steps_total += 1\n\n terminal = np.array([terminal])\n reward = np.array([reward])\n\n self._handle_step(\n observation,\n action,\n reward,\n next_obs,\n terminal,\n agent_info=pol_info,\n env_info=env_info,\n )\n\n # Check it we need to start a new rollout\n if terminal or (len(self._current_path) >=\n self.max_path_length):\n self._end_rollout()\n next_obs = self._start_new_rollout()\n\n return next_obs\n\n def _try_to_train(self):\n \"\"\"\n Check if the requirements are fulfilled to start or not training.\n Returns:\n\n \"\"\"\n if self._can_train():\n self.training_mode(True)\n for i in range(self.num_updates_per_train_call):\n # Call algorithm-specific training method\n self._do_training()\n self._n_epoch_train_steps += 1\n self._n_total_train_steps += 1\n self.training_mode(False)\n else:\n self._not_do_training()\n\n def _can_train(self):\n \"\"\"\n Training requirements are fulfilled or not.\n\n Train only if you have more data than the batch size in the\n Replay Buffer.\n\n :return (bool):\n \"\"\"\n # Bigger than, because n_env_steps_total updated after sampling\n return self._n_env_steps_total > self._min_steps_start_train\n\n @abc.abstractmethod\n def _do_training(self):\n \"\"\"\n Perform some computations, e.g. 
perform one gradient step.\n :return:\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _not_do_training(self):\n \"\"\"\n Perform something when it is not possible to do training.\n :return:\n \"\"\"\n pass\n\n def _start_epoch(self, epoch):\n \"\"\"\n Computations at the beginning of an epoch.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n self._epoch_start_time = time.time()\n self._exploration_paths = []\n self._n_epochs = epoch + 1\n self._n_epoch_train_steps = 0\n logger.push_prefix('Iteration #%d | ' % epoch)\n\n def _end_epoch(self, epoch):\n \"\"\"\n Computations at the end of an epoch.\n Returns:\n\n \"\"\"\n logger.log(\"Epoch Duration: {0}\".format(\n time.time() - self._epoch_start_time\n ))\n logger.log(\"Started Training: {0}\".format(self._can_train()))\n logger.pop_prefix()\n\n for post_epoch_func in self.post_epoch_funcs:\n post_epoch_func(self, epoch)\n\n def _start_new_rollout(self):\n \"\"\"\n Computations at the beginning of every rollout.\n Returns:\n\n \"\"\"\n self.explo_policy.reset()\n return self.explo_env.reset()\n\n def _end_rollout(self):\n \"\"\"\n Computations at the end of every rollout.\n \"\"\"\n self._n_rollouts_total += 1\n if len(self._current_path) > 0:\n self._exploration_paths.append(\n self._current_path.get_all_stacked()\n )\n self._current_path = PathBuilder()\n\n def _handle_path(self, path):\n \"\"\"\n Computations for a path.\n :param path:\n :return:\n \"\"\"\n for (\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info,\n env_info\n ) in zip(\n path[\"observations\"],\n path[\"actions\"],\n path[\"rewards\"],\n path[\"next_observations\"],\n path[\"terminals\"],\n path[\"agent_infos\"],\n path[\"env_infos\"],\n ):\n self._handle_step(\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n self._end_rollout()\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Computations at every step.\n :return:\n \"\"\"\n # Add data to current path builder\n self._current_path.add_all(\n observations=observation,\n actions=action,\n rewards=reward,\n next_observations=next_observation,\n terminals=terminal,\n agent_infos=agent_info,\n env_infos=env_info,\n )\n\n \"\"\"\n ##############################\n ##############################\n Methods related to Evaluation.\n ##############################\n ##############################\n \"\"\"\n def _try_to_eval(self, epoch, eval_paths=None):\n \"\"\"\n Check if the requirements are fulfilled to start or not an evaluation.\n Args:\n epoch (int): Epoch\n\n Returns:\n\n \"\"\"\n logger.save_extra_data(self.get_extra_data_to_save(epoch))\n if self._can_evaluate():\n # Call algorithm-specific evaluate method\n self.evaluate(epoch)\n\n self._log_data(epoch)\n else:\n logger.log(\"Skipping eval for now.\")\n\n def _can_evaluate(self):\n \"\"\"\n Evaluation requirements are fulfilled or not.\n\n Evaluate only if you have non-zero exploration paths AND you have\n more steps than _min_steps_start_eval. This value can be the minimum\n buffer size in the Replay Buffer.\n\n One annoying thing about the logger table is that the keys at each\n iteration need to be the exact same. So unless you can compute\n everything, skip evaluation.\n\n :return:\n \"\"\"\n return (\n len(self._exploration_paths) > 0\n and self._n_epoch_train_steps >= self._min_steps_start_eval\n )\n\n def evaluate(self, epoch):\n \"\"\"\n Evaluate the policy, e.g. 
save/print progress.\n :param epoch:\n :return:\n \"\"\"\n if self.eval_statistics is None:\n self.eval_statistics = OrderedDict()\n\n statistics = OrderedDict()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Test\",\n ))\n\n if self._exploration_paths:\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n else:\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Exploration\",\n ))\n\n if hasattr(self.explo_env, \"log_diagnostics\"):\n self.explo_env.log_diagnostics(test_paths)\n\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Data to save in file.\n :param epoch:\n :return:\n \"\"\"\n data_to_save = dict(\n epoch=epoch,\n exploration_policy=self.explo_policy,\n )\n\n if self.save_environment:\n data_to_save['env'] = self.explo_env\n\n return data_to_save\n\n def get_extra_data_to_save(self, epoch):\n \"\"\"\n Save things that shouldn't be saved every snapshot but rather\n overwritten every time.\n :param epoch:\n :return:\n \"\"\"\n data_to_save = dict(\n epoch=epoch,\n )\n if self.save_environment:\n data_to_save['env'] = self.explo_env\n if self.save_algorithm:\n data_to_save['algorithm'] = self\n return data_to_save\n\n def _log_initial_data(self):\n self.training_mode(False)\n\n params = self.get_epoch_snapshot(-1)\n logger.save_itr_params(-1, params)\n\n self.evaluate(-1)\n logger.record_tabular(\"Number of train steps total\", 0)\n logger.record_tabular(\"Number of env steps total\", 0)\n logger.record_tabular(\"Number of rollouts total\", 0)\n logger.record_tabular('Train Time (s)', 0)\n logger.record_tabular('(Previous) Eval Time (s)', 0)\n logger.record_tabular('Sample Time (s)', 0)\n logger.record_tabular('Epoch Time (s)', 0)\n logger.record_tabular('Total Train Time (s)', 0)\n logger.record_tabular(\"Epoch\", 0)\n\n logger.dump_tabular(with_prefix=False, with_timestamp=False,\n write_header=True)\n\n def _log_data(self, epoch, write_header=False):\n # Update logger parameters with algorithm-specific variables\n params = self.get_epoch_snapshot(epoch)\n logger.save_itr_params(epoch, params)\n\n # Check that logger parameters (table keys) did not change.\n table_keys = logger.get_table_key_set()\n if self._old_table_keys is not None:\n if not table_keys == self._old_table_keys:\n error_text = \"Table keys cannot change from iteration \" \\\n \"to iteration.\\n\"\n error_text += 'table_keys: '\n error_text += str(table_keys)\n error_text += '\\n'\n error_text += 'old_table_keys: '\n error_text += str(self._old_table_keys)\n error_text += 'not in new: '\n error_text += str(np.setdiff1d(list(table_keys),\n list(self._old_table_keys))\n )\n error_text += 'not in old:'\n error_text += str(np.setdiff1d(list(self._old_table_keys),\n list(table_keys))\n )\n raise AttributeError(error_text)\n self._old_table_keys = table_keys\n\n # Add the number of steps to the logger\n logger.record_tabular(\n \"Number of train steps total\",\n self._n_total_train_steps,\n )\n logger.record_tabular(\n \"Number of env steps total\",\n self._n_env_steps_total,\n )\n logger.record_tabular(\n \"Number of rollouts total\",\n self._n_rollouts_total,\n )\n\n # Get useful times\n times_itrs = gt.get_times().stamps.itrs\n train_time = times_itrs['train'][-1]\n sample_time = times_itrs['sample'][-1]\n # eval_time = times_itrs['eval'][-1] if epoch > 0 else 0\n eval_time = times_itrs['eval'][-1] if 'eval' in times_itrs else 0\n epoch_time = train_time + sample_time + eval_time\n total_time = gt.get_times().total\n\n # Add the previous times to the logger\n logger.record_tabular('Train Time (s)', train_time)\n logger.record_tabular('(Previous) Eval Time (s)', eval_time)\n logger.record_tabular('Sample Time (s)', sample_time)\n logger.record_tabular('Epoch Time (s)', epoch_time)\n logger.record_tabular('Total Train Time (s)', total_time)\n\n # Add the number of epoch to the logger\n logger.record_tabular(\"Epoch\", epoch)\n\n # Dump the logger data\n logger.dump_tabular(with_prefix=False, with_timestamp=False,\n write_header=write_header)\n\n" }, { "alpha_fraction": 0.6199424266815186, "alphanum_fraction": 0.6317927837371826, "avg_line_length": 27.60280418395996, "blob_id": "f7f4a163a76522d9d87bb0187f6cfedc4f7295f8", "content_id": "33db26ede8eeaecc32ec35e13f869cc98455123d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5907, "license_type": "permissive", "max_line_length": 68, "num_lines": 214, "path": "/examples/miscellaneous/test_composed_multipol.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.policies import TanhGaussianComposedMultiPolicy\n\nbatch_size = 100\nobs_dim = 9\naction_dim = 6\nn_policies = 2\nlatent_dim = 4\n\nnn_pol = TanhGaussianComposedMultiPolicy(\n obs_dim,\n action_dim,\n n_policies,\n latent_dim,\n shared_hidden_sizes=[20],\n unshared_hidden_sizes=[30],\n unshared_mix_hidden_sizes=[40],\n unshared_policy_hidden_sizes=[50],\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=1e-2,\n output_w_init='xavier_normal',\n output_b_init_val=1e-2,\n pol_output_activation='linear',\n mix_output_activation='linear',\n final_pol_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n final_policy_layer_norm=False,\n reparameterize=True,\n epsilon=1e-6,\n softmax_weights=False,\n mixing_temperature=1.,\n)\n\nprint('##'*10)\nprint(nn_pol)\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('MODULE PARAMETERS:')\nfor name, p in nn_pol.named_parameters():\n print(name, p.shape)\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('SHARED PARAMETERS:')\nfor name, p in nn_pol.named_shared_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('MIXING PARAMETERS:')\nfor name, p in nn_pol.named_mixing_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('ALL POLICIES PARAMETERS:')\nfor name, p in nn_pol.named_policies_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('SPECIFIC POLICY PARAMETERS:')\nfor pol_idx in range(nn_pol.n_heads):\n print('--- POLICY ', pol_idx, ' ---')\n for name, p in nn_pol.named_policies_parameters(idx=pol_idx):\n print(name, p.shape)\n print(p.data)\n print('.')\n# input('Press a key to continue...')\nprint('\\n')\nprint('##'*10)\nprint('FINAL POLICY PARAMETERS:')\nfor name, p in nn_pol.named_final_policy_parameters():\n print(name, p.shape)\n print(p.data)\n print('.')\n# input('Press a key to continue...')\nprint('\\n')\nprint('##\\n'*5)\nprint('ALL PARAMETERS:')\nfor param in nn_pol.parameters():\n print(param.shape)\n\nprint('##\\n'*5)\n# input(\"Press a key to start training...\")\n\n\nobs = torch.rand((batch_size, obs_dim))\n\nact_des = torch.rand((batch_size, action_dim))\n# act_des = torch.tensor([[0.1],\n# [0.1],\n# [0.3]])\nact_des = act_des.uniform_(-1, 1)\n\n# o = nn_pol(a, _val_idxs=[0], deterministic=True)\n# error = torch.sum(b0 - o[0][0])\n\nloss_fn = torch.nn.MSELoss(size_average=False)\nlearning_rate = 1e-2\noptimizer_pol = torch.optim.Adam([\n {'params': nn_pol.mixing_parameters(),\n 'lr': learning_rate},\n {'params': nn_pol.policies_parameters(),\n 'lr': learning_rate},\n {'params': nn_pol.shared_parameters(),\n 'lr': learning_rate},\n])\n\nprint('obs shape:', obs.shape)\nprint('action shape:', act_des.shape)\n\n\nshared_params_initial = list()\nfor param in nn_pol.shared_parameters():\n shared_params_initial.append(param.data.clone())\npolicies_params_initial = list()\nfor param in nn_pol.policies_parameters():\n policies_params_initial.append(param.data.clone())\nmixing_params_initial = list()\nfor param in nn_pol.mixing_parameters():\n mixing_params_initial.append(param.data.clone())\n\noutput_initial = nn_pol(obs, deterministic=True)\n\nfor tt in range(1000):\n act_pred, policy_info = nn_pol(obs, deterministic=False,\n optimize_policies=True,\n return_log_prob=True)\n\n log_pi = policy_info['log_prob']\n policy_mean = policy_info['mean']\n policy_log_std = policy_info['log_std']\n pre_tanh_value = policy_info['pre_tanh_value']\n print('ent:', log_pi.mean())\n\n # loss = loss_fn(act_pred, act_des)\n loss = loss_fn(log_pi, act_des[:, 0].unsqueeze(dim=-1))\n # loss = loss_fn(policy_mean, act_des)\n # loss = loss_fn(policy_log_std, act_des)\n # loss = loss_fn(pre_tanh_value, act_des)\n\n print('t=', tt, '| loss=', loss.item())\n\n optimizer_pol.zero_grad()\n loss.backward()\n\n if tt == 0:\n print('Showing the gradients')\n for name, param in nn_pol.named_parameters():\n print('----')\n print(name, '\\n', param.grad)\n # input('Press a key to continue training...')\n\n optimizer_pol.step()\n\n# error.backward()\n\nprint('='*10)\n# print('='*10)\n# output = nn_pol(obs, deterministic=True)\n# print('Initial output')\n# for key, val in output_initial[1].items():\n# print(key, '\\n', val)\n# print('==')\n# print('Final output')\n# for key, val in output[1].items():\n# print(key, '\\n', val)\n# print('action_des', act_des)\n# print('action_pred_initial', output_initial[0])\n# print('action_pred', output[0])\n# print('action_one_by_one')\nfor ii in range(batch_size):\n print(ii, '-->', nn_pol(obs[ii], deterministic=True)[0])\n\ninput('Show parameters...')\n\nprint('##\\n'*2)\n\nshared_params_final = list()\nfor param in nn_pol.shared_parameters():\n shared_params_final.append(param.data.clone())\npolicies_params_final = list()\nfor param in nn_pol.policies_parameters():\n policies_params_final.append(param.data.clone())\nmixing_params_final = list()\nfor param in nn_pol.mixing_parameters():\n mixing_params_final.append(param.data.clone())\n\nprint('##\\n'*2)\nprint('LOSS', loss)\nfor name, param in nn_pol.named_parameters():\n print('--')\n print('NAME', name)\n print('DATA', param.data)\n print('GRAD', param.grad)\n\nprint('init_shared')\nprint([pp.mean() for pp in shared_params_initial])\nprint('final_shared')\nprint([pp.mean() for pp in shared_params_final])\ninput('Press a key to FINISH THE SCRIPT')\n" }, { "alpha_fraction": 0.635918915271759, "alphanum_fraction": 0.649895191192627, "avg_line_length": 45.129032135009766, "blob_id": "4a9860bef7d4552110585e9243c93e859f25288d", "content_id": "15ac8a7061b1f0545e3258bc305b3af5fba150a3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 92, "num_lines": 31, "path": "/scenarios/humanoids2018/plots/others/avg_specific_costs.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom robolearn.old_utils.plots.policy_cost import plot_policy_cost\nfrom robolearn.old_utils.plots.avg_specific_costs import plot_avg_specific_costs\nfrom robolearn.old_utils.plots.duals import plot_duals\n\nmethod = 'gps' # 'gps' or 'trajopt'\ngps_directory_names = ['gps_log4', 'gps_log7', 'gps_log8']#, 'reacher_log2', 'reacher_log3']\ngps_models_labels = ['MDGPS', 'MDGPS no 1/6 worst', 'MDGPS no 2/6 worst']\nitr_to_load = None # list(range(8))\nblock = False\nspecific_costs = [3, 4] #None # None for all costs\nlatex_plot = True\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\n# conds_to_combine = list(range(12))\nconds_to_combine = list(range(4))\nplot_avg_specific_costs(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block,\n conds_to_combine=conds_to_combine,\n specific_costs=specific_costs, latex_plot=latex_plot)\n\nconds_to_combine = list(range(12, 15))\nconds_to_combine = list([4])\nplot_avg_specific_costs(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block,\n conds_to_combine=conds_to_combine,\n specific_costs=specific_costs, latex_plot=latex_plot)\n\ninput('Showing plots. Press a key to close...')\n\n" }, { "alpha_fraction": 0.654554009437561, "alphanum_fraction": 0.6748466491699219, "avg_line_length": 41.380001068115234, "blob_id": "4b7469894a62baf7ea25910d7be01670ca79f00c", "content_id": "e3bb6c1a7562a876a4f27d9fef6a2bb465bec383", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2119, "license_type": "permissive", "max_line_length": 117, "num_lines": 50, "path": "/scenarios/tests/load_plot_sample_list.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom robolearn.old_utils.plot_utils import plot_sample_list, plot_sample_list_distribution, plot_sample\n\n# gps_directory_name = 'GPS_2017-07-13_11:30:33'\ngps_directory_name = 'GPS_2017-07-14_10:05:47'\n\ngps_itr = 5 # GPS Iteration number\nsample_number = 0 # If None, plot all the samples and show their mean, min and max\npol_sample = True # If false, load the traj_sample\n\nplot_states = True\nplot_actions = True\nplot_obs = False\n\n\ngps_path = '/home/desteban/workspace/robolearn/scenarios/' + gps_directory_name\n\nif pol_sample:\n sample_list = pickle.load(open(gps_path+'/pol_sample_itr_'+str('%02d' % gps_itr)+'.pkl', 'rb'))\nelse:\n sample_list = pickle.load(open(gps_path+'/traj_sample_itr_'+str('%02d' % gps_itr)+'.pkl', 'rb'))\n\ntotal_conditions = len(sample_list)\n\n# for cond in range(total_conditions):\n# plot_sample_list(sample_list[cond], data_to_plot='actions', block=False, cols=3)\n# #plot_sample_list(sample_list[cond], data_to_plot='states', block=False, cols=3)\n# #plot_sample_list(sample_list[cond], data_to_plot='obs', block=False, cols=3)\n# raw_input('Showing plots')\n\nfor cond in range(total_conditions):\n if plot_actions:\n if sample_number is None:\n plot_sample_list_distribution(sample_list[cond], data_to_plot='actions', block=False, cols=3)\n else:\n plot_sample(sample_list[cond][sample_number], data_to_plot='actions', block=False, cols=3, color='black')\n if plot_states:\n if sample_number is None:\n plot_sample_list_distribution(sample_list[cond], data_to_plot='states', block=False, cols=3)\n else:\n plot_sample(sample_list[cond][sample_number], data_to_plot='states', block=False, cols=3, color='green')\n if plot_obs:\n if sample_number is None:\n plot_sample_list_distribution(sample_list[cond], data_to_plot='obs', block=False, cols=3)\n else:\n plot_sample(sample_list[cond][sample_number], data_to_plot='obs', block=False, cols=3, color='blue')\n\nraw_input('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.5185873508453369, "alphanum_fraction": 0.5254027247428894, "avg_line_length": 30.038461685180664, "blob_id": "6fa0b67668768c5e0f655ae28ff7f1b3d990871c", "content_id": "cfbf7f1a7981fea664a3d27d763a6354a6b9de84", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/robolearn/torch/models/values/nn_vfunction.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.torch.utils.nn import Mlp\nfrom robolearn.utils.serializable import Serializable\nfrom robolearn.models import VFunction\n\n\nclass NNVFunction(Mlp, Serializable, VFunction):\n def __init__(self,\n obs_dim,\n hidden_sizes=(100, 100),\n **kwargs):\n VFunction.__init__(self,\n obs_dim=obs_dim)\n\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n self.save_init_params(locals())\n Mlp.__init__(self,\n hidden_sizes=hidden_sizes,\n input_size=obs_dim,\n output_size=1,\n **kwargs\n )\n\n def get_value(self, obs_np, **kwargs):\n values, info_dict = \\\n self.get_values(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n if isinstance(val, np.ndarray):\n info_dict[key] = val[0, :]\n\n return values[0, :], info_dict\n\n def get_values(self, obs_np, **kwargs):\n return self.eval_np(obs_np, **kwargs)\n\n def forward(self, obs, return_preactivations=False):\n nn_ouput = Mlp.forward(self, obs,\n return_preactivations=return_preactivations)\n\n if return_preactivations:\n value = nn_ouput[0]\n pre_activations = nn_ouput[1]\n info_dict = dict(\n pre_activations=pre_activations,\n )\n else:\n value = nn_ouput\n info_dict = dict()\n\n return value, info_dict\n" }, { "alpha_fraction": 0.5406504273414612, "alphanum_fraction": 0.5447154641151428, "avg_line_length": 30.465116500854492, "blob_id": "fb27e4bb5422e2f35712b73188d4b10fffbcc7e0", "content_id": "62f94a27edc923c212e79232b4e5a9ab27cbb515", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2706, "license_type": "permissive", "max_line_length": 80, "num_lines": 86, "path": "/robolearn/torch/policies/sampling_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom torch.distributions import Normal\n\nfrom robolearn.torch.utils.nn import Mlp\nfrom robolearn.models.policies import ExplorationPolicy\nimport robolearn.torch.utils.pytorch_util as ptu\n\n\nclass SamplingPolicy(Mlp, ExplorationPolicy):\n \"\"\"Sampling NN policy.\"\"\"\n def __init__(self,\n obs_dim,\n action_dim,\n hidden_sizes,\n squash=True,\n **kwargs\n ):\n self.save_init_params(locals()) # For serialization\n\n # MLP Init\n super(SamplingPolicy, self).__init__(\n hidden_sizes,\n input_size=obs_dim + action_dim, # +action_dim id for stochasticity\n output_size=action_dim,\n **kwargs\n )\n\n self._action_dim = action_dim\n self._latent_dist = Normal(torch.Tensor([0]), torch.Tensor([1]))\n\n self._squash = squash\n\n # # TODO: WE ARE INITIALIZING LAST LAYER WEIGHTS WITH XAVIER\n # nn_pol.init.xavier_normal_(self.last_pfcs.weight.data)\n # # self.last_pfcs.bias.data.zero_()\n\n def get_action(self, obs_np, deterministic=False):\n # TODO: CHECK IF INDEX 0\n actions, info_dict = self.get_actions(obs_np[None],\n deterministic=deterministic)\n for key, val in info_dict.items():\n if isinstance(val, np.ndarray):\n info_dict[key] = val[0, :]\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, deterministic=False):\n return self.eval_np(obs_np, deterministic=deterministic)\n\n def forward(\n self,\n obs,\n deterministic=False,\n ):\n \"\"\"\n :param obs: Observation\n :param deterministic: If True, do not sample\n \"\"\"\n # TODO: HOW TO DETERMINISTIC???\n latent_shape = (*list(obs.shape)[:-1], self._action_dim)\n if deterministic:\n latent = torch.zeros(latent_shape)\n else:\n latent = self._latent_dist.sample(latent_shape).squeeze(-1)\n\n if ptu.gpu_enabled():\n latent = latent.cuda()\n\n h = torch.cat([obs, latent], dim=-1)\n # print('--- INPUT ---')\n # print(torch.cat([obs, latent], dim=-1)[:5, :])\n for i, fc in enumerate(self.fcs):\n # h = self.hidden_activation(fc(h))\n h = fc(h)\n if self.layer_norm and i < len(self.fcs) - 1:\n h = self.layer_norms[i](h)\n h = self.hidden_activation(h)\n\n action = self.last_fc(h)\n\n if self._squash:\n action = torch.tanh(action)\n\n info_dict = dict()\n\n return action, info_dict\n" }, { "alpha_fraction": 0.5246737003326416, "alphanum_fraction": 0.5307226777076721, "avg_line_length": 39.79220962524414, "blob_id": "f26b507db320c6affb7cc2c928f82412010143b8", "content_id": "9a35ca00cef203b2852705fc0188ec87e03c2876", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3141, "license_type": "permissive", "max_line_length": 83, "num_lines": 77, "path": "/robolearn/torch/utils/data_management/simple_replay_buffer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.utils import pytorch_util as ptu\n\nfrom robolearn.utils.data_management.replay_buffer import ReplayBuffer\n\n\nclass SimpleReplayBuffer(ReplayBuffer):\n def __init__(\n self, max_size, obs_dim, action_dim,\n ):\n if not max_size > 1:\n raise ValueError(\"Invalid Maximum Replay Buffer Size: {}\".format(\n max_size)\n )\n\n max_size = int(max_size)\n\n self._obs_buffer = torch.zeros((max_size, obs_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._next_obs_buffer = torch.zeros((max_size, obs_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._acts_buffer = torch.zeros((max_size, action_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._rewards_buffer = torch.zeros((max_size, 1),\n dtype=torch.float32,\n device=ptu.device)\n self._terminals_buffer = torch.zeros((max_size, 1),\n dtype=torch.float32,\n device=ptu.device)\n\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n self._max_size = max_size\n self._top = 0\n self._size = 0\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n self._obs_buffer[self._top] = torch.as_tensor(observation)\n self._acts_buffer[self._top] = torch.as_tensor(action)\n self._rewards_buffer[self._top] = torch.as_tensor(reward)\n self._terminals_buffer[self._top] = torch.as_tensor(terminal.astype(float))\n self._next_obs_buffer[self._top] = torch.as_tensor(next_observation)\n self._advance()\n\n def terminate_episode(self):\n pass\n\n def _advance(self):\n self._top = (self._top + 1) % self._max_size\n if self._size < self._max_size:\n self._size += 1\n\n def random_batch(self, batch_size):\n if batch_size > self._size:\n raise AttributeError('Not enough samples to get. %d bigger than '\n 'current %d!' % (batch_size, self._size))\n\n indices = torch.randint(0, self._size, (batch_size,), dtype=torch.long,\n device=ptu.device)\n return dict(\n observations=self.buffer_index(self._obs_buffer, indices),\n actions=self.buffer_index(self._acts_buffer, indices),\n rewards=self.buffer_index(self._rewards_buffer, indices),\n terminals=self.buffer_index(self._terminals_buffer, indices),\n next_observations=self.buffer_index(self._next_obs_buffer, indices),\n )\n\n def available_samples(self):\n return self._size\n\n @staticmethod\n def buffer_index(buffer, indices):\n return torch.index_select(buffer, dim=0, index=indices)\n" }, { "alpha_fraction": 0.6812227368354797, "alphanum_fraction": 0.6986899375915527, "avg_line_length": 16.357142448425293, "blob_id": "13103586232382fa05fca13b87b7e75eecc9266d", "content_id": "f1572ea2174426e75d1eb20a6cf58330543f5113", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "permissive", "max_line_length": 46, "num_lines": 14, "path": "/examples/miscellaneous/progress_bar.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import time\nfrom robolearn.utils.stdout import ProgressBar\n\nmax_value = 67\ntitle = \"Dummy bar\"\n\nbar = ProgressBar(max_value, bar_title=title)\n\nfor i in range(max_value):\n time.sleep(0.1)\n bar.update(i)\n\nbar.end()\nprint('')\n" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.7924528121948242, "avg_line_length": 25.5, "blob_id": "dfafe8b6d8bf24bc8098ef28e165aae17b6b6e8", "content_id": "caa7fda6c12d36144b767e6c4f5565be354892f1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "permissive", "max_line_length": 29, "num_lines": 2, "path": "/robolearn/torch/algorithms/rl_algos/ddpg/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .ddpg import DDPG\nfrom .hiu_ddpg import HIUDDPG\n" }, { "alpha_fraction": 0.5892314314842224, "alphanum_fraction": 0.6009552478790283, "avg_line_length": 31.91428565979004, "blob_id": "c2ce87796c733770105fa7a817ec7735a3dd6da2", "content_id": "b4be4c3324a31c4593a508c3b8b0c33b3237dbb9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "permissive", "max_line_length": 80, "num_lines": 70, "path": "/robolearn/torch/algorithms/rl_algos/sql/kernel.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Haarnoja's TF implementation\n\nhttps://github.com/haarnoja/softqlearning\n\"\"\"\n\nimport numpy as np\nimport torch\n\n\ndef adaptive_isotropic_gaussian_kernel(xs, ys, h_min=1e-3):\n \"\"\"Gaussian kernel with dynamic bandwith.\n\n The bandwith is adjusted dynamically to match median_distance / log(Kx).\n See [2] for more information.\n\n Args:\n xs (`torch.Tensor`): A tensor of shape (N x Kx x D) containing N sets of\n Kx particles of dimension D. This is the first kernel argument.\n ys (`torch.Tensor`): A tensor of shape (N x Ky x D) containing N sets of\n Ky particles of dimension D. This is the second kernel argument.\n h_min (`float`): Minimum bandwith.\n\n Returns:\n `dict`: Returned dictionary has two fields:\n `output`: A `torch.Tensor` object of shape (N x Kx x Ky)\n representing the kernel matrix for inputs `xy` and `ys`.\n `gradient`: A `torch.Tensor`\n \"\"\"\n Kx, D = xs.shape[-2:]\n Ky, D2 = ys.shape[-2:]\n assert D == D2\n\n leading_shape = list(xs.shape[:-2])[-1]\n\n # Compute the pairwise distances of left and right particles.\n diff = torch.unsqueeze(xs, -2) - torch.unsqueeze(ys, -3)\n # ... x Kx x Ky x D\n\n dist_sq = torch.sum(diff**2, dim=-1, keepdim=False)\n # ... x Kx x Ky\n\n # Get median.\n input_shape = (leading_shape, Kx * Ky)\n type(leading_shape)\n values, _ = torch.topk(\n dist_sq.view(*input_shape),\n k=(Kx * Ky // 2 + 1), # This is exactly true only if Kx*Ky is odd.\n largest=True,\n sorted=True # ... x floor(Ks*Kd/2)\n )\n\n medians_sq = values[..., -1] # ... (shape) (last element is the median)\n\n h = medians_sq / np.log(Kx) # ... (shape)\n h = torch.clamp(h, min=h_min)\n h.detach_() # TODO: We can have a problem if inputs are not Variable\n h_expanded_twice = torch.unsqueeze(torch.unsqueeze(h, -1), -1)\n # ... x 1 x 1\n\n kappa = torch.exp(-dist_sq / h_expanded_twice) # ... x Kx x Ky\n\n # Construct the gradient\n h_expanded_thrice = torch.unsqueeze(h_expanded_twice, -1)\n # ... x 1 x 1 x 1\n kappa_expanded = torch.unsqueeze(kappa, -1)\n kappa_grad = -2 * diff / h_expanded_thrice * kappa_expanded\n # ... x Kx x Ky x D\n\n return {'output': kappa, 'gradient': kappa_grad}" }, { "alpha_fraction": 0.5402342081069946, "alphanum_fraction": 0.5432565212249756, "avg_line_length": 28.730337142944336, "blob_id": "69a128852cc5bac894a74f6abedc1bb998dc695e", "content_id": "de7134631acf225a4db63e2f441c04ad2485d1c8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2647, "license_type": "permissive", "max_line_length": 80, "num_lines": 89, "path": "/robolearn/utils/exploration_strategies/smooth_noise_strategy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport numpy.random as nr\nimport scipy.ndimage as sp_ndimage\n\nfrom robolearn.utils.exploration_strategies.base import ExplorationStrategy\nfrom robolearn.utils.serializable import Serializable\n\n\nclass SmoothNoiseStrategy(ExplorationStrategy, Serializable):\n \"\"\"\n Based on Finn gps implementation.\n \"\"\"\n\n def __init__(\n self,\n action_space,\n horizon,\n smooth=True,\n renormalize=True,\n sigma=10.0,\n sigma_scale=None,\n ):\n Serializable.quick_init(self, locals())\n\n self._action_space = action_space\n self.low = action_space.low\n self.high = action_space.high\n\n self._horizon = horizon\n\n self._smooth = smooth\n self._renormalize = renormalize\n self._sigma = sigma\n\n if sigma_scale is None:\n self._sigma_scale = np.ones(self.action_dim)\n else:\n # Check if iterable\n try:\n iter(sigma_scale)\n if len(sigma_scale) != self.action_dim:\n raise ValueError(\"Sigma scale different than action dim\"\n \"(%02d != %02d)\" % (sigma_scale,\n self.action_dim))\n self._sigma_scale = sigma_scale\n except TypeError as te:\n self._sigma_scale = np.repeat(sigma_scale, self.action_dim)\n\n self.noise = None\n self.reset()\n\n @property\n def action_dim(self):\n return np.prod(self._action_space.shape)\n\n def reset(self):\n noise = nr.randn(self._horizon, self.action_dim)\n\n noises = list()\n noises.append(noise.copy())\n\n # Smooth noise\n if self._smooth:\n for i in range(self.action_dim):\n noise[:, i] = \\\n sp_ndimage.filters.gaussian_filter(noise[:, i], self._sigma)\n\n noises.append(noise.copy())\n\n # Renormalize\n if self._renormalize:\n variance = np.var(noise, axis=0)\n noise = noise * np.sqrt(self._sigma_scale) / np.sqrt(variance)\n\n noises.append(noise.copy())\n else:\n noise = noise*np.sqrt(self._sigma_scale)\n\n self.noise = noise\n\n def get_action(self, policy, *args, **kwargs):\n t = kwargs['t']\n kwargs['noise'] = self.noise[t, :]\n action, pol_info = policy.get_action(*args, **kwargs)\n\n return np.clip(action, self.low, self.high), pol_info\n\n def get_actions(self, t, observation, policy, **kwargs):\n raise NotImplementedError\n\n" }, { "alpha_fraction": 0.37343358993530273, "alphanum_fraction": 0.45739349722862244, "avg_line_length": 25.600000381469727, "blob_id": "931ebdc148b2fd5539c75254107352074c993a7d", "content_id": "5ab571210aa1a3b45bf9d63639522558fa7a13ac", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "permissive", "max_line_length": 52, "num_lines": 30, "path": "/robolearn/utils/stdout/print_color.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\n\n\nclass PrintColor(object):\n def __init__(self):\n self.colors = {\n 'WHITE': \"\\033[1;37m\",\n 'RED': \"\\033[1;31m\",\n 'GREEN': \"\\033[1;32m\",\n 'YELLOW': \"\\033[1;33m\",\n 'BLUE': \"\\033[1;34m\",\n 'MAGENTA': \"\\033[1;35m\",\n 'CYAN': \"\\033[1;36m\",\n 'GRAY': \"\\033[1;37m\",\n 'PURPLE': \"\\033[1;57m\",\n 'RESET': \"\\033[0;0m\",\n 'BOLD': \"\\033[;1m\",\n 'REVERSE': \"\\033[;7m\",\n }\n\n def change(self, color):\n if color.upper() not in self.colors.keys():\n raise ValueError(\"Wrong color!!\")\n sys.stdout.write(self.colors[color.upper()])\n\n def reset(self):\n sys.stdout.write(self.colors['RESET'])\n\n\nchange_print_color = PrintColor()\n" }, { "alpha_fraction": 0.5063334107398987, "alphanum_fraction": 0.5426447987556458, "avg_line_length": 26.160551071166992, "blob_id": "ad7d1c78f5ab6c91ba97778f1b1cd2d51cecd7ef", "content_id": "d55444ba45e32e518c48b4159d3bb6ed350099e9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5921, "license_type": "permissive", "max_line_length": 78, "num_lines": 218, "path": "/scripts/plot_multiple_pusher.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import plot_multiple_process_iu_returns\nfrom robolearn.utils.plots import plot_process_iu_policies\nfrom robolearn.utils.plots import plot_process_iu_values_errors\nfrom robolearn.utils.plots import plot_process_general_data\nfrom robolearn.utils.plots.learning_process_plots import plot_process_haarnoja\nimport json\n\n# SEEDS = [610, 710, 810, 1010]\nSEEDS = [610]\n# MAX_ITER = 590\nMAX_ITER = 500\n# MAX_ITER = 50\n# STEPS_PER_ITER = 3e3\nSTEPS_PER_ITER = None\nLOG_PREFIX = '/home/desteban/logs/objective_test/pusher'\n\n\nhiu_performance_dict = dict()\n# Subtask 01\nhiu_performance_dict['Subtask 01'] = dict()\nhiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacB_1_',\n ius=[0],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 01']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_prompB_1_',\n ius=[0],\n r_scales=[1.e-0],\n)\n# hiu_performance_dict['Subtask 01']['W1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_0_',\n# ius=[0],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 01']['W1-5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new5_0_',\n# ius=[0],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 01']['E1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp_0_',\n# ius=[0],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 01']['M2-5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture2_5_',\n# ius=[0],\n# r_scales=[1.e-0],\n# )\n\n# Subtask 02\nhiu_performance_dict['Subtask 02'] = dict()\n# hiu_performance_dict['Subtask 02']['W1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_0_',\n# ius=[1],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 02']['W1-5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new5_0_',\n# ius=[1],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 02']['E1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp_0_',\n# ius=[1],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Subtask 02']['M2-5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture2_5_',\n# ius=[1],\n# r_scales=[1.e-0],\n# )\nhiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacB_1_',\n ius=[1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 02']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_prompB_1_',\n ius=[1],\n r_scales=[1.e-0],\n)\n\n# Maintask\nhiu_performance_dict['Main Task'] = dict()\n# hiu_performance_dict['Main Task']['W1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_0_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n# # hiu_performance_dict['Main Task']['W1-5'] = dict(\n# # dir='sub-1',\n# # prefix='hiu_sac_new5_0_',\n# # ius=[-1],\n# # r_scales=[1.e-0],\n# # )\n# hiu_performance_dict['Main Task']['E1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp_0_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Main Task']['M2-5'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture2_5_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n# # hiu_performance_dict['Main Task']['W1-1'] = dict(\n# # dir='sub-1',\n# # prefix='hiu_sac_new_1_',\n# # ius=[-1],\n# # r_scales=[1.e-0],\n# # )\n# hiu_performance_dict['Main Task']['M1-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture1_0_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n# # hiu_performance_dict['Main Task']['M1-10'] = dict(\n# # dir='sub-1',\n# # prefix='hiu_sac_new_mixture1_10_',\n# # ius=[-1],\n# # r_scales=[1.e-0],\n# # )\n# hiu_performance_dict['Main Task']['M3-0'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture3_0_',\n# ius=[-1],\n# r_scales=[1.e-0],\n#\nhiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacB_1_',\n ius=[-1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(\n dir='sub-1',\n prefix='hiu_sac_prompB_1_',\n ius=[-1],\n r_scales=[1.e-0],\n)\n\n\ndef get_full_seed_paths(full_dict):\n categories = list(full_dict.keys())\n\n for cc, cate in enumerate(categories):\n expt_dict = full_dict[cate]\n expts = list(expt_dict)\n # print(expt_dict)\n expt_counter = 0\n for ee, expt in enumerate(expts):\n # print(expt['dir'])\n run_dict = expt_dict[expt]\n expt_dir = os.path.join(LOG_PREFIX, run_dict['dir'])\n if len(list_files_startswith(expt_dir, run_dict['prefix'])) > 0:\n expt_counter += 1\n dirs_and_iu = list()\n dir_prefix = os.path.join(expt_dir, run_dict['prefix'])\n # print(dir_prefix)\n for seed in SEEDS:\n full_seed_dir = dir_prefix + str(seed)\n # print('- ', full_seed_dir)\n if os.path.exists(full_seed_dir):\n # print('YES DATA IN: %s' % full_seed_dir)\n dirs_and_iu.append((\n full_seed_dir,\n run_dict['ius'],\n run_dict['r_scales'],\n ))\n full_dict[cate][expt] = dirs_and_iu\n if expt_counter == 0:\n full_dict.pop(cate)\n return full_dict\n\n\ndef list_files_startswith(directory, prefix):\n return list(f for f in os.listdir(directory) if f.startswith(prefix))\n\n\ndef list_files_endswith(directory, suffix):\n return list(f for f in os.listdir(directory) if f.endswith(suffix))\n\n\ndef main():\n directories_dict = get_full_seed_paths(hiu_performance_dict)\n\n plot_multiple_process_iu_returns(\n directories_dict,\n max_iter=MAX_ITER,\n steps_per_iter=STEPS_PER_ITER,\n )\n\n\nif __name__ == '__main__':\n main()\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.5463494658470154, "alphanum_fraction": 0.5777276158332825, "avg_line_length": 26.862857818603516, "blob_id": "9967eed0473f3b6b451be42f1fd8ce626889cdbf", "content_id": "f4ec857d708011cd47d49534720a7adae3582c69", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4876, "license_type": "permissive", "max_line_length": 78, "num_lines": 175, "path": "/scripts/plot_multiple_navigation2d.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import plot_multiple_process_iu_returns\nfrom robolearn.utils.plots import plot_process_iu_policies\nfrom robolearn.utils.plots import plot_process_iu_values_errors\nfrom robolearn.utils.plots import plot_process_general_data\nfrom robolearn.utils.plots.learning_process_plots import plot_process_haarnoja\nimport json\n\n# POSSIBLE_SEEDS = [110, 210, 310, 410, 510, 610, 710, 810, 910, 1010]\n# SEEDS = [610, 710, 810, 910, 1010]\n# MAX_ITER = 500\n# STEPS_PER_ITER = 1e2\n# LOG_PREFIX = '/home/desteban/logs/objective_test/navigation2d'\n\n# SEEDS = [610, 710, 810, 1010]\nSEEDS = [610]\nMAX_ITER = 100\n# STEPS_PER_ITER = 3e3\nSTEPS_PER_ITER = None\nLOG_PREFIX = '/home/desteban/logs/objective_test/navigation2d'\n\nfig_name_prefix = 'Navigation2D_'\n\n\n# Compo: Ient:2, Uents:2\n# Compo2: Ient:2, Uents:1\n\n\nhiu_performance_dict = dict()\n# Subtask 01\nhiu_performance_dict['Subtask 01'] = dict()\nhiu_performance_dict['Subtask 01']['SAC'] = dict(\n dir='sub0',\n prefix='sacX_',\n ius=[-1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacX_',\n ius=[0],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 01']['HIU-SAC-W2'] = dict(\n dir='sub-1',\n prefix='hiu_sacY_',\n ius=[0],\n r_scales=[1.e-0],\n)\n\n\n\n# Subtask 02\nhiu_performance_dict['Subtask 02'] = dict()\nhiu_performance_dict['Subtask 02']['SAC'] = dict(\n dir='sub1',\n prefix='sacX_',\n ius=[-1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacX_',\n ius=[1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Subtask 02']['HIU-SAC-W2'] = dict(\n dir='sub-1',\n prefix='hiu_sacY_',\n ius=[1],\n r_scales=[1.e-0],\n)\n\n\n# Maintask\nhiu_performance_dict['Main Task'] = dict()\nhiu_performance_dict['Main Task']['SAC'] = dict(\n dir='sub-1',\n prefix='sacX_',\n ius=[-1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(\n dir='sub-1',\n prefix='hiu_sacX_',\n ius=[-1],\n r_scales=[1.e-0],\n)\nhiu_performance_dict['Main Task']['HIU-SAC-W2'] = dict(\n dir='sub-1',\n prefix='hiu_sacY_',\n ius=[-1],\n r_scales=[1.e-0],\n)\n# hiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_promp_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n# hiu_performance_dict['Main Task']['HIU-SAC-M'] = dict(\n# dir='sub-1',\n# prefix='hiu_sac_new_mixture_',\n# ius=[-1],\n# r_scales=[1.e-0],\n# )\n\n\ndef get_full_seed_paths(full_dict):\n categories = list(full_dict.keys())\n\n for cc, cate in enumerate(categories):\n expt_dict = full_dict[cate]\n expts = list(expt_dict)\n # print(expt_dict)\n expt_counter = 0\n for ee, expt in enumerate(expts):\n # print(expt['dir'])\n run_dict = expt_dict[expt]\n expt_dir = os.path.join(LOG_PREFIX, run_dict['dir'])\n if len(list_files_startswith(expt_dir, run_dict['prefix'])) > 0:\n expt_counter += 1\n dirs_and_iu = list()\n dir_prefix = os.path.join(expt_dir, run_dict['prefix'])\n # print(dir_prefix)\n for seed in SEEDS:\n full_seed_dir = dir_prefix + str(seed)\n # print('- ', full_seed_dir)\n if os.path.exists(full_seed_dir):\n # print('YES DATA IN: %s' % full_seed_dir)\n dirs_and_iu.append((\n full_seed_dir,\n run_dict['ius'],\n run_dict['r_scales'],\n ))\n full_dict[cate][expt] = dirs_and_iu\n if expt_counter == 0:\n full_dict.pop(cate)\n return full_dict\n\n\ndef list_files_startswith(directory, prefix):\n return list(f for f in os.listdir(directory) if f.startswith(prefix))\n\n\ndef list_files_endswith(directory, suffix):\n return list(f for f in os.listdir(directory) if f.endswith(suffix))\n\n\ndef main(args):\n directories_dict = get_full_seed_paths(hiu_performance_dict)\n\n # directories_dict = get_subtask_and_seed_idxs()\n\n plot_multiple_process_iu_returns(\n directories_dict,\n max_iter=MAX_ITER,\n steps_per_iter=STEPS_PER_ITER,\n fig_name_prefix=fig_name_prefix,\n )\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # parser.add_argument('file', type=str, default='./progress.csv',\n # help='path to the progress.csv file')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n parser.add_argument('--no_in', action='store_false')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.4915662705898285, "alphanum_fraction": 0.4984799027442932, "avg_line_length": 35.881229400634766, "blob_id": "9b5af2d8ef1c71e58581e1eab2865d73a6c6eebb", "content_id": "4a25f4de7133f6a46d9d465c2cc8f4e73ba38807", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44405, "license_type": "permissive", "max_line_length": 95, "num_lines": 1204, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/mdgps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import gtimer as gt\nimport numpy as np\nimport scipy as sp\nimport torch\nimport math\nimport copy\nimport logging\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.utils.logging import logger\nimport robolearn.torch.utils.pytorch_util as ptu\n\n# from robolearn.utils.plots.core import subplots\n\nfrom collections import OrderedDict\n\nfrom robolearn.algorithms.rl_algos import ConstantPolicyPrior\n\nfrom robolearn.algorithms.rl_algos import generate_noise\nfrom robolearn.algorithms.rl_algos import IterationData\nfrom robolearn.algorithms.rl_algos import TrajectoryInfo\nfrom robolearn.algorithms.rl_algos import PolicyInfo\n\n\nfrom robolearn.algorithms.rl_algos import DynamicsLRPrior\nfrom robolearn.algorithms.rl_algos import DynamicsPriorGMM\n\nfrom robolearn.algorithms.rl_algos import TrajOptLQR\n\n\nclass MDGPS(RLAlgorithm):\n def __init__(self,\n env,\n local_policies,\n global_policy,\n cost_fcn,\n eval_env=None,\n train_cond_idxs=None,\n test_cond_idxs=None,\n num_samples=1,\n test_samples=1,\n noisy_samples=True,\n noise_hyperparams=None,\n seed=10,\n base_kl_step=0.1,\n global_opt_iters=5000,\n global_opt_batch_size=64,\n global_opt_lr=1e-5,\n traj_opt_prev='nn_pol',\n traj_opt_iters=1,\n traj_opt_min_eta=1e-8,\n traj_opt_max_eta=1e16,\n **kwargs):\n\n # TO DEFINE\n self._fit_dynamics = True\n self._initial_state_var = 1.0e-2\n\n self._global_opt_batch_size = global_opt_batch_size\n self._global_opt_iters = global_opt_iters\n self._global_opt_ent_reg = 0.0 # For update pol variance\n self._global_pol_sample_mode = 'add'\n self._global_opt_lr = global_opt_lr\n self._global_samples_counter = 0\n self._first_global_eval = False\n\n self.base_kl_step = base_kl_step\n self._max_step_mult = 3.0\n self._min_step_mult = 0.5\n self._kl_step_rule = 'laplace'\n\n self._traj_opt_iters = traj_opt_iters\n self._max_ent_traj = 0.0\n self._traj_opt_prev = traj_opt_prev\n\n self.T = kwargs['max_path_length']\n self._num_samples = num_samples\n self._test_samples = test_samples\n\n self._train_cond_idxs = train_cond_idxs\n self._test_cond_idxs = test_cond_idxs\n\n # Get dimensions from the environment\n self.dU = env.action_dim\n self.dX = env.obs_dim # TODO: DOING THIS TEMPORALLY\n self.dO = env.obs_dim\n\n # Number of initial conditions\n self.M = len(local_policies)\n\n exploration_policy = global_policy\n\n RLAlgorithm.__init__(\n self,\n env=env,\n exploration_policy=exploration_policy,\n eval_env=eval_env,\n eval_policy=global_policy,\n eval_sampler=self.sample_global_pol,\n **kwargs\n )\n\n # Rename for GPS\n self.global_policy = self.eval_policy\n self.local_policies = local_policies\n\n # Noise to be used with trajectory distributions\n self.noise_data = np.zeros((self.num_epochs, self.M,\n self._num_samples,\n self.T, self.dU))\n self._noisy_samples = noisy_samples\n if self._noisy_samples:\n for ii in range(self.num_epochs):\n for cond in range(self.M):\n for n in range(self._num_samples):\n self.noise_data[ii, cond, n, :, :] = \\\n generate_noise(self.T, self.dU, noise_hyperparams)\n\n # IterationData objects for each condition.\n self.cur = [IterationData() for _ in range(self.M)]\n self.prev = [IterationData() for _ in range(self.M)]\n\n # Trajectory Info\n for m in range(self.M):\n self.cur[m].traj_info = TrajectoryInfo()\n\n if self._fit_dynamics:\n sigma_regu = 1e-6\n prior = DynamicsPriorGMM(\n min_samples_per_cluster=40,\n max_clusters=20,\n max_samples=20,\n strength=1.,\n )\n\n self.cur[m].traj_info.dynamics = \\\n DynamicsLRPrior(prior=prior, sigma_regu=sigma_regu)\n\n self.cur[m].traj_distr = local_policies[m]\n\n # Cost Fcn\n self._cost_fcn = cost_fcn\n\n # Global Policy Optimization\n self.global_pol_optimizer = torch.optim.Adam(\n self.global_policy.parameters(),\n lr=self._global_opt_lr,\n betas=(0.9, 0.999),\n eps=1e-08, # Term added to the denominator for numerical stability\n # weight_decay=0.005,\n weight_decay=0.5,\n amsgrad=True,\n )\n\n # Local Trajectory Information\n self._local_pol_optimizer = TrajOptLQR(\n cons_per_step=False,\n use_prev_distr=False,\n update_in_bwd_pass=True,\n min_eta=traj_opt_min_eta,\n max_eta=traj_opt_max_eta,\n )\n\n level = logging.INFO\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(level)\n console = logging.StreamHandler()\n self.logger.addHandler(console)\n for handler in self.logger.handlers:\n handler.setLevel(level)\n\n self.eval_statistics = None\n\n self._return_fig = None\n self._return_axs = None\n self._return_lines = [None for _ in range(self.n_test_conds)]\n\n # MDGPS data #\n # ---------- #\n for m in range(self.M):\n # Same policy prior type for all conditions\n self.cur[m].pol_info = PolicyInfo(\n T=self.T,\n dU=self.dU,\n dX=self.dX,\n init_pol_wt=0.01,\n )\n self.cur[m].pol_info.policy_prior = ConstantPolicyPrior()\n\n def train(self, start_epoch=0):\n # Get snapshot of initial stuff\n if start_epoch == 0:\n self.training_mode(False)\n params = self.get_epoch_snapshot(-1)\n logger.save_itr_params(-1, params)\n\n self._n_env_steps_total = start_epoch * self.num_train_steps_per_epoch\n\n gt.reset()\n gt.set_def_unique(False)\n\n for epoch in gt.timed_for(\n range(start_epoch, self.num_epochs),\n save_itrs=True,\n ):\n self._start_epoch(epoch)\n\n # self._current_path_builder = PathBuilder()\n\n # Sample from environment using current trajectory distributions\n noise = self.noise_data[epoch]\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Sampling from local trajectories...'\n % (type(self).__name__, epoch))\n paths = self.sample_local_pol(noise=noise)\n self._exploration_paths = paths\n # self._handle_path(paths)\n self._n_env_steps_total += int(self.n_train_conds*self._num_samples*self.T)\n\n # Iterative learning step\n gt.stamp('sample')\n self._try_to_train()\n gt.stamp('train')\n\n # Evaluate if requirements are met\n self._try_to_eval(epoch)\n gt.stamp('eval')\n self._end_epoch()\n\n def _do_training(self):\n epoch = self._n_epochs\n # batch = self.get_batch()\n paths = self.get_exploration_paths()\n self.logger.info('')\n\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Creating Sample List...'\n % (type(self).__name__, epoch))\n for m, m_train in enumerate(self._train_cond_idxs):\n self.cur[m_train].sample_list = SampleList(paths[m])\n\n # Update dynamics model using all samples.\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Updating dynamics linearization...'\n % (type(self).__name__, epoch))\n self._update_dynamic_model()\n\n # Evaluate sample costs\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Evaluating samples costs...'\n % (type(self).__name__, epoch))\n self._eval_iter_samples_costs()\n\n # Update Networks\n # On the first iteration, need to catch policy up to init_traj_distr.\n if self._n_epochs == 1:\n self.logger.info(\"\\n\"*2)\n self.logger.info('%s: itr:%02d | '\n 'S-step for init_traj_distribution (iter=0)...'\n % (type(self).__name__, epoch))\n self.new_traj_distr = [self.cur[cond].traj_distr\n for cond in range(self.M)]\n self._update_global_policy()\n\n # TODO:\n self.sample_global_pol()\n\n # Update global policy linearizations.\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Updating global policy linearization...'\n % (type(self).__name__, epoch))\n self._update_local_policies_fit()\n\n # Update KL step\n if self._n_epochs > 1:\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Updating KL step size with GLOBAL policy...'\n % (type(self).__name__, epoch))\n self._update_kl_step_size()\n\n # C-step\n self.logger.info('')\n self.logger.info('%s: itr:%02d | '\n 'Updating trajectories...'\n % (type(self).__name__, epoch))\n for ii in range(self._traj_opt_iters):\n self.logger.info('-%s: itr:%02d | Inner iteration %d/%d'\n % (type(self).__name__, epoch, ii+1,\n self._traj_opt_iters))\n self._update_local_policies()\n\n # S-step\n self.logger.info('')\n self.logger.info('%s:itr:%02d | ->| S-step |<-'\n % (type(self).__name__, epoch))\n self._update_global_policy()\n\n # if self.eval_statistics is None:\n # \"\"\"\n # Eval should set this to None.\n # This way, these statistics are only computed for one batch.\n # \"\"\"\n # self.eval_statistics = OrderedDict()\n # # self.eval_statistics['Bellman Residual (QFcn)'] = \\\n # # np.mean(ptu.get_numpy(bellman_residual))\n # self.eval_statistics['Surrogate Reward (Policy)'] = \\\n # np.mean(ptu.get_numpy(surrogate_cost))\n\n def _can_evaluate(self):\n return True\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n self._update_logging_data()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n paths = self.sample_global_pol()\n\n if paths is None:\n print(\"NO LOGGING LAST SAMPLING\")\n return\n\n cond_returns_mean = np.zeros(len(paths))\n cond_returns_std = np.zeros(len(paths))\n\n for cc, cond_path in enumerate(paths):\n sample_list = SampleList(cond_path)\n\n true_cost, cost_estimate, cost_compo = \\\n self._eval_sample_list_cost(sample_list, self._cost_fcn)\n\n cond_returns_mean[cc] = np.mean(np.sum(true_cost, axis=-1))\n cond_returns_std[cc] = np.std(np.sum(true_cost, axis=-1))\n\n stat_txt = '[Cond-%02d] Global Mean Return' % cc\n statistics[stat_txt] = cond_returns_mean[cc]\n\n stat_txt = '[Cond-%02d] Global Std Return' % cc\n statistics[stat_txt] = cond_returns_std[cc]\n\n stat_txt = '[Cond-%02d] Eta' % cc\n statistics[stat_txt] = self.cur[cc].eta\n\n # stat_txt = 'Mean Return'\n # statistics[stat_txt] = np.mean(cond_returns_mean)\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n self._update_plot(statistics)\n\n def _update_plot(self, statistics):\n # if self._return_fig is None:\n # # self._return_fig, self._return_axs = subplots(1, self.n_test_conds+1)\n # self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)\n # for aa, ax in enumerate(self._return_axs[:-1]):\n # self._return_lines = \\\n # ax.plot(self._n_epochs,\n # statistics['[Cond-%02d] Mean Return' % aa],\n # color='b',\n # marker='o',\n # markersize=2\n # )\n # # plt.show(block=False)\n # else:\n # for aa, line in enumerate(self._return_lines[:-1]):\n # line.set_xdata(\n # np.append(line.get_xdata(),\n # self._n_epochs)\n # )\n # line.set_ydata(\n # np.append(line.get_ydata(),\n # statistics['[Cond-%02d] Mean Return' % aa])\n # )\n # self._return_fig.canvas.draw()\n # plt_pause(0.01)\n\n # self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)\n # for aa, ax in enumerate(self._return_axs[:-1]):\n # self._return_lines = \\\n # ax.plot(self._n_epochs,\n # statistics['[Cond-%02d] Mean Return' % aa],\n # color='b',\n # marker='o',\n # markersize=2\n # )\n # self._return_fig.savefig('tempo/fig%02d.png' % self._n_epochs)\n #\n # del self._return_fig\n # del self._return_axs\n # del self._return_lines\n pass\n\n def _update_logging_data(self):\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n\n def _end_epoch(self):\n # TODO: change IterationData to reflect new stuff better\n\n del self.prev\n self.prev = copy.deepcopy(self.cur)\n\n for m in range(self.M):\n self.prev[m].new_traj_distr = self.new_traj_distr[m]\n\n # NEW IterationData object, and remove new_traj_distr\n self.cur = [IterationData() for _ in range(self.M)]\n for m in range(self.M):\n self.cur[m].traj_info = TrajectoryInfo()\n self.cur[m].traj_info.dynamics = \\\n copy.deepcopy(self.prev[m].traj_info.dynamics)\n self.cur[m].step_mult = self.prev[m].step_mult\n self.cur[m].eta = self.prev[m].eta\n self.cur[m].traj_distr = self.new_traj_distr[m]\n self.cur[m].traj_info.last_kl_step = \\\n self.prev[m].traj_info.last_kl_step\n # MDGPS\n self.cur[m].pol_info = copy.deepcopy(self.prev[m].pol_info)\n self.new_traj_distr = None\n\n RLAlgorithm._end_epoch(self)\n\n def _update_dynamic_model(self):\n \"\"\"\n Instantiate dynamics objects and update prior.\n Fit dynamics to current samples.\n \"\"\"\n for m in range(self.M):\n cur_data = self.cur[m].sample_list\n X = cur_data['observations']\n U = cur_data['actions']\n\n # Update prior and fit dynamics.\n self.cur[m].traj_info.dynamics.update_prior(X, U)\n self.cur[m].traj_info.dynamics.fit(X, U)\n\n # Fm = self.cur[m].traj_info.dynamics.Fm\n # fv = self.cur[m].traj_info.dynamics.fv\n # T = -2\n # N = 0\n # oo = X[N, T, :]\n # uu = U[N, T, :]\n # oo_uu = np.concatenate((oo, uu), axis=0)\n # oop1 = Fm[T].dot(oo_uu) + fv[T]\n # print('real', X[N, T+1, :])\n # print('pred', oop1)\n # input('fds')\n\n # Fit x0mu/x0sigma.\n x0 = X[:, 0, :]\n x0mu = np.mean(x0, axis=0)\n self.cur[m].traj_info.x0mu = x0mu\n self.cur[m].traj_info.x0sigma = \\\n np.diag(np.maximum(np.var(x0, axis=0),\n self._initial_state_var))\n\n prior = self.cur[m].traj_info.dynamics.get_prior()\n if prior:\n mu0, Phi, priorm, n0 = prior.initial_state()\n N = len(cur_data)\n self.cur[m].traj_info.x0sigma += \\\n Phi + (N*priorm) / (N+priorm) * \\\n np.outer(x0mu-mu0, x0mu-mu0) / (N+n0)\n\n def _eval_iter_samples_costs(self):\n for cond in range(self.M):\n sample_list = self.cur[cond].sample_list\n\n true_cost, cost_estimate, cost_compo = \\\n self._eval_sample_list_cost(sample_list, self._cost_fcn)\n\n # Cost sample\n self.cur[cond].cs = true_cost # True value of cost.\n\n # Cost composition\n self.cur[cond].cost_compo = cost_compo # Cost 'composition'.\n\n # Cost estimate.\n self.cur[cond].traj_info.Cm = cost_estimate[0] # Quadratic term (matrix).\n self.cur[cond].traj_info.cv = cost_estimate[1] # Linear term (vector).\n self.cur[cond].traj_info.cc = cost_estimate[2] # Constant term (scalar).\n\n def _eval_sample_list_cost(self, sample_list, cost_fcn):\n \"\"\"\n Evaluate costs for a sample_list using a specific cost function.\n Args:\n cost: self.cost_function[cond]\n cond: Condition to evaluate cost on.\n \"\"\"\n # Constants.\n T, dX, dU = self.T, self.dX, self.dU\n N = len(sample_list)\n\n # Compute cost.\n cs = np.zeros((N, T))\n cc = np.zeros((N, T))\n cv = np.zeros((N, T, dX+dU))\n Cm = np.zeros((N, T, dX+dU, dX+dU))\n cost_composition = [None for _ in range(N)]\n for n in range(N):\n sample = sample_list[n]\n # Get costs.\n l, lx, lu, lxx, luu, lux, cost_composition[n] = cost_fcn.eval(sample)\n\n print('XX | cost_compo', [np.sum(co) for co in cost_composition[n]])\n\n # True value of cost\n cs[n, :] = l\n\n # Constant term\n cc[n, :] = l\n\n # Assemble matrix and vector.\n cv[n, :, :] = np.c_[lx, lu]\n Cm[n, :, :, :] = np.concatenate(\n (np.c_[lxx, np.transpose(lux, [0, 2, 1])], np.c_[lux, luu]),\n axis=1\n )\n\n # Adjust for expanding cost around a sample.\n X = sample['observations']\n U = sample['actions']\n yhat = np.c_[X, U]\n rdiff = -yhat\n rdiff_expand = np.expand_dims(rdiff, axis=2)\n cv_update = np.sum(Cm[n, :, :, :] * rdiff_expand, axis=1)\n cc[n, :] += np.sum(rdiff * cv[n, :, :], axis=1) \\\n + 0.5 * np.sum(rdiff * cv_update, axis=1)\n cv[n, :, :] += cv_update\n\n # Expected Costs\n cc = np.mean(cc, axis=0) # Constant term (scalar).\n cv = np.mean(cv, axis=0) # Linear term (vector).\n Cm = np.mean(Cm, axis=0) # Quadratic term (matrix).\n\n return cs, (Cm, cv, cc), cost_composition\n\n def _update_global_policy(self):\n \"\"\"\n Computes(updates) a new global policy.\n :return:\n \"\"\"\n dU, dO, T = self.dU, self.dO, self.T\n # Compute target mean, cov(precision), and weight for each sample;\n # and concatenate them.\n obs_data, tgt_mu = ptu.zeros((0, T, dO)), ptu.zeros((0, T, dU))\n tgt_prc, tgt_wt = ptu.zeros((0, T, dU, dU)), ptu.zeros((0, T))\n for m in range(self.M):\n samples = self.cur[m].sample_list\n X = samples['observations']\n N = len(samples)\n traj = self.new_traj_distr[m]\n pol_info = self.cur[m].pol_info\n mu = ptu.zeros((N, T, dU))\n prc = ptu.zeros((N, T, dU, dU))\n wt = ptu.zeros((N, T))\n obs = ptu.FloatTensor(samples['observations'])\n # Get time-indexed actions.\n for t in range(T):\n # Compute actions along this trajectory.\n prc[:, t, :, :] = ptu.FloatTensor(\n np.tile(traj.inv_pol_covar[t, :, :], [N, 1, 1])\n )\n for i in range(N):\n mu[i, t, :] = ptu.FloatTensor(\n traj.K[t, :, :].dot(X[i, t, :]) + traj.k[t, :]\n )\n wt[:, t] = pol_info.pol_wt[t]\n\n tgt_mu = torch.cat((tgt_mu, mu))\n tgt_prc = torch.cat((tgt_prc, prc))\n tgt_wt = torch.cat((tgt_wt, wt))\n obs_data = torch.cat((obs_data, obs))\n\n self.global_policy_optimization(obs_data, tgt_mu, tgt_prc, tgt_wt)\n\n def global_policy_optimization(self, obs, tgt_mu, tgt_prc, tgt_wt):\n \"\"\"\n Update policy.\n :param obs: Numpy array of observations, N x T x dO.\n :param tgt_mu: Numpy array of mean controller outputs, N x T x dU.\n :param tgt_prc: Numpy array of precision matrices, N x T x dU x dU.\n :param tgt_wt: Numpy array of weights, N x T.\n \"\"\"\n N, T = obs.shape[:2]\n dU = self.dU\n dO = self.dO\n\n # Save original tgt_prc.\n tgt_prc_orig = torch.reshape(tgt_prc, [N*T, dU, dU])\n\n # Renormalize weights.\n tgt_wt *= (float(N * T) / torch.sum(tgt_wt))\n # Allow weights to be at most twice the robust median.\n mn = torch.median(tgt_wt[tgt_wt > 1e-2])\n tgt_wt = torch.clamp(tgt_wt, max=2 * mn)\n # Robust median should be around one.\n tgt_wt /= mn\n\n # Reshape inputs.\n obs = torch.reshape(obs, (N*T, dO))\n tgt_mu = torch.reshape(tgt_mu, (N*T, dU))\n tgt_prc = torch.reshape(tgt_prc, (N*T, dU, dU))\n tgt_wt = torch.reshape(tgt_wt, (N*T, 1, 1))\n\n # Fold weights into tgt_prc.\n tgt_prc = tgt_wt * tgt_prc\n\n # TODO: DO THIS MORE THAN ONCE!!\n if not hasattr(self.global_policy, 'scale') or not hasattr(self.global_policy, 'bias'):\n # 1e-3 to avoid infs if some state dimensions don't change in the\n # first batch of samples\n self.global_policy.scale = ptu.zeros(self.explo_env.obs_dim)\n self.global_policy.bias = ptu.zeros(self.explo_env.obs_dim)\n\n m = self._global_samples_counter\n n = m + N*T\n\n scale_obs = torch.diag(1.0 / torch.clamp(torch.std(obs, dim=0),\n min=1e-3))\n var_obs = scale_obs**2\n var_prev = self.global_policy.scale**2\n\n bias_obs = -torch.mean(obs.matmul(scale_obs), dim=0)\n bias_prev = self.global_policy.bias\n bias_new = float(n/(m+n))*bias_obs + float(m/(m+n))*bias_prev\n\n var_new = float(n/(m+n))*var_obs + float(m/(m+n))*var_prev - \\\n float((m*n)/(m+n)**2)*(bias_prev - bias_new)**2\n self.global_policy.scale = torch.sqrt(var_new)\n self.global_policy.bias = bias_new\n\n # self.global_policy.scale = ptu.eye(self.env.obs_dim)\n # self.global_policy.bias = ptu.zeros(self.env.obs_dim)\n\n # Normalize Inputs\n obs = obs.matmul(self.global_policy.scale) + self.global_policy.bias\n\n # # Global Policy Optimization\n # self.global_pol_optimizer = torch.optim.Adam(\n # self.global_policy.parameters(),\n # lr=self._global_opt_lr,\n # betas=(0.9, 0.999),\n # eps=1e-08, # Term added to the denominator for numerical stability\n # # weight_decay=0.005,\n # weight_decay=0.5,\n # amsgrad=True,\n # )\n\n # Assuming that N*T >= self.batch_size.\n batches_per_epoch = math.floor(N*T / self._global_opt_batch_size)\n idx = list(range(N*T))\n average_loss = 0\n np.random.shuffle(idx)\n\n if torch.any(torch.isnan(obs)):\n raise ValueError('GIVING NaN OBSERVATIONS to PYTORCH')\n if torch.any(torch.isnan(tgt_mu)):\n raise ValueError('GIVING NaN ACTIONS to PYTORCH')\n if torch.any(torch.isnan(tgt_prc)):\n raise ValueError('GIVING NaN PRECISION to PYTORCH')\n\n for oo in range(1):\n print('$$$$\\n'*2)\n print('GLOBAL_OPT %02d' % oo)\n print('$$$$\\n'*2)\n # # Global Policy Optimization\n # self.global_pol_optimizer = torch.optim.Adam(\n # self.global_policy.parameters(),\n # lr=self._global_opt_lr,\n # betas=(0.9, 0.999),\n # eps=1e-08, # Term added to the denominator for numerical stability\n # # weight_decay=0.005,\n # weight_decay=0.5,\n # amsgrad=True,\n # )\n\n for ii in range(self._global_opt_iters):\n # # Load in data for this batch.\n # start_idx = int(ii * self._global_opt_batch_size %\n # (batches_per_epoch * self._global_opt_batch_size))\n # idx_i = idx[start_idx:start_idx+self._global_opt_batch_size]\n\n # Load in data for this batch.\n idx_i = np.random.choice(N*T, self._global_opt_batch_size)\n\n self.global_pol_optimizer.zero_grad()\n\n pol_output = self.global_policy(obs[idx_i], deterministic=True)[0]\n\n train_loss = euclidean_loss(mlp_out=pol_output,\n action=tgt_mu[idx_i],\n precision=tgt_prc[idx_i],\n batch_size=self._global_opt_batch_size)\n\n train_loss.backward()\n self.global_pol_optimizer.step()\n\n average_loss += train_loss.item()\n\n # del pol_output\n # del train_loss\n loss_tolerance = 5e-10\n\n if (ii+1) % 50 == 0:\n print('PolOpt iteration %d, average loss %f'\n % (ii+1, average_loss/50))\n average_loss = 0\n\n if train_loss <= loss_tolerance:\n print(\"It converged! loss:\", train_loss)\n break\n\n if train_loss <= loss_tolerance:\n break\n\n # Optimize variance.\n A = torch.sum(tgt_prc_orig, dim=0) \\\n + 2 * N * T * self._global_opt_ent_reg * ptu.ones((dU, dU))\n A = A / torch.sum(tgt_wt)\n\n # TODO - Use dense covariance?\n self.global_policy.std = torch.diag(torch.sqrt(A))\n\n def _global_pol_prob(self, obs):\n dU = self.dU\n N, T = obs.shape[:2]\n\n # Normalize obs.\n if hasattr(self.global_policy, 'scale'):\n # TODO: Should prob be called before update?\n obs_scale = ptu.get_numpy(self.global_policy.scale)\n obs_bias = ptu.get_numpy(self.global_policy.bias)\n for n in range(N):\n obs[n, :] = obs[n, :].dot(obs_scale) + obs_bias\n else:\n raise AssertionError('WE ARE NOT NORMALIZING THE OBS!!!')\n\n output = np.zeros((N, T, dU))\n\n # for i in range(N):\n # for t in range(T):\n # # Feed in data.\n # feed_dict = {self.obs_tensor: np.expand_dims(obs[i, t], axis=0)}\n # with tf.device(self.device_string):\n # output[i, t, :] = self.sess.run(self.act_op,\n # feed_dict=feed_dict)\n output = ptu.get_numpy(self.global_policy(ptu.from_numpy(obs),\n deterministic=True)[0]\n )\n\n pol_var = ptu.get_numpy(self.global_policy.std) ** 2\n\n # Same variance for all time steps\n pol_sigma = np.tile(np.diag(pol_var), [N, T, 1, 1])\n pol_prec = np.tile(np.diag(1.0 / pol_var), [N, T, 1, 1])\n pol_det_sigma = np.tile(np.prod(pol_var), [N, T])\n\n return output, pol_sigma, pol_prec, pol_det_sigma\n\n def _update_kl_step_size(self):\n estimate_cost_fcn = self._local_pol_optimizer.estimate_cost\n\n # Compute previous cost and previous expected cost.\n prev_M = len(self.prev) # May be different in future.\n prev_laplace = np.empty(prev_M)\n prev_mc = np.empty(prev_M)\n prev_predicted = np.empty(prev_M)\n for m in range(prev_M):\n prev_nn = self.prev[m].pol_info.traj_distr()\n prev_lg = self.prev[m].new_traj_distr\n\n # Compute values under Laplace approximation. 
This is the policy\n # that the previous samples were actually drawn from under the\n # dynamics that were estimated from the previous samples.\n prev_laplace[m] = estimate_cost_fcn(prev_nn,\n self.prev[m].traj_info).sum()\n # This is the actual cost that we experienced.\n prev_mc[m] = self.prev[m].cs.mean(axis=0).sum()\n # This is the policy that we just used under the dynamics that\n # were estimated from the prev samples (so this is the cost\n # we thought we would have).\n prev_predicted[m] = estimate_cost_fcn(prev_lg,\n self.prev[m].traj_info).sum()\n\n # Compute current cost.\n cur_laplace = np.empty(self.M)\n cur_mc = np.empty(self.M)\n for m in range(self.M):\n cur_nn = self.cur[m].pol_info.traj_distr()\n # This is the actual cost we have under the current trajectory\n # based on the latest samples.\n cur_laplace[m] = estimate_cost_fcn(cur_nn,\n self.cur[m].traj_info).sum()\n cur_mc[m] = self.cur[m].cs.mean(axis=0).sum()\n\n # Compute predicted and actual improvement.\n prev_laplace = prev_laplace.mean()\n prev_mc = prev_mc.mean()\n prev_predicted = prev_predicted.mean()\n cur_laplace = cur_laplace.mean()\n cur_mc = cur_mc.mean()\n\n if self._kl_step_rule == 'laplace':\n predicted_impr = prev_laplace - prev_predicted\n actual_impr = prev_laplace - cur_laplace\n elif self._kl_step_rule == 'mc':\n predicted_impr = prev_mc - prev_predicted\n actual_impr = prev_mc - cur_mc\n else:\n raise AttributeError('Wrong kl_step_rule')\n\n for m in range(self.M):\n self._set_new_mult(predicted_impr, actual_impr, m)\n\n def _set_new_mult(self, predicted_impr, actual_impr, m):\n \"\"\"\n Adjust step size multiplier according to the predicted versus\n actual improvement.\n \"\"\"\n # Model improvement as I = predicted_dI * KL + penalty * KL^2,\n # where predicted_dI = pred/KL and penalty = (act-pred)/(KL^2).\n # Optimize I w.r.t. 
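KL by setting dI/dKL to zero.\n        #\n        # A quick numeric check (illustrative values, not from any run):\n        # pred = 10, act = 4 gives new_mult = 10 / (2 * (10 - 4)) ~= 0.83,\n        # which shrinks the step; pred = 10, act = 9 gives 10 / 2 = 5.0,\n        # which sits at the upper clamp min(5.0, .) below.\n        # Optimize I w.r.t. 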
KL: 0 = predicted_dI + 2 * penalty * KL =>\n # KL' = (-predicted_dI)/(2*penalty) = (pred/2*(pred-act)) * KL.\n # Therefore, the new multiplier is given by pred/2*(pred-act).\n new_mult = predicted_impr / (2.0 * max(1e-4,\n predicted_impr - actual_impr))\n new_mult = max(0.1, min(5.0, new_mult))\n new_step = max(min(new_mult * self.cur[m].step_mult,\n self._max_step_mult),\n self._min_step_mult\n )\n self.cur[m].step_mult = new_step\n\n if new_mult > 1:\n print('%s: Increasing step size multiplier to %f'\n % (type(self).__name__, new_step))\n else:\n print('%s: Decreasing step size multiplier to %f'\n % (type(self).__name__, new_step))\n\n def _update_local_policies(self):\n\n if self.new_traj_distr is None:\n self.new_traj_distr = [self.cur[cond].traj_distr\n for cond in range(self.M)]\n\n for cond in range(self.M):\n traj_opt_outputs = \\\n self._local_pol_optimizer.update(cond, self,\n prev_type=self._traj_opt_prev)\n self.new_traj_distr[cond] = traj_opt_outputs[0]\n self.local_policies[cond] = traj_opt_outputs[0]\n self.cur[cond].eta = traj_opt_outputs[1]\n\n def _update_local_policies_fit(self):\n \"\"\"\n Re-estimate the local policy values in the neighborhood of the trajectory.\n :return: None\n \"\"\"\n for cond in range(self.M):\n dX, dU, T = self.dX, self.dU, self.T\n # Choose samples to use.\n samples = self.cur[cond].sample_list\n N = len(samples)\n pol_info = self.cur[cond].pol_info\n X = samples['observations'].copy()\n obs = samples['observations'].copy()\n\n pol_mu, pol_sig = self._global_pol_prob(obs)[:2]\n pol_info.pol_mu, pol_info.pol_sig = pol_mu, pol_sig\n\n # Update policy prior.\n policy_prior = pol_info.policy_prior\n # TODO: THE FOLLOWING IS USELESS FOR CONSTANT PRIOR\n # samples = SampleList(self.cur[cond].sample_list)\n # mode = self._global_pol_sample_mode\n # policy_prior.update(samples, self._global_policy, mode)\n\n # Fit linearization and store in pol_info.\n # max_var = self.cur[cond].traj_distr.max_var\n max_var = None\n pol_info.pol_K, pol_info.pol_k, pol_info.pol_S = \\\n policy_prior.fit(X, pol_mu, pol_sig, max_var=max_var)\n\n for t in range(T):\n pol_info.chol_pol_S[t, :, :] = \\\n sp.linalg.cholesky(pol_info.pol_S[t, :, :])\n\n def compute_traj_cost(self, cond, eta, augment=True):\n \"\"\"\n Compute cost estimates used in the LQR backward pass.\n\n :param cond: Number of condition\n :param eta: Dual variable corresponding to KL divergence with\n previous policy.\n :param augment: True if we want a KL constraint for all time-steps.\n False otherwise. 
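The augmented cost divides Cm and cv\n            by (eta + the max-ent multiplier) and then adds eta-weighted\n            quadratic and linear KL penalty terms against the policy\n            linearization, as assembled in the loop below. Callers pass\n            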
True for MDGPS\n :return: Cm and cv\n \"\"\"\n\n traj_info = self.cur[cond].traj_info\n traj_distr = self.cur[cond].traj_distr # We do not use it\n\n if not augment: # Whether to augment cost with term to penalize KL\n return traj_info.Cm, traj_info.cv\n\n T = self.T\n dX = self.dX\n dU = self.dU\n\n Cm, cv = np.copy(traj_info.Cm), np.copy(traj_info.cv)\n\n # Pol_info\n pol_info = self.cur[cond].pol_info\n\n # Weight of maximum entropy term in trajectory optimization\n multiplier = self._max_ent_traj\n\n # Surrogate cost\n PKLm = np.zeros((T, dX+dU, dX+dU))\n PKLv = np.zeros((T, dX+dU))\n\n # TODO: 'WARN: adding a beta to divisor in compute_traj_cost')\n eps = 1e-8\n divisor = (eta + multiplier + eps)\n fCm = Cm / divisor\n fcv = cv / divisor\n\n # Add in the KL divergence with previous policy.\n for t in range(self.T):\n if self._traj_opt_prev == 'nn_pol':\n # Policy KL-divergence terms.\n inv_pol_S = np.linalg.solve(\n pol_info.chol_pol_S[t, :, :],\n np.linalg.solve(pol_info.chol_pol_S[t, :, :].T, np.eye(dU))\n )\n KB = pol_info.pol_K[t, :, :]\n kB = pol_info.pol_k[t, :]\n else:\n # Policy KL-divergence terms.\n inv_pol_S = self.cur[cond].traj_distr.inv_pol_covar[t, :, :]\n KB = self.cur[cond].traj_distr.K[t, :, :]\n kB = self.cur[cond].traj_distr.k[t, :]\n\n PKLm[t, :, :] = np.vstack([\n np.hstack([KB.T.dot(inv_pol_S).dot(KB), -KB.T.dot(inv_pol_S)]),\n np.hstack([-inv_pol_S.dot(KB), inv_pol_S])\n ])\n PKLv[t, :] = np.concatenate([\n KB.T.dot(inv_pol_S).dot(kB), -inv_pol_S.dot(kB)\n ])\n\n fCm[t, :, :] += PKLm[t, :, :] * eta / divisor\n fcv[t, :] += PKLv[t, :] * eta / divisor\n\n return fCm, fcv\n\n def sample_local_pol(self, noise):\n conditions = self._train_cond_idxs\n all_paths = list()\n for cc, cond in enumerate(conditions):\n paths = list()\n # policy = self.local_policies[cc]\n policy = self.cur[cc].traj_distr\n\n for ss in range(self._num_samples):\n observations = []\n actions = []\n rewards = []\n terminals = []\n agent_infos = []\n env_infos = []\n\n o = self.explo_env.reset(condition=cond)\n next_o = None\n for t in range(self.T):\n a, agent_info = \\\n policy.get_action(o, t, noise[cc, ss, t])\n\n # Checking NAN\n nan_number = np.isnan(a)\n if np.any(nan_number):\n print(\"\\e[31mERROR ACTION: NAN!!!!!\")\n a[nan_number] = 0\n\n next_o, r, d, env_info = self.explo_env.step(a)\n\n observations.append(o)\n rewards.append(r)\n terminals.append(d)\n actions.append(a)\n agent_infos.append(agent_info)\n env_infos.append(env_info)\n o = next_o\n\n actions = np.array(actions)\n if len(actions.shape) == 1:\n actions = np.expand_dims(actions, 1)\n\n observations = np.array(observations)\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, 1)\n next_o = np.array([next_o])\n\n next_observations = np.vstack(\n (\n observations[1:, :],\n np.expand_dims(next_o, 0)\n )\n )\n\n path = dict(\n observations=observations,\n actions=actions,\n rewards=np.array(rewards).reshape(-1, 1),\n next_observations=next_observations,\n terminals=np.array(terminals).reshape(-1, 1),\n agent_infos=agent_infos,\n env_infos=env_infos,\n )\n paths.append(path)\n all_paths.append(paths)\n\n return all_paths\n\n def sample_global_pol(self):\n\n conditions = self._test_cond_idxs\n all_paths = list()\n for cc, cond in enumerate(conditions):\n paths = list()\n policy = self.global_policy\n obs_scale = ptu.get_numpy(policy.scale)\n obs_bias = ptu.get_numpy(policy.bias)\n\n for ss in range(self._test_samples):\n observations = []\n actions = []\n rewards = []\n terminals = []\n agent_infos 
= []\n env_infos = []\n\n o = self.explo_env.reset(condition=cond)\n next_o = None\n\n for t in range(self.T):\n pol_input = o.dot(obs_scale) + obs_bias\n # print(o)\n # print(pol_input)\n # print(obs_scale)\n # print(obs_bias)\n # print(pol_input)\n\n a, agent_info = \\\n policy.get_action(pol_input, deterministic=True)\n\n # local_pol = self.local_policies[cc]\n # local_act = local_pol.get_action(o, t, np.zeros(7))[0]\n # print(t, 'local', local_act)\n # print(t, 'NN', a)\n # if self.cur[cc].pol_info.pol_mu is not None:\n # pol_lin = self.cur[cc].pol_info.traj_distr()\n # pol_lin_act = pol_lin.get_action(o, t, np.zeros(7))[0]\n # print(t, 'lin', pol_lin_act)\n #\n # new_local_pol = self.new_traj_distr[cc]\n # new_local_act = new_local_pol.get_action(o, t, np.zeros(7))[0]\n # print(t, 'new_local', new_local_act)\n #\n # if self._traj_opt_prev == 'traj':\n # a = new_local_act\n # print('--')\n\n # Checking NAN\n nan_number = np.isnan(a)\n if np.any(nan_number):\n print(\"\\e[31mERROR ACTION: NAN!!!!!\")\n a[nan_number] = 0\n\n next_o, r, d, env_info = self.explo_env.step(a)\n\n observations.append(o)\n rewards.append(r)\n terminals.append(d)\n actions.append(a)\n agent_infos.append(agent_info)\n env_infos.append(env_info)\n o = next_o\n\n actions = np.array(actions)\n if len(actions.shape) == 1:\n actions = np.expand_dims(actions, 1)\n\n observations = np.array(observations)\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, 1)\n next_o = np.array([next_o])\n\n next_observations = np.vstack(\n (\n observations[1:, :],\n np.expand_dims(next_o, 0)\n )\n )\n\n path = dict(\n observations=observations,\n actions=actions,\n rewards=np.array(rewards).reshape(-1, 1),\n next_observations=next_observations,\n terminals=np.array(terminals).reshape(-1, 1),\n agent_infos=agent_infos,\n env_infos=env_infos,\n )\n paths.append(path)\n all_paths.append(paths)\n\n return all_paths\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n snapshot = super(MDGPS, self).get_epoch_snapshot(epoch)\n snapshot.update(\n global_policy=self.global_policy,\n local_policies=self.local_policies,\n )\n\n return snapshot\n\n @property\n def n_train_conds(self):\n return len(self._train_cond_idxs)\n\n @property\n def n_test_conds(self):\n return len(self._test_cond_idxs)\n\n @property\n def networks(self):\n networks_list = [\n self.global_policy\n ]\n\n return networks_list\n\n\nclass SampleList(object):\n def __init__(self, sample_list):\n self._sample_list = [dict(\n observations=sample['observations'],\n actions=sample['actions'],\n ) for sample in sample_list]\n\n def __getitem__(self, arg):\n if arg == 'observations':\n return np.asarray([data['observations']\n for data in self._sample_list])\n elif arg == 'actions':\n return np.asarray([data['actions']\n for data in self._sample_list])\n elif isinstance(arg, int):\n return self._sample_list[arg]\n else:\n raise AttributeError('Wrong argument')\n\n def __len__(self):\n return len(self._sample_list)\n\n\ndef euclidean_loss(mlp_out, action, precision, batch_size):\n scale_factor = 2.*batch_size\n\n u = action-mlp_out\n uP = torch.matmul(u.unsqueeze(1), precision).squeeze(1)\n # This last dot product is then summed, so we just the sum all at once.\n uPu = torch.sum(uP*u)\n return uPu/scale_factor\n\n # uPu = torch.sum(u**2)\n # return uPu/scale_factor\n\n # uPu = 0.5*torch.sum(mlp_out**2)\n # return uPu\n" }, { "alpha_fraction": 0.5239221453666687, "alphanum_fraction": 
0.5386574268341064, "avg_line_length": 28.08465576171875, "blob_id": "08099d854a275677b1d84b14865aa75263402fc5", "content_id": "67c1a8013a8540083ec604251aa78199851f18bc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5497, "license_type": "permissive", "max_line_length": 79, "num_lines": 189, "path": "/robolearn/utils/data_management/normalizer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on code from Marcin Andrychowicz and OpenAI baselines.\n\"\"\"\nimport numpy as np\n\n\nclass RunningNormalizer(object):\n def __init__(self, epsilon=1e-4, shape=(), mean=0, var=1,\n default_clip_range=np.inf):\n self.mean = mean + np.zeros(shape, np.float64)\n self.var = var*np.ones(shape, np.float64)\n self.count = epsilon\n self.default_clip_range = default_clip_range\n\n def update(self, x):\n batch_mean = np.mean(x, axis=0)\n batch_var = np.var(x, axis=0)\n batch_count = x.shape[0]\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n self.mean, self.var, self.count = update_mean_var_count_from_moments(\n self.mean, self.var, self.count, batch_mean, batch_var, batch_count\n )\n\n def normalize(self, x, clip_range=None):\n # Get clip range\n if clip_range is None:\n clip_range = self.default_clip_range\n # Get current mean and std\n\n mean, std = self.mean, self.std\n\n if x.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n\n # Return clipped values\n return np.clip((x - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return v * std + mean\n\n @property\n def std(self):\n return np.sqrt(self.var)\n\n\ndef update_mean_var_count_from_moments(mean, var, count,\n batch_mean, batch_var, batch_count):\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n # Calculate new mean\n new_mean = mean + delta*batch_count / tot_count\n\n # Calculate new variance\n m_a = var * count\n m_b = batch_var * batch_count\n M2 = m_a + m_b + np.square(delta) * count*count * batch_count / tot_count\n new_var = M2 / tot_count\n\n # Calculate new count\n new_count = tot_count\n\n return new_mean, new_var, new_count\n\n\nclass Normalizer(object):\n def __init__(\n self,\n size,\n eps=1e-8,\n default_clip_range=np.inf,\n mean=0,\n std=1,\n ):\n self.size = size\n self.eps = eps\n self.default_clip_range = default_clip_range\n self.sum = np.zeros(self.size, np.float32)\n self.sumsq = np.zeros(self.size, np.float32)\n self.count = np.ones(1, np.float32)\n self.mean = mean + np.zeros(self.size, np.float32)\n self.std = std * np.ones(self.size, np.float32)\n self.synchronized = True\n\n def update(self, v):\n if v.ndim == 1:\n v = np.expand_dims(v, 0)\n assert v.ndim == 2\n assert v.shape[1] == self.size\n self.sum += v.sum(axis=0)\n self.sumsq += (np.square(v)).sum(axis=0)\n self.count[0] += v.shape[0]\n self.synchronized = False\n\n def normalize(self, v, clip_range=None):\n if not self.synchronized:\n self.synchronize()\n if clip_range is None:\n clip_range = self.default_clip_range\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return np.clip((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n if not self.synchronized:\n self.synchronize()\n mean, std = self.mean, self.std\n if v.ndim 
== 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return mean + v * std\n\n def synchronize(self):\n self.mean[...] = self.sum / self.count[0]\n self.std[...] = np.sqrt(\n np.maximum(\n np.square(self.eps),\n self.sumsq / self.count[0] - np.square(self.mean)\n )\n )\n self.synchronized = True\n\n\nclass IdentityNormalizer(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def update(self, v):\n pass\n\n def normalize(self, v, clip_range=None):\n return v\n\n def denormalize(self, v):\n return v\n\n\nclass FixedNormalizer(object):\n def __init__(\n self,\n size,\n default_clip_range=np.inf,\n mean=0,\n std=1,\n eps=1e-8,\n ):\n assert std > 0\n std = std + eps\n self.size = size\n self.default_clip_range = default_clip_range\n self.mean = mean + np.zeros(self.size, np.float32)\n self.std = std + np.zeros(self.size, np.float32)\n self.eps = eps\n\n def set_mean(self, mean):\n self.mean = mean + np.zeros(self.size, np.float32)\n\n def set_std(self, std):\n std = std + self.eps\n self.std = std + np.zeros(self.size, np.float32)\n\n def normalize(self, v, clip_range=None):\n if clip_range is None:\n clip_range = self.default_clip_range\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return np.clip((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n mean, std = self.mean, self.std\n if v.ndim == 2:\n mean = mean.reshape(1, -1)\n std = std.reshape(1, -1)\n return mean + v * std\n\n def copy_stats(self, other):\n self.set_mean(other.mean)\n self.set_std(other.std)\n" }, { "alpha_fraction": 0.5988326668739319, "alphanum_fraction": 0.602918267250061, "avg_line_length": 33.96598815917969, "blob_id": "92f224606e06ff4e83b10a6e460704f378567b6f", "content_id": "644b13b2289bd6cfcbaf467765211b5a4f559f5c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5140, "license_type": "permissive", "max_line_length": 80, "num_lines": 147, "path": "/scripts/sim_navigation2d_goalcompo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers import rollout\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import set_gpu_mode\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.envs.simple_envs.navigation2d import Navigation2dGoalCompoEnv\nfrom robolearn.torch.policies import MultiPolicySelector\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.models.policies import ExplorationPolicy\nimport os\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\nimport numpy as np\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfilename = str(uuid.uuid4())\nSEED = 110\n\n\ndef simulate_policy(args):\n\n np.random.seed(SEED)\n ptu.seed(SEED)\n\n data = joblib.load(args.file)\n if args.deterministic:\n if args.un > -1:\n print('Using the deterministic version of the UNintentional policy '\n '%02d.' 
% args.un)\n if 'u_policy' in data:\n policy = MakeDeterministic(\n MultiPolicySelector(data['u_policy'], args.un))\n # WeightedMultiPolicySelector(data['u_policy'], args.un))\n else:\n # policy = MakeDeterministic(data['u_policies'][args.un])\n if isinstance(data['policy'], TanhGaussianPolicy):\n policy = MakeDeterministic(data['policy'])\n else:\n policy = MakeDeterministic(\n WeightedMultiPolicySelector(data['policy'], args.un)\n )\n else:\n print('Using the deterministic version of the Intentional policy.')\n if isinstance(data['policy'], ExplorationPolicy):\n policy = MakeDeterministic(data['policy'])\n else:\n policy = data['policy']\n else:\n if args.un > -1:\n print('Using the UNintentional stochastic policy %02d' % args.un)\n if 'u_policy' in data:\n # policy = MultiPolicySelector(data['u_policy'], args.un)\n policy = WeightedMultiPolicySelector(data['u_policy'], args.un)\n else:\n policy = WeightedMultiPolicySelector(data['policy'], args.un)\n # policy = data['policy'][args.un]\n else:\n print('Using the Intentional stochastic policy.')\n # policy = data['exploration_policy']\n policy = data['policy']\n\n print(\"Policy loaded!!\")\n\n # Load environment\n dirname = os.path.dirname(args.file)\n with open(os.path.join(dirname,'variant.json')) as json_data:\n log_data = json.load(json_data)\n env_params = log_data['env_params']\n H = int(log_data['path_length'])\n\n env_params.pop('goal', None)\n env = NormalizedBoxEnv(\n Navigation2dGoalCompoEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n print(\"Environment loaded!!\")\n\n if args.gpu:\n set_gpu_mode(True)\n policy.cuda()\n if isinstance(policy, MakeDeterministic):\n if isinstance(policy.stochastic_policy, PyTorchModule):\n policy.stochastic_policy.train(False)\n else:\n if isinstance(policy, PyTorchModule):\n policy.train(False)\n\n while True:\n if args.record:\n rollout_start_fcn = lambda: \\\n env.start_recording_video('pusher_video.mp4')\n rollout_end_fcn = lambda: \\\n env.stop_recording_video()\n else:\n rollout_start_fcn = None\n rollout_end_fcn = None\n\n obs_normalizer = data.get('obs_normalizer')\n\n if args.H != -1:\n H = args.H\n\n path = rollout(\n env,\n policy,\n max_path_length=H,\n animated=True,\n obs_normalizer=obs_normalizer,\n rollout_start_fcn=rollout_start_fcn,\n rollout_end_fcn=rollout_end_fcn,\n )\n\n if hasattr(env, \"log_diagnostics\"):\n env.log_diagnostics([path])\n\n logger.dump_tabular()\n\n if args.record:\n break\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./params.pkl',\n help='path to the snapshot file')\n parser.add_argument('--H', type=int, default=-1,\n help='Max length of rollout')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--deterministic', action=\"store_true\")\n parser.add_argument('--record', action=\"store_true\")\n parser.add_argument('--env', type=str, default='manipulator')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n parser.add_argument('--subtask', action='store_true')\n args = parser.parse_args()\n\n simulate_policy(args)\n input('Press a key to finish the script')\n" }, { "alpha_fraction": 0.4895370900630951, "alphanum_fraction": 0.5015853047370911, "avg_line_length": 35.66279220581055, "blob_id": "2ddcc915fc66b3cd74ea4752b207de3ed2604952", "content_id": "3fd24ae008f2a0b0e73f8e5bff36175ff1d5d22b", "detected_licenses": [ 
"BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3154, "license_type": "permissive", "max_line_length": 76, "num_lines": 86, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/costs/cost_fk.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.algorithms.rl_algos import evall1l2term\nfrom robolearn.algorithms.rl_algos import evallogl2term\nfrom robolearn.algorithms.rl_algos import get_ramp_multiplier\n\nfrom robolearn.algorithms.rl_algos import RAMP_CONSTANT\n\n\nclass CostState(object):\n def __init__(self, env, state_idxs, target_states=None, wps=None,\n ramp_option=RAMP_CONSTANT, wp_final_multiplier=1.0,\n cost_type='logl2', l1_weight=0., l2_weight=1., alpha=1e-2,\n ):\n self._state_idxs = state_idxs\n\n if target_states is None:\n target_states = [np.zeros(state_idx)\n for state_idx in state_idxs]\n self._target_states = [np.array(tgt) for tgt in target_states]\n self._ramp_option = ramp_option\n self._wp_final_multiplier = wp_final_multiplier\n\n if wps is None:\n wps = [np.ones(state_idx) for state_idx in state_idxs]\n self._wps = [np.array(wp) for wp in wps]\n\n if cost_type == 'logl2':\n self._cost_type = evallogl2term\n elif cost_type == 'l1l2':\n self._cost_type = evall1l2term\n else:\n raise AttributeError(\"Wrong cost_type option\")\n\n self._l1_weight = l1_weight\n self._l2_weight = l2_weight\n self._alpha = alpha\n\n def eval(self, path):\n observations = path['observations']\n T = len(path['observations'])\n Du = path['actions'][-1].shape[0]\n Dx = path['observations'][-1].shape[0]\n\n final_l = np.zeros(T)\n final_lu = np.zeros((T, Du))\n final_lx = np.zeros((T, Dx))\n final_luu = np.zeros((T, Du, Du))\n final_lxx = np.zeros((T, Dx, Dx))\n final_lux = np.zeros((T, Du, Dx))\n\n for state_idx, tgt, wp in zip(self._state_idxs, self._target_states,\n self._wps):\n x = observations[:, state_idx]\n dim_sensor = x.shape[-1]\n wpm = get_ramp_multiplier(\n self._ramp_option, T,\n wp_final_multiplier=self._wp_final_multiplier\n )\n wp = wp * np.expand_dims(wpm, axis=-1)\n\n # Compute state penalty.\n dist = x - tgt\n\n jx = np.tile(np.eye(dim_sensor), [T, 1, 1])\n jxx = np.zeros((T, dim_sensor, dim_sensor, dim_sensor))\n\n # Evaluate penalty term.\n l, ls, lss = self._cost_type(wp, dist, jx, jxx,\n self._l1_weight,\n self._l2_weight,\n self._alpha,\n )\n final_l += l\n\n final_lx[:, state_idx] += ls\n temp_idx = np.ix_(state_idx, state_idx)\n final_lxx[:, temp_idx[0], temp_idx[1]] += lss\n\n # print('**************')\n # print('**************')\n # print('STATE_COST 0', final_lx[0])\n # print('STATE_COST -1', final_lx[-1])\n # print('**************')\n # print('**************')\n\n return final_l, final_lx, final_lu, final_lxx, final_luu, final_lux\n\n" }, { "alpha_fraction": 0.7534246444702148, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 23.33333396911621, "blob_id": "343dc1f9240816ef5c80c25bd7301ab4eed19ee4", "content_id": "b664ce9f5a42a777f22d6e1f52002cbb799576c5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "permissive", "max_line_length": 26, "num_lines": 3, "path": "/robolearn/models/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .policies import *\nfrom .values import *\nfrom .transitions import *\n" }, { "alpha_fraction": 0.5868176817893982, "alphanum_fraction": 0.6148891448974609, "avg_line_length": 
50.680999755859375, "blob_id": "d30df3199ed2f88ac0625ce7b1cdae9ea4e99dce", "content_id": "1c216a22835a310f861d9ddcca7ddd3495783d5b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37262, "license_type": "permissive", "max_line_length": 136, "num_lines": 721, "path": "/scenarios/taskspace-torquecontrol-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nimport rbdl\nimport rospy\nfrom XCM.msg import CommandAdvr\nfrom XCM.msg import JointStateAdvr\nfrom robolearn.old_utils.tasks.lift_box_utils import create_box_relative_pose\nfrom robolearn.old_utils.tasks.lift_box_utils import reset_bigman_box_gazebo\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation, quaternion_slerp_interpolation\n\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_data\nfrom robolearn.old_utils.plot_utils import plot_joint_multi_info\nfrom robolearn.old_utils.robot_model import RobotModel\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_drill_relative_pose, create_hand_relative_pose\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import reset_bigman_drill_gazebo\nfrom robolearn.old_utils.transformations_utils import *\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Always turn off Gazebo logger\nos.system(\"gz log -d 0\")\ndir_path = os.path.dirname(os.path.abspath(__file__))\ntorques_saved_filename = 'torques_init_traj.npy'\n\n# Time\nT_init = 5 # Time to move from current position to T_init\nT_traj = 5 # Time to execute the trajectory\nfreq = 100 # Frequency (1/Ts)\nNrunning = int(np.ceil((T_traj + 10)*freq))\nobject_to_reach = 'drill'\n\n\nif object_to_reach == 'box':\n # BOX\n box_x = 0.70\n box_y = 0.00\n box_z = 0.0184\n box_yaw = 0 # Degrees\n box_size = [0.4, 0.5, 0.3]\n box_relative_pose = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw)\n box_relative_pose = create_box_relative_pose(box_x=box_x+0.02, box_y=box_y+0.02, box_z=box_z, box_yaw=box_yaw+5)\n final_left_hand_pose = create_hand_relative_pose(box_relative_pose, hand_x=0, hand_y=box_size[1]/2-0.02, hand_z=0, hand_yaw=0)\n final_right_hand_pose = create_hand_relative_pose(box_relative_pose, hand_x=0, hand_y=-box_size[1]/2+0.02, hand_z=0, hand_yaw=0)\nelse:\n # DRILL\n drill_x = 0.70\n drill_y = 0.00\n drill_z = 0.0184\n drill_yaw = 0 # Degrees\n drill_size = [0.1, 0.1, 0.3]\n drill_relative_pose = create_drill_relative_pose(drill_x=drill_x, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw)\n drill_relative_pose = create_drill_relative_pose(drill_x=drill_x+0.02, drill_y=drill_y+0.02, drill_z=drill_z, drill_yaw=drill_yaw+5)\n final_left_hand_pose = create_hand_relative_pose(drill_relative_pose, hand_x=0, hand_y=drill_size[1]/2, hand_z=0, hand_yaw=0)\n final_right_hand_pose = create_hand_relative_pose(drill_relative_pose, hand_x=0, hand_y=-drill_size[1]/2, hand_z=0, hand_yaw=0)\n\n\n# ROBOT MODEL for trying ID\n# robot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_rbdl_model = rbdl.loadModel(robot_urdf_file, verbose=False, floating_base=False)\nrobot_model = RobotModel(robot_urdf_file=robot_urdf_file)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\ntorso_name = 
'DWYTorso'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\ntorso_offset = np.array([0.000, 0.000, 0.000])\n\n# Stiffness/Damping gains from Xbot config file\ndefault_joint_stiffness = np.array([8000., 5000., 8000., 5000., 5000., 2000.,\n 8000., 5000., 5000., 5000., 5000., 2000.,\n 5000., 8000., 5000.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.,\n 300., 300.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.])\ndefault_joint_damping = np.array([30., 50., 30., 30., 30., 5.,\n 30., 50., 30., 30., 30., 5.,\n 30., 50., 30.,\n 30., 50., 30., 30., 1., 5., 1.,\n 1., 1.,\n 30., 50., 30., 30., 1., 5., 1.])\nKp_tau = np.eye(robot_rbdl_model.q_size)*default_joint_stiffness/100\nKd_tau = np.eye(robot_rbdl_model.qdot_size)*default_joint_damping/10\n\n# # Joint gains for Joint Space Torque controller\n# pd_tau_weights = np.array([0.80, 0.50, 0.80, 0.50, 0.50, 0.20,\n# 0.80, 0.50, 0.50, 0.50, 0.50, 0.20,\n# 0.50, 0.80, 0.50,\n# 0.50, 0.80, 0.50, 0.50, 0.03, 0.20, 0.03,\n# 0.03, 0.03,\n# 0.50, 0.80, 0.50, 0.50, 0.03, 0.20, 0.03])\n# Kp_tau = np.eye(robot_rbdl_model.q_size)*(100 * pd_tau_weights)\n# Kd_tau = np.eye(robot_rbdl_model.qdot_size)*(2 * pd_tau_weights)\n\n\n# ROS robot-state\njoint_pos_state = np.zeros(robot_rbdl_model.qdot_size) # Assuming joint state only gives actuated joints state\njoint_vel_state = np.zeros(robot_rbdl_model.qdot_size)\njoint_effort_state = np.zeros(robot_rbdl_model.qdot_size)\njoint_stiffness_state = np.zeros(robot_rbdl_model.qdot_size)\njoint_damping_state = np.zeros(robot_rbdl_model.qdot_size)\njoint_state_id = []\n\n\ndef state_callback(data, params):\n joint_ids = params[0]\n joint_pos = params[1]\n joint_vel = params[2]\n joint_effort = params[3]\n joint_stiffness = params[4]\n joint_damping = params[5]\n # if not joint_ids:\n # joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_pos[joint_ids] = data.link_position\n joint_effort[joint_ids] = data.effort\n joint_vel[joint_ids] = data.link_velocity\n joint_stiffness[joint_ids] = data.stiffness\n joint_damping[joint_ids] = data.damping\n\npublisher = rospy.Publisher(\"/xbotcore/bigman/command\", CommandAdvr, queue_size=10)\nsubscriber = rospy.Subscriber(\"/xbotcore/bigman/joint_states\", JointStateAdvr, state_callback, (joint_state_id,\n joint_pos_state,\n joint_vel_state,\n joint_effort_state,\n joint_stiffness_state,\n joint_damping_state))\nrospy.init_node('torquecontrol_example')\npub_rate = rospy.Rate(freq)\ndes_cmd = CommandAdvr()\n\n\n# Move ALL joints from current position to INITIAL position in position control mode.\ndes_cmd.name = bigman_params['joints_names']\nq_init = np.zeros(robot_rbdl_model.q_size)\n# q_init[15] = np.deg2rad(25)\n# q_init[16] = np.deg2rad(40)\n# q_init[18] = np.deg2rad(-45)\n# q_init[24] = np.deg2rad(25)\n# q_init[25] = np.deg2rad(-40)\n# q_init[27] = np.deg2rad(-45)\n# # q_init[15] = np.deg2rad(25)\n# # q_init[16] = np.deg2rad(40)\n# # q_init[17] = np.deg2rad(0)\n# # q_init[18] = np.deg2rad(-75)\n# # # ----\n# # q_init[24] = np.deg2rad(25)\n# # q_init[25] = np.deg2rad(-40)\n# # q_init[26] = np.deg2rad(0)\n# # q_init[27] = np.deg2rad(-75)\n\n# q_init = np.array([0., 0., 0., 0., 0., 0.,\n# 0., 0., 0., 0., 0., 0.,\n# 0., 0., 0.,\n# 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633,\n# #0., 0., 0., -1.5708, 0., 0., 0.,\n# 0., 0.,\n# 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\n# #0., 
0., 0., -1.5708, 0., 0., 0.])\nN = int(np.ceil(T_init*freq))\njoint_init_traj = polynomial5_interpolation(N, q_init, joint_pos_state)[0]\nprint(\"Moving to zero configuration with Position control.\")\nfor ii in range(N):\n des_cmd.position = joint_init_traj[ii, :]\n des_cmd.stiffness = default_joint_stiffness\n des_cmd.damping = default_joint_damping\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\nif object_to_reach == 'box':\n # print(\"Spawning/Moving box\")\n reset_bigman_box_gazebo(box_relative_pose, box_size=None)\nelse:\n # print(\"Spawning/Moving drill\")\n reset_bigman_drill_gazebo(drill_relative_pose, drill_size=None)\n\n\n# PAUSE:\nprint(\"Sleeping some seconds..\")\nrospy.sleep(2)\n\n# Move to REACH pose in torque control mode.\n# ------------------------------------------\nN = int(np.ceil(T_traj*freq))\n# joints_to_move = bigman_params['joint_ids']['LA']# + bigman_params['joint_ids']['TO']\njoints_to_move = bigman_params['joint_ids']['RA']# + bigman_params['joint_ids']['TO']\n# joints_to_move = [bigman_params['joint_ids']['BA'][6]]\n\n# TODO: Temporal, using the current configuration as q_init\nq_init = joint_pos_state.copy()\ninit_left_hand_pose = robot_model.fk(LH_name, q=q_init, body_offset=l_soft_hand_offset, update_kinematics=True,\n rotation_rep='quat')\ninit_right_hand_pose = robot_model.fk(RH_name, q=q_init, body_offset=r_soft_hand_offset, update_kinematics=True,\n rotation_rep='quat')\n\n# Preallocate matrices\nleft_task_space_traj = np.zeros((N, 7))\nleft_task_space_traj_dots = np.zeros((N, 6))\nleft_task_space_traj_ddots = np.zeros((N, 6))\nright_task_space_traj = np.zeros((N, 7))\nright_task_space_traj_dots = np.zeros((N, 6))\nright_task_space_traj_ddots = np.zeros((N, 6))\njoint_traj = np.zeros((N, robot_rbdl_model.q_size))\njoint_traj_dots = np.zeros((N, robot_rbdl_model.qdot_size))\njoint_traj_ddots = np.zeros((N, robot_rbdl_model.qdot_size))\n\n# Joint space interpolation\n# -------------------------\n# final_left_hand_pose = init_left_hand_pose.copy()\n# final_right_hand_pose = init_right_hand_pose.copy()\n# op_matrix = tf.transformations.quaternion_matrix(final_left_hand_pose[:4])\n# op_matrix = op_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 0, 1]))\n# final_left_hand_pose[:4] = tf.transformations.quaternion_from_matrix(op_matrix)\n# op_matrix = tf.transformations.quaternion_matrix(final_right_hand_pose[:4])\n# op_matrix = op_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(90), [0, 0, 1]))\n# final_right_hand_pose[:4] = tf.transformations.quaternion_from_matrix(op_matrix)\n# final_left_hand_pose[6] += 0.2\n# final_right_hand_pose[6] += 0.2\nprint(\"Initial LH pose %s\" % init_left_hand_pose)\nprint(\"Final LH pose %s\" % final_left_hand_pose)\nprint(\"Initial RH pose %s\" % init_right_hand_pose)\nprint(\"Final RH pose %s\" % final_right_hand_pose)\nprint('#'*10)\n\nq_reach = robot_model.ik(LH_name, final_left_hand_pose, body_offset=l_soft_hand_offset,\n mask_joints=bigman_params['joint_ids']['TO'], joints_limits=bigman_params['joints_limits'],\n method='optimization')\n# Get configuration only for Right Arm\nq_reach[bigman_params['joint_ids']['RA']] = robot_model.ik(RH_name, final_right_hand_pose,\n body_offset=r_soft_hand_offset,\n mask_joints=bigman_params['joint_ids']['TO'],\n joints_limits=bigman_params['joints_limits'],\n method='optimization')[bigman_params['joint_ids']['RA']]\n# q_reach[bigman_params['joint_ids']['RA']] = q_reach_right[bigman_params['joint_ids']['RA']]\n\n# # Move to q_reach to check configuration\n# 
N = int(np.ceil(T_init*freq))\n# joint_init_traj = polynomial5_interpolation(N, q_reach, joint_pos_state)[0]\n# print(\"Moving to q_reach with Position control.\")\n# for ii in range(N):\n# des_cmd.position = joint_init_traj[ii, :]\n# des_cmd.stiffness = default_joint_stiffness\n# des_cmd.damping = default_joint_damping\n# publisher.publish(des_cmd)\n# pub_rate.sleep()\n# init_left_hand_pose = robot_model.fk(LH_name, q=joint_pos_state, body_offset=l_soft_hand_offset,\n# update_kinematics=True, rotation_rep='quat')\n# init_right_hand_pose = robot_model.fk(RH_name, q=joint_pos_state, body_offset=r_soft_hand_offset,\n# update_kinematics=True, rotation_rep='quat')\n# print(init_left_hand_pose)\n# print(init_right_hand_pose)\n# raw_input('Press a key to continue...')\n\n# -------------\n# Interpolation\n# -------------\ninterpolation_type = 1\nif interpolation_type == 0:\n # Interpolation type 0: First task_space interp, then joint_space\n # ---------------------------------------------------------------\n print('Create task_space trajectory...')\n print(final_left_hand_pose[4:])\n print(init_left_hand_pose[4:])\n left_task_space_traj[:, 4:], left_task_space_traj_dots[:, 3:], left_task_space_traj_ddots[:, 3:] = \\\n polynomial5_interpolation(N, final_left_hand_pose[4:], init_left_hand_pose[4:])\n left_task_space_traj[:, :4], left_task_space_traj_dots[:, :3], left_task_space_traj_ddots[:, :3] = \\\n quaternion_slerp_interpolation(N, final_left_hand_pose[:4], init_left_hand_pose[:4])\n left_task_space_traj_dots *= freq\n left_task_space_traj_ddots *= freq**2\n\n right_task_space_traj[:, 4:], right_task_space_traj_dots[:, 3:], right_task_space_traj_ddots[:, 3:] = \\\n polynomial5_interpolation(N, final_right_hand_pose[4:], init_right_hand_pose[4:])\n right_task_space_traj[:, :4], right_task_space_traj_dots[:, :3], right_task_space_traj_ddots[:, :3] = \\\n quaternion_slerp_interpolation(N, final_right_hand_pose[:4], init_right_hand_pose[:4])\n right_task_space_traj_dots *= freq\n right_task_space_traj_ddots *= freq**2\n\n print('Create joint_space trajectory...')\n joint_traj[0, :] = joint_pos_state\n J_left = np.zeros((6, robot_rbdl_model.qdot_size))\n J_right = np.zeros((6, robot_rbdl_model.qdot_size))\n mask_joints = bigman_params['joint_ids']['TO']\n rarm_joints = bigman_params['joint_ids']['RA']\n for ii in range(N-1):\n # print('%d/%d' % (ii, N))\n # Compute the Jacobian matrix\n robot_model.update_jacobian(J_left, LH_name, joint_traj[ii, :], l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J_right, RH_name, joint_traj[ii, :], r_soft_hand_offset, update_kinematics=True)\n J_left[:, mask_joints] = 0\n J_right[:, mask_joints] = 0\n joint_traj_dots[ii, :] = np.linalg.lstsq(J_left, left_task_space_traj_dots[ii, :])[0]\n joint_traj_dots[ii, rarm_joints] = np.linalg.lstsq(J_left, left_task_space_traj_dots[ii, :])[0][rarm_joints]\n #joint_traj[ii, :] = robot_model.ik(LH_name, left_task_space_traj[ii, :], body_offset=l_soft_hand_offset,\n # mask_joints=bigman_params['joint_ids']['TO'],\n # joints_limits=bigman_params['joints_limits'],\n # #method='iterative',\n # method='optimization', regularization_parameter=0.1,\n # q_init=joint_traj[ii-1, :])\n joint_traj[ii+1, :] = joint_traj[ii, :] + joint_traj_dots[ii, :] * 1./freq\n #joint_traj_dots = np.vstack((np.diff(joint_traj, axis=0), np.zeros((1, robot_rbdl_model.qdot_size))))\n #joint_traj_dots *= freq\n joint_traj_ddots = np.vstack((np.diff(joint_traj_dots, axis=0), np.zeros((1, robot_rbdl_model.qdot_size))))\n 
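# np.diff yields per-step differences, so the multiplication by freq\n    # (i.e. 1/Ts) below converts them into per-second accelerations.\n    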
joint_traj_ddots *= freq\n\n # q_null = joint_pos_state.copy()\n # #q_null[17] = np.deg2rad(-45)\n # #q_null[12] = np.deg2rad(20)\n # #q_null[13] = np.deg2rad(40)\n # joint_traj[:, :], joint_traj_dots[:, :], joint_traj_ddots[:, :] = polynomial5_interpolation(N, q_null, joint_pos_state)\n # joint_traj_dots *= freq\n # joint_traj_ddots *= freq*freq\n\nelif interpolation_type == 1:\n # Interpolation type 1: First joint_space interp, then task_space\n # ---------------------------------------------------------------\n print('Create joint_space trajectory...')\n joint_traj[:, :], joint_traj_dots[:, :], joint_traj_ddots[:, :] = polynomial5_interpolation(N, q_reach, joint_pos_state)\n joint_traj_dots *= freq\n joint_traj_ddots *= freq*freq\n\n print('Create task_space trajectory...')\n J_left = np.zeros((6, robot_rbdl_model.qdot_size))\n J_right = np.zeros((6, robot_rbdl_model.qdot_size))\n mask_joints = bigman_params['joint_ids']['TO']\n for ii in range(N):\n left_task_space_traj[ii, :] = robot_model.fk(LH_name, q=joint_traj[ii, :], body_offset=l_soft_hand_offset,\n update_kinematics=True, rotation_rep='quat')\n right_task_space_traj[ii, :] = robot_model.fk(RH_name, q=joint_traj[ii, :], body_offset=r_soft_hand_offset,\n update_kinematics=True, rotation_rep='quat')\n if ii > 0:\n if quaternion_inner(left_task_space_traj[ii, :4], left_task_space_traj[ii-1, :4]) < 0:\n left_task_space_traj[ii, :4] *= -1\n if quaternion_inner(right_task_space_traj[ii, :4], right_task_space_traj[ii-1, :4]) < 0:\n right_task_space_traj[ii, :4] *= -1\n robot_model.update_jacobian(J_left, LH_name, joint_traj[ii, :], l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J_right, RH_name, joint_traj[ii, :], r_soft_hand_offset, update_kinematics=True)\n J_left[:, mask_joints] = 0\n J_right[:, mask_joints] = 0\n left_task_space_traj_dots[ii, :] = J_left.dot(joint_traj_dots[ii, :])\n right_task_space_traj_dots[ii, :] = J_right.dot(joint_traj_dots[ii, :])\n left_task_space_traj_ddots = np.vstack((np.diff(left_task_space_traj_dots, axis=0), np.zeros((1, 6))))\n right_task_space_traj_ddots = np.vstack((np.diff(right_task_space_traj_dots, axis=0), np.zeros((1, 6))))\n left_task_space_traj_ddots *= freq\n right_task_space_traj_ddots *= freq\n\n\n# -------------------------\n# Task Space Torque Control\n# -------------------------\n# Preallocate matrices\ntau = np.zeros(robot_rbdl_model.qdot_size)\ntau_left = np.zeros(robot_rbdl_model.qdot_size)\ntau_right = np.zeros(robot_rbdl_model.qdot_size)\ntaus_cmd_traj = np.zeros((N, robot_rbdl_model.qdot_size))\ntaus_traj = np.zeros((N, robot_rbdl_model.qdot_size))\nmulti_taus_traj = np.zeros((5, N, robot_rbdl_model.qdot_size))\nqs_traj = np.zeros((N, robot_rbdl_model.q_size))\nqdots_traj = np.zeros((N, robot_rbdl_model.q_size))\n\nJ_left = np.zeros((6, robot_rbdl_model.qdot_size))\nJ_right = np.zeros((6, robot_rbdl_model.qdot_size))\nJ_torso = np.zeros((6, robot_rbdl_model.qdot_size))\nM_left = np.zeros((robot_rbdl_model.qdot_size, robot_rbdl_model.qdot_size))\nM_right = np.zeros((robot_rbdl_model.qdot_size, robot_rbdl_model.qdot_size))\nM_left_bar = np.zeros((6, 6))\nM_right_bar = np.zeros((6, 6))\nc_plus_g = np.zeros(robot_rbdl_model.qdot_size)\ng = np.zeros(robot_rbdl_model.qdot_size)\n\ntask_left_pose_errors = np.zeros((N, 6))\ntask_right_pose_errors = np.zeros((N, 6))\nreal_left_task_space_traj = np.zeros_like(left_task_space_traj)\nreal_right_task_space_traj = np.zeros_like(right_task_space_traj)\nreal_left_task_space_traj_dots = 
np.zeros_like(left_task_space_traj_dots)\nreal_right_task_space_traj_dots = np.zeros_like(right_task_space_traj_dots)\nleft_singu_distances = np.zeros(N)\nright_singu_distances = np.zeros(N)\n\n# Only control joints_to_move\ndes_cmd.name = [bigman_params['joints_names'][idx] for idx in joints_to_move]\ndes_cmd.position = []\n\nprint(\"Moving to the initial configuration of trajectory with torque control.\")\n#raw_input(\"Press a key to continue...\")\n\n#Kp_task = np.eye(6)*np.array([1000, 1000, 1000, 50., 50., 50.], dtype=np.float64)\n#Kd_task = np.sqrt(Kp_task)\n# Nakanishi: high task space gain setting\nKp_task = np.eye(6)*np.array([1000, 1000, 1000, 50., 50., 50.], dtype=np.float64)\nKd_task = np.sqrt(Kp_task)\n\n# Nakanishi: low task space gain setting\nKp_task = np.eye(6)*np.array([500, 500, 500, 25., 25., 25.], dtype=np.float64)\nKd_task = np.sqrt(Kp_task)\n\n# Domingo: similar than low task space gain setting\nK_ori = np.tile(50, 3)#*0.1\nK_pos = np.tile(20, 3)#*0.1\n#K_ori = np.tile(400, 3)\n#K_pos = np.tile(25, 3)\n#K_pos = np.tile(100, 3)\n#K_ori = np.tile(15, 3)\n#K_pos = np.tile(5, 3)\nKp_task = np.eye(6)*np.r_[K_ori, K_pos]\nKd_task = np.sqrt(Kp_task)\n\n# Joint space Kd gain\nKd_q = Kd_tau#np.eye(robot_model.qdot_size)*0.1\n\n#Kp_null = np.eye(robot_model.qdot_size)*10\n#Kp_null = np.eye(robot_model.qdot_size)*2\n#Kp_null = np.eye(robot_model.qdot_size)*0.2\nKp_null = np.eye(robot_model.qdot_size)*0.6\n\n# Multitask controller\nalpha_left = 1#0.5\nalpha_right = 1#0.5\n\nmask_joints = bigman_params['joint_ids']['TO']\ninf_limits = np.array([bigman_params['joints_limits'][ii][0] for ii in range(robot_model.qdot_size)])\nmax_limits = np.array([bigman_params['joints_limits'][ii][1] for ii in range(robot_model.qdot_size)])\n\ndes_cmd.position = q_init[joints_to_move]\nfor ii in range(Nrunning):\n\n temp_ii = ii\n if ii >= N:\n ii = N - 1\n\n # Get current(sensed) joints values\n current_joint_pos = joint_pos_state.copy()\n current_joint_vel = joint_vel_state.copy()\n current_joint_effort = joint_effort_state.copy()\n\n # Update Jacobian(s)\n robot_model.update_jacobian(J_left, LH_name, current_joint_pos, l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J_right, RH_name, current_joint_pos, r_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J_torso, torso_name, current_joint_pos, torso_offset, update_kinematics=True)\n J_left[:, bigman_params['joint_ids']['LB']] = 0\n J_left[:, bigman_params['joint_ids']['TO']] = 0\n J_left[:, bigman_params['joint_ids']['RA']] = 0\n J_right[:, bigman_params['joint_ids']['LB']] = 0\n J_right[:, bigman_params['joint_ids']['TO']] = 0\n J_right[:, bigman_params['joint_ids']['LA']] = 0\n\n # Update Non linear Effects (Coriolis + gravity forces)\n robot_model.update_nonlinear_forces(c_plus_g, current_joint_pos, current_joint_vel)\n # robot_model.update_nonlinear_forces(c_plus_g, joint_traj[ii, :], joint_traj_dots[ii, :])\n # Update gravity forces\n robot_model.update_gravity_forces(g, current_joint_pos)\n # robot_model.update_gravity_forces(g, joint_traj[ii, :])\n\n # Get J_dot_q_dot(s)\n J_left_dot_q_dot = robot_model.jdqd(LH_name, q=current_joint_pos, qdot=current_joint_vel,\n body_offset=l_soft_hand_offset, update_kinematics=True)\n J_right_dot_q_dot = robot_model.jdqd(RH_name, q=current_joint_pos, qdot=current_joint_vel,\n body_offset=r_soft_hand_offset, update_kinematics=True)\n\n # Get current operational point pose(s)\n real_left_task_space_traj[ii, :] = robot_model.fk(LH_name, q=current_joint_pos, 
body_offset=l_soft_hand_offset,\n update_kinematics=True, rotation_rep='quat')\n real_right_task_space_traj[ii, :] = robot_model.fk(RH_name, q=current_joint_pos, body_offset=r_soft_hand_offset,\n update_kinematics=True, rotation_rep='quat')\n # Check quaternion inversion\n if ii > 0:\n if quaternion_inner(real_left_task_space_traj[ii, :4], real_left_task_space_traj[ii-1, :4]) < 0:\n real_left_task_space_traj[ii, :4] *= -1\n if quaternion_inner(real_right_task_space_traj[ii, :4], real_right_task_space_traj[ii-1, :4]) < 0:\n real_right_task_space_traj[ii, :4] *= -1\n\n # Calculate current task space velocity(ies)\n real_left_task_space_traj_dots[ii, :] = J_left.dot(joint_vel_state)\n real_right_task_space_traj_dots[ii, :] = J_right.dot(joint_vel_state)\n\n # Calculate pose and velocities errors\n task_left_pose_error = compute_cartesian_error(left_task_space_traj[ii, :], real_left_task_space_traj[ii, :])\n task_right_pose_error = compute_cartesian_error(right_task_space_traj[ii, :], real_right_task_space_traj[ii, :])\n task_left_vel_error = left_task_space_traj_dots[ii, :] - real_left_task_space_traj_dots[ii, :]\n task_right_vel_error = right_task_space_traj_dots[ii, :] - real_right_task_space_traj_dots[ii, :]\n\n # Reference task-space acceleration(s)\n x_left_ddot_r = left_task_space_traj_ddots[ii, :] + Kp_task.dot(task_left_pose_error) + Kd_task.dot(task_left_vel_error)\n x_right_ddot_r = right_task_space_traj_ddots[ii, :] + Kp_task.dot(task_right_pose_error) + Kd_task.dot(task_right_vel_error)\n\n # Update Mass matrix\n # robot_model.update_inertia_matrix(M, joint_traj[ii, :])\n robot_model.update_inertia_matrix(M_left, current_joint_pos)\n robot_model.update_inertia_matrix(M_right, current_joint_pos)\n #M_left[bigman_params['joint_ids']['LB'], bigman_params['joint_ids']['LB']] = 0\n #M_left[bigman_params['joint_ids']['TO'], bigman_params['joint_ids']['TO']] = 0\n #M_left[bigman_params['joint_ids']['RA'], bigman_params['joint_ids']['RA']] = 0\n #M_right[bigman_params['joint_ids']['LB'], bigman_params['joint_ids']['LB']] = 0\n #M_right[bigman_params['joint_ids']['TO'], bigman_params['joint_ids']['TO']] = 0\n #M_right[bigman_params['joint_ids']['LA'], bigman_params['joint_ids']['LA']] = 0\n\n # #F = M_left_bar.dot(x_left_ddot_r)\n # rbdl.CompositeRigidBodyAlgorithm(robot_rbdl_model, current_joint_pos, M, update_kinematics=True)\n # M_left_bar[:, :] = np.linalg.inv(J_left.dot(np.linalg.inv(M)).dot(J_left.T))\n # rbdl.NonlinearEffects(robot_rbdl_model, current_joint_pos, current_joint_vel, c_plus_g)\n # tau_left = J_left.T.dot(M_left_bar).dot(x_left_ddot_r) + c_plus_g\n # # Null space\n # #u_null = Kp_null.dot(joint_traj[ii, :] - current_joint_pos)\n # u_null = Kp_null.dot(q_reach - current_joint_pos)\n # #u_null = Kp_null.dot(joint_traj[0, :] - current_joint_pos)\n # #u_null = 0.5*(q0 - current_joint_pos)*Kp_null.dot(q0 - current_joint_pos)\n # # Method1: Nakanishi\n # J_left_bar = np.linalg.inv(M).dot(J_left.T).dot(M_left_bar)\n # torque_null = (np.eye(robot_model.qdot_size) - J_left.T.dot(J_left_bar.T)).dot(u_null)\n # ## Method2: DeWolf\n # #J_left_bar_T = M_left_bar.dot(J_left).dot(np.linalg.inv(M))\n # #torque_null = (np.eye(robot_model.qdot_size) - J_left.T.dot(J_left_bar_T)).dot(u_null)\n # tau_left += torque_null\n\n # Nakanishi: Gauss Controller (Operational Space Controller in Khatib (1987))\n M_left_bar[:, :] = np.linalg.inv(J_left.dot(np.linalg.inv(M_left)).dot(J_left.T))\n M_right_bar[:, :] = np.linalg.inv(J_right.dot(np.linalg.inv(M_right)).dot(J_right.T))\n J_left_bar = 
np.linalg.inv(M_left).dot(J_left.T).dot(M_left_bar)\n J_right_bar = np.linalg.inv(M_right).dot(J_right.T).dot(M_right_bar)\n q_error_left = np.zeros_like(current_joint_pos)\n q_error_right = np.zeros_like(current_joint_pos)\n q0 = q_reach\n q0 = joint_traj[ii, :]\n #q0 = joint_traj[ii, :]\n #q0 = q_init\n #q0 = np.array(current_joint_pos.copy())\n #q0[17] = joint_traj[ii, 17]\n #q0[18] = joint_traj[ii, 18]\n #q0[12] = joint_traj[ii, 12]; q0[13] = joint_traj[ii, 13]; q0[14] = joint_traj[ii, 14]\n #q0[bigman_params['joint_ids']['TO']] = joint_traj[ii, bigman_params['joint_ids']['TO']]\n ##q0[12] = 0; q0[13] = 0; q0[14] = 0\n ##q0 = np.zeros_like(current_joint_pos)\n q_error_left[bigman_params['joint_ids']['LA']] = (current_joint_pos - q0)[bigman_params['joint_ids']['LA']]\n q_error_right[bigman_params['joint_ids']['RA']] = (current_joint_pos - q0)[bigman_params['joint_ids']['RA']]\n q_grad_left = Kp_null.dot(q_error_left)\n q_grad_right = Kp_null.dot(q_error_right)\n #q_grad = Kp_null.dot(current_joint_pos - (inf_limits+max_limits)/2)\n alpha = 1\n #torque_null_left = -Kd_q.dot(current_joint_vel)*0 - alpha*q_grad\n #torque_null_right = -Kd_q.dot(current_joint_vel)*0 - alpha*q_grad\n #left_projection_null_times_torque_null = (np.eye(robot_model.qdot_size) - J_left.T.dot(J_left_bar.T)).dot(torque_null_left)\n #right_projection_null_times_torque_null = (np.eye(robot_model.qdot_size) - J_right.T.dot(J_right_bar.T)).dot(torque_null_right)\n #torque_null_left = M.dot(J_right_bar).dot(x_right_ddot_r - J_right_dot_q_dot*0) + right_projection_null_times_torque_null\n #torque_null_right = M.dot(J_left_bar).dot(x_left_ddot_r - J_left_dot_q_dot*0) + left_projection_null_times_torque_null\n torque_null_left = -Kd_q.dot(current_joint_vel)*0 - alpha*q_grad_left\n torque_null_right = -Kd_q.dot(current_joint_vel)*0 - alpha*q_grad_right\n left_projection_null_times_torque_null = (np.eye(robot_model.qdot_size) - J_left.T.dot(J_left_bar.T)).dot(torque_null_left)\n right_projection_null_times_torque_null = (np.eye(robot_model.qdot_size) - J_right.T.dot(J_right_bar.T)).dot(torque_null_right)\n\n tau_left = M_left.dot(J_left_bar).dot(x_left_ddot_r - J_left_dot_q_dot*0 + J_left.dot(np.linalg.inv(M_left)).dot(g)*0)\\\n + left_projection_null_times_torque_null\n tau_right = M_right.dot(J_right_bar).dot(x_right_ddot_r - J_right_dot_q_dot*0 + J_right.dot(np.linalg.inv(M_right)).dot(g)*0)\\\n + right_projection_null_times_torque_null\n # Multitask controller\n #tau = alpha_left*tau_left*0 + alpha_right*tau_right*0 + c_plus_g\n tau = alpha_left*tau_left + alpha_right*tau_right + g\n\n # # Nakanishi: Dynamical Decoupling Controller Variation 2\n # # (With Null Space Pre-multiplication of M, and Compensation of C and g in Joint Space)\n # rbdl.CompositeRigidBodyAlgorithm(robot_rbdl_model, current_joint_pos, M, update_kinematics=True)\n # M_left_bar[:, :] = np.linalg.inv(J_left.dot(np.linalg.inv(M)).dot(J_left.T))\n # J_left_bar = np.linalg.inv(M).dot(J_left.T).dot(M_left_bar)\n # q0 = q_reach\n # #q0 = np.zeros_like(current_joint_pos)\n # q_grad = Kp_null.dot(current_joint_pos - q0)\n # alpha = 1\n # q_ddot_0 = M.dot(-Kd_q.dot(current_joint_vel)*0 - alpha*q_grad)\n # torque_null = (np.eye(robot_model.qdot_size) - J_left_bar.dot(J_left)).dot(q_ddot_0)\n # tau_left = M.dot(J_left_bar.dot(x_left_ddot_r - J_left_dot_q_dot*0) + torque_null*0) + c_plus_g\n\n\n # Modugno: Unified Framework (UF)\n\n\n # # Del Prete: Sentis' WBC\n # J_1 = J_left\n # J_2 = J_right\n # x_ddot_1 = x_left_ddot_r\n # x_ddot_2 = x_right_ddot_r\n # 
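(note: J_1 is the left-hand Jacobian here, so the first assignment\n    # below should presumably read J_left_dot_q_dot rather than\n    # J_right_dot_q_dot)\n    # 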
J_1_dot_q_dot = J_right_dot_q_dot\n # J_2_dot_q_dot = J_right_dot_q_dot\n # Lambda_p_1 = np.linalg.pinv(J_1.dot(np.linalg.inv(M)).dot(J_1.T))\n # Lambda_p_2 = np.linalg.pinv(J_2.dot(np.linalg.inv(M)).dot(J_2.T))\n # #h = c_plus_g\n # h = g\n # #sum_F_p_0 = np.zeros_like(h)\n # #F_p_0 = Lambda_p_0.dot(x_ddot_0 - J_0_dot_q_dot*0 + J_0.dot(np.linalg.inv(M)).dot(h - sum_F_p_0)*0)\n # #sum_J_p_0 = np.zeros((robot_model.qdot_size, robot_model.qdot_size))\n # #J_p_0 = J_0.dot(np.eye(robot_model.qdot_size) - sum_J_p_0)\n # #dyn_consist_J_pseudo_0 = np.linalg.inv(M).dot(J_0.T).dot(Lambda_p_0)\n # #torque_0 = J_p_0.T.dot(F_p_0)\n # sum_F_p_1 = np.zeros_like(h)\n # F_p_1 = Lambda_p_1.dot(x_ddot_1 - J_1_dot_q_dot*0 + J_1.dot(np.linalg.inv(M)).dot(h - sum_F_p_1)*0)\n # sum_J_p_1 = np.zeros((robot_model.qdot_size, robot_model.qdot_size))\n # J_p_1 = J_1.dot(np.eye(robot_model.qdot_size) - sum_J_p_1)\n # dyn_consist_J_pseudo_1 = np.linalg.inv(M).dot(J_1.T).dot(Lambda_p_1)\n # torque_1 = J_p_1.T.dot(F_p_1)\n # sum_F_p_2 = J_p_1.T.dot(F_p_1)\n # F_p_2 = Lambda_p_2.dot(x_ddot_2 - J_2_dot_q_dot*0 + J_2.dot(np.linalg.inv(M)).dot(h - sum_F_p_2)*0)\n # sum_J_p_2 = dyn_consist_J_pseudo_1.dot(J_p_1)\n # J_p_2 = J_2.dot(np.eye(robot_model.qdot_size) - sum_J_p_2)\n # dyn_consist_J_pseudo_2 = np.linalg.inv(M).dot(J_2.T).dot(Lambda_p_2)\n # torque_2 = J_p_2.T.dot(F_p_2)\n # q_error = np.zeros(robot_model.qdot_size)\n # #q_error[bigman_params['joint_ids']['TO']] = (q_init - current_joint_pos)[bigman_params['joint_ids']['TO']]\n # q_error = q_init - current_joint_pos\n # q_ddot_3 = Kp_null.dot(q_error) - Kd_q.dot(current_joint_vel)*0\n # # torque_3 = (np.eye(robot_model.qdot_size) - J_1.T.dot(dyn_consist_J_pseudo_1.T)).dot(q_ddot_3) # Works for no torque_2\n # sum_J_p_3 = dyn_consist_J_pseudo_2.dot(J_p_2) + sum_J_p_2\n # J_p_3 = np.eye(robot_model.qdot_size) - sum_J_p_3\n # torque_3 = J_p_3.T.dot(q_ddot_3)\n # torque = torque_1 + torque_2 + torque_3 + h\n # print(torque_1)\n # print(torque_2)\n # print(torque_3)\n # print(tau_left)\n # print(torque)\n # #raw_input('---')\n # tau = torque\n # #tau = tau_left\n\n # # Joint_space Torque control\n # rbdl.InverseDynamics(robot_rbdl_model, joint_traj[ii, :], joint_traj_dots[ii, :], joint_traj_ddots[ii, :], tau)\n # pd_tau = Kp_tau.dot(joint_traj[ii, :] - joint_pos_state) + Kd_tau.dot(joint_traj_dots[ii, :] - joint_vel_state)\n # tau += pd_tau\n\n # Distance from singularities\n U_l, s_l, V_l = np.linalg.svd(J_left[:, bigman_params['joint_ids']['LA']], full_matrices=False)\n U_r, s_r, V_r = np.linalg.svd(J_right[:, bigman_params['joint_ids']['RA']], full_matrices=False)\n # # \\mu = sqrt(|J J^T|)\n # singu_distance_left = np.sqrt(np.linalg.det(J_left.dot(J_left.T)))\n # singu_distance_right = np.sqrt(np.linalg.det(J_right.dot(J_right.T)))\n # print('%.4f -- %.4f' % (singu_distance_left, singu_distance_right))\n # \\mu = \\prod(\\sigma_i) where \\sigma_i is the i singular value\n singu_distance_left = np.prod(s_l)\n singu_distance_right = np.prod(s_r)\n print(temp_ii)\n print('%.4f -- %.4f' % (singu_distance_left, singu_distance_right))\n singu_distance_left = np.min(s_l)\n singu_distance_right = np.min(s_r)\n print('%.4f -- %.4f' % (singu_distance_left, singu_distance_right))\n print('---')\n left_singu_distances[ii] = singu_distance_left\n right_singu_distances[ii] = singu_distance_right\n\n # Uncomment to send torque references\n des_cmd.position = []\n des_cmd.effort = tau[joints_to_move]\n des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n des_cmd.damping = 
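
# [editor's sketch] The singularity-proximity measures printed above, in
# isolation: Yoshikawa's manipulability prod(sigma_i) (equal to sqrt(det(J J^T))
# for a full-row-rank J) and the smallest singular value; J is any task Jacobian.
import numpy as np

def singularity_measures(J):
    s = np.linalg.svd(J, compute_uv=False)   # singular values of J
    return np.prod(s), np.min(s)
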
np.zeros_like(tau[joints_to_move])\n\n # # Uncomment to send position references\n # des_cmd.position = joint_traj[ii, joints_to_move]\n # des_cmd.effort = []\n # des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n # des_cmd.damping = default_joint_damping[joints_to_move]\n\n publisher.publish(des_cmd)\n taus_cmd_traj[ii, :] = tau\n taus_traj[ii, :] = joint_effort_state\n qs_traj[ii, :] = joint_pos_state\n qdots_traj[ii, :] = joint_vel_state\n task_left_pose_errors[ii, :] = task_left_pose_error\n task_right_pose_errors[ii, :] = task_right_pose_error\n multi_taus_traj[0, ii, :] = tau\n multi_taus_traj[1, ii, :] = tau_left\n multi_taus_traj[2, ii, :] = tau_right\n multi_taus_traj[3, ii, :] = c_plus_g\n multi_taus_traj[4, ii, :] = g\n pub_rate.sleep()\n\n if temp_ii >= N:\n ii = temp_ii\n\n# Return to position control\nprint(\"Changing to position control!\")\ndes_cmd.position = joint_pos_state[joints_to_move]\nfor ii in range(50):\n des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n des_cmd.damping = default_joint_damping[joints_to_move]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n\n# ##### #\n# PLOTS #\n# ##### #\n#joints_to_plot = bigman_params['joint_ids']['LA']# + bigman_params['joint_ids']['TO']\njoints_to_plot = joints_to_move\ncols = 3\ntask_names = ['qx', 'qy', 'qz', 'qw', 'x', 'y', 'z']\ntask_error_names = ['omegax', 'omegay', 'omegaz', 'x', 'y', 'z']\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nprint(\"Plotting...\")\n#plot_desired_sensed_data(range(7), left_task_space_traj, real_left_task_space_traj, task_names, data_type='pose',\n# block=False, legend=False)\nplot_desired_sensed_data(range(7), right_task_space_traj, real_right_task_space_traj, task_names, data_type='pose',\n block=False, legend=False)\nplot_desired_sensed_data(range(6), task_left_pose_errors, task_right_pose_errors, task_error_names,\n data_type='pose-error', block=False, legend=False)\nplot_desired_sensed_data(joints_to_plot, joint_traj, qs_traj, joint_names, data_type='position',\n limits=bigman_params['joints_limits'], block=False, legend=False)\n#plot_desired_sensed_data(joints_to_plot, np.tile(q_init, (N, 1)), qs_traj, joint_names, data_type='position', block=False)\n#plot_desired_sensed_data(joints_to_plot, joint_traj_dots*0, qdots_traj, joint_names, data_type='velocity', block=False)\nplot_desired_sensed_data(joints_to_plot, taus_cmd_traj, taus_traj, joint_names, data_type='torque', block=False)\n\nplot_joint_multi_info(joints_to_plot, multi_taus_traj, joint_names, data='torque', block=True, cols=3, legend=True,\n labels=['total', 'left', 'right', 'c+g', 'g'])\n\n# plt.figure()\n# plt.plot(left_singu_distances)\n# plt.plot(right_singu_distances)\n# plt.show()\n# plot_desired_sensed_torque_position(joints_to_plot, taus_cmd_traj, taus_traj,\n# joint_traj, qs_traj, joint_names, block=True, cols=cols)\n\nprint(\"Saving sensed torques in %s\" % torques_saved_filename)\nnp.save(torques_saved_filename, taus_traj)\nraw_input('Press a key to close the script!!')\nsys.exit()\n" }, { "alpha_fraction": 0.5313931107521057, "alphanum_fraction": 0.5362595319747925, "avg_line_length": 45.890380859375, "blob_id": "fd6c986642ec5a15ad3a2ad2f6e2d1a92cfd864a", "content_id": "f02291b9644ed359f62366929c1ff1907db66392", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20960, "license_type": "permissive", "max_line_length": 130, "num_lines": 447, "path": 
"/robolearn/torch/algorithms/rl_algos/gps/dualism.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.v010.algos.gps.gps_utils import PolicyInfo, extract_condition\nfrom robolearn.v010.algos.gps.gps_utils import TrajectoryInfo, DualityInfo\nfrom robolearn.v010.utils.experience_buffer import ExperienceBuffer\nfrom robolearn.v010.utils.experience_buffer import get_bigger_idx, get_smaller_idx\nfrom robolearn.v010.utils.traj_opt.traj_opt_utils import traj_distr_kl, traj_distr_kl_alt\nfrom robolearn.v010.utils.policy_utils import fit_linear_gaussian_policy, lqr_forward\n\nfrom robolearn.v010.utils.sample.sample import Sample\nfrom robolearn.v010.utils.sample.sample_list import SampleList\n\n\nclass Dualism(object):\n def __init__(self):\n # Duality data with: [sample_list, samples_cost, cs_traj, traj_dist, pol_info]\n self._bad_duality_info = [DualityInfo() for _ in range(self.M)]\n self._good_duality_info = [DualityInfo() for _ in range(self.M)]\n\n # Good/Bad Experience Buffer\n for ii in range(self.M):\n buffer_size = self._hyperparams['algo_hyperparams']['n_bad_buffer']\n selection_type = \\\n self._hyperparams['algo_hyperparams']['bad_traj_selection_type']\n self._bad_duality_info[ii].experience_buffer = \\\n ExperienceBuffer(buffer_size, 'bad', selection_type)\n\n buffer_size = self._hyperparams['algo_hyperparams']['n_good_buffer']\n selection_type = \\\n self._hyperparams['algo_hyperparams']['good_traj_selection_type']\n self._good_duality_info[ii].experience_buffer = \\\n ExperienceBuffer(buffer_size, 'good', selection_type)\n\n # TrajectoryInfo for good and bad trajectories\n self.bad_trajs_info = [None for _ in range(self.M)]\n self.good_trajs_info = [None for _ in range(self.M)]\n for m in range(self.M):\n self.bad_trajs_info[m] = TrajectoryInfo()\n self.good_trajs_info[m] = TrajectoryInfo()\n\n if self._hyperparams['fit_dynamics']:\n dynamics = self._hyperparams['dynamics']\n self.bad_trajs_info[m].dynamics = dynamics['type'](dynamics)\n self.good_trajs_info[m].dynamics = dynamics['type'](dynamics)\n\n # TODO: Use demonstration trajectories\n # # Get the initial trajectory distribution hyperparams\n # init_traj_distr = extract_condition(self._hyperparams['init_traj_distr'], self._train_cond_idx[m])\n # Instantiate Trajectory Distribution: init_lqr or init_pd\n # self.good_duality_infor = init_traj_distr['type'](init_traj_distr)\n # self.bad_duality_infor = init_traj_distr['type'](init_traj_distr)\n\n # Get the initial trajectory distribution hyperparams\n init_traj_distr = extract_condition(self._hyperparams['init_traj_distr'],\n self._train_cond_idx[m])\n # Instantiate Trajectory Distribution: init_lqr or init_pd\n self._bad_duality_info[m].traj_dist = init_traj_distr['type'](init_traj_distr)\n self._good_duality_info[m].traj_dist = init_traj_distr['type'](init_traj_distr)\n\n # Same policy prior than GlobalPol for good/bad\n self._hyperparams['algo_hyperparams']['T'] = self.T\n self._hyperparams['algo_hyperparams']['dU'] = self.dU\n self._hyperparams['algo_hyperparams']['dX'] = self.dX\n policy_prior = self._hyperparams['algo_hyperparams']['policy_prior']\n self._bad_duality_info[m].pol_info = \\\n PolicyInfo(self._hyperparams['algo_hyperparams'])\n self._bad_duality_info[m].pol_info.policy_prior = \\\n policy_prior['type'](policy_prior)\n self._good_duality_info[m].pol_info = \\\n PolicyInfo(self._hyperparams['algo_hyperparams'])\n self._good_duality_info[m].pol_info.policy_prior = \\\n 
policy_prior['type'](policy_prior)\n\n def _update_bad_samples(self):\n \"\"\"Update the Bad samples list and samples cost\n\n Returns: None\n\n \"\"\"\n\n logger = self.logger\n\n for cond in range(self.M):\n # Sample costs estimate.\n if self._hyperparams['algo_hyperparams']['bad_costs']:\n cs = np.zeros_like(self.cur[cond].cs)\n for bc in self._hyperparams['algo_hyperparams']['bad_costs']:\n for ss in range(cs.shape[0]): # Over samples\n cs[ss, :] += self.cur[cond].cost_compo[ss][bc]\n # If this specific cost is zero, then use the total cost\n if np.sum(cs) == 0:\n cs = self.cur[cond].cs\n else:\n cs = self.cur[cond].cs\n\n sample_list = self.cur[cond].sample_list\n\n # Get index of sample with worst Return\n #worst_index = np.argmax(np.sum(cs, axis=1))\n n_bad = self._hyperparams['algo_hyperparams']['n_bad_samples']\n if n_bad == cs.shape[0]:\n worst_indeces = range(n_bad)\n else:\n worst_indeces = get_bigger_idx(np.sum(cs, axis=1), n_bad)\n\n # TODO: Maybe it is better to put this step directly in exp_buffer\n samples_to_add = [sample_list[bad_index] for bad_index in worst_indeces]\n costs_to_add = [cs[bad_index] for bad_index in worst_indeces]\n\n # Get the experience buffer\n exp_buffer = self._bad_duality_info[cond].experience_buffer\n\n # Add to buffer\n exp_buffer.add(samples_to_add, costs_to_add)\n\n # TODO: CHeck if it is better to fit to the whole buffer\n # Get the desired number of elements to fit the traj\n trajs, costs = exp_buffer.get_trajs_and_costs(n_bad)\n\n # TODO: Find a better way than create always SampleList\n self._bad_duality_info[cond].sample_list = SampleList(trajs)\n self._bad_duality_info[cond].samples_cost = costs\n\n # print(sorted(np.sum(cs, axis=1)))\n # print(np.sum(costs_to_add, axis=1))\n # print(np.sum(SampleList(trajs).get_states()))\n # print(np.sum(costs, axis=1))\n # print('buffer:', np.sum([cc for cc in exp_buffer._costs], axis=1))\n # print('%%')\n\n def _update_good_samples(self):\n \"\"\"Update the Good samples list and samples cost\n\n Returns: None\n\n \"\"\"\n logger = self.logger\n\n for cond in range(self.M):\n # Sample costs estimate.\n if self._hyperparams['algo_hyperparams']['bad_costs']:\n cs = np.zeros_like(self.cur[cond].cs)\n for bc in self._hyperparams['algo_hyperparams']['bad_costs']:\n for ss in range(cs.shape[0]): # Over samples\n cs[ss, :] += self.cur[cond].cost_compo[ss][bc]\n # If this specific cost is zero, then use the total cost\n if np.sum(cs) == 0:\n cs = self.cur[cond].cs\n else:\n cs = self.cur[cond].cs\n\n sample_list = self.cur[cond].sample_list\n\n # Get index of sample with best Return\n n_good = self._hyperparams['algo_hyperparams']['n_good_samples']\n if n_good == cs.shape[0]:\n best_indeces = range(n_good)\n else:\n best_indeces = get_smaller_idx(np.sum(cs, axis=1), n_good)\n\n # TODO: Maybe it is better to put this step directly in exp_buffer\n samples_to_add = [sample_list[good_index] for good_index in best_indeces]\n costs_to_add = [cs[good_index] for good_index in best_indeces]\n\n # Get the experience buffer\n exp_buffer = self._good_duality_info[cond].experience_buffer\n\n # Add to buffer\n exp_buffer.add(samples_to_add, costs_to_add)\n\n # TODO: CHeck if it is better to fit to the whole buffer\n # Get the desired number of elements to fit the traj\n trajs, costs = exp_buffer.get_trajs_and_costs(n_good)\n\n # TODO: Find a better way than create always SampleList\n self._good_duality_info[cond].sample_list = SampleList(trajs)\n self._good_duality_info[cond].samples_cost = costs\n\n def 
_eval_good_bad_samples_costs(self):\n \"\"\"\n Evaluate costs for all current samples for a condition.\n Args:\n cond: Condition to evaluate cost on.\n \"\"\"\n for cond in range(self.M):\n cost_fcn = self.cost_function[cond]\n good_sample_list = self._good_duality_info[cond].sample_list\n bad_sample_list = self._bad_duality_info[cond].sample_list\n\n good_true_cost, good_cost_estimate, _ = \\\n self._eval_sample_list_cost(good_sample_list, cost_fcn)\n bad_true_cost, bad_cost_estimate, _ = \\\n self._eval_sample_list_cost(bad_sample_list, cost_fcn)\n\n # True value of cost (cs average).\n self._good_duality_info[cond].traj_cost = np.mean(good_true_cost,\n axis=0)\n self._bad_duality_info[cond].traj_cost = np.mean(bad_true_cost,\n axis=0)\n\n # Reward estimate.\n self.good_trajs_info[cond].Cm = good_cost_estimate[0] # Quadratic term (matrix).\n self.good_trajs_info[cond].cv = good_cost_estimate[1] # Linear term (vector).\n self.good_trajs_info[cond].cc = good_cost_estimate[2] # Constant term (scalar).\n\n self.bad_trajs_info[cond].Cm = bad_cost_estimate[0] # Quadratic term (matrix).\n self.bad_trajs_info[cond].cv = bad_cost_estimate[1] # Linear term (vector).\n self.bad_trajs_info[cond].cc = bad_cost_estimate[2] # Constant term (scalar).\n\n def _check_kl_div_good_bad(self):\n for cond in range(self.M):\n good_distr = self._good_duality_info[cond].traj_dist\n bad_distr = self._bad_duality_info[cond].traj_dist\n mu_good, sigma_good = lqr_forward(good_distr,\n self.good_trajs_info[cond])\n mu_bad, sigma_bad = lqr_forward(bad_distr,\n self.bad_trajs_info[cond])\n kl_div_good_bad = traj_distr_kl_alt(mu_good, sigma_good,\n good_distr, bad_distr, tot=True)\n #print(\"G/B KL_div: %f \" % kl_div_good_bad)\n self.logger.info('--->Divergence btw good/bad trajs is: %f'\n % kl_div_good_bad)\n\n def _update_good_bad_dynamics(self, option='duality'):\n \"\"\"\n Instantiate dynamics objects and update prior. 
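
# [editor's sketch] The method below also fits an empirical Gaussian over the
# initial states of the sampled trajectories; the same estimate in isolation,
# with a variance floor playing the role of 'initial_state_var'
# (X is an assumed (n_samples, T, dX) array of states).
import numpy as np

def fit_initial_state_gaussian(X, min_var=1e-6):
    x0 = X[:, 0, :]                                   # initial state per sample
    x0mu = np.mean(x0, axis=0)
    x0sigma = np.diag(np.maximum(np.var(x0, axis=0), min_var))
    return x0mu, x0sigma
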
Fit dynamics to sample(s).\n \"\"\"\n for m in range(self.M):\n if option == 'duality':\n good_data = self._good_duality_info[m].sample_list\n bad_data = self._bad_duality_info[m].sample_list\n else:\n good_data = self.cur[m].sample_list\n bad_data = self.cur[m].sample_list\n\n X_good = good_data.get_states()\n U_good = good_data.get_actions()\n X_bad = bad_data.get_states()\n U_bad = bad_data.get_actions()\n\n # Update prior and fit dynamics.\n self.good_trajs_info[m].dynamics.update_prior(good_data)\n self.good_trajs_info[m].dynamics.fit(X_good, U_good)\n self.bad_trajs_info[m].dynamics.update_prior(bad_data)\n self.bad_trajs_info[m].dynamics.fit(X_bad, U_bad)\n\n # Fit x0mu/x0sigma.\n x0_good = X_good[:, 0, :]\n x0mu_good = np.mean(x0_good, axis=0) # TODO: SAME X0 FOR ALL??\n self.good_trajs_info[m].x0mu = x0mu_good\n self.good_trajs_info[m].x0sigma = np.diag(np.maximum(np.var(x0_good, axis=0),\n self._hyperparams['initial_state_var']))\n x0_bad = X_bad[:, 0, :]\n x0mu_bad = np.mean(x0_bad, axis=0) # TODO: SAME X0 FOR ALL??\n self.bad_trajs_info[m].x0mu = x0mu_bad\n self.bad_trajs_info[m].x0sigma = np.diag(np.maximum(np.var(x0_bad, axis=0),\n self._hyperparams['initial_state_var']))\n\n prior_good = self.good_trajs_info[m].dynamics.get_prior()\n if prior_good:\n mu0, Phi, priorm, n0 = prior_good.initial_state()\n N = len(good_data)\n self.good_trajs_info[m].x0sigma += Phi + (N*priorm) / (N+priorm) * np.outer(x0mu_good-mu0, x0mu_good-mu0) / (N+n0)\n\n prior_bad = self.good_trajs_info[m].dynamics.get_prior()\n if prior_bad:\n mu0, Phi, priorm, n0 = prior_bad.initial_state()\n N = len(bad_data)\n self.bad_trajs_info[m].x0sigma += Phi + (N*priorm) / (N+priorm) * np.outer(x0mu_bad-mu0, x0mu_bad-mu0) / (N+n0)\n\n def _update_good_bad_fit(self):\n min_good_var = self._hyperparams['algo_hyperparams']['min_good_var']\n min_bad_var = self._hyperparams['algo_hyperparams']['min_bad_var']\n\n fit_traj_dist_fcn = fit_linear_gaussian_policy\n\n for cond in range(self.M):\n self._bad_duality_info[cond].traj_dist = \\\n fit_traj_dist_fcn(self._bad_duality_info[cond].sample_list,\n min_bad_var)\n self._good_duality_info[cond].traj_dist = \\\n fit_traj_dist_fcn(self._good_duality_info[cond].sample_list,\n min_good_var)\n self.cur[cond].good_traj_distr = self._good_duality_info[cond].traj_dist\n self.cur[cond].bad_traj_distr = self._bad_duality_info[cond].traj_dist\n\n def _update_good_bad_size(self):\n for m in range(self.M):\n if self.iteration_count >= 1 and self.prev[m].sample_list:\n # Good\n # self.cur[m].good_step_mult = 2*self.cur[m].step_mult\n good_mult = \\\n self._hyperparams['algo_hyperparams']['good_fix_rel_multi']*self.cur[m].step_mult\n new_good = max(\n min(good_mult,\n self._hyperparams['algo_hyperparams']['max_good_mult']),\n self._hyperparams['algo_hyperparams']['min_good_mult']\n )\n self.cur[m].good_step_mult = new_good\n\n #\n traj_info = self.cur[m].traj_info\n current_distr = self.cur[m].traj_distr\n bad_distr = self.cur[m].bad_traj_distr\n prev_traj_info = self.prev[m].traj_info\n prev_distr = self.prev[m].traj_distr\n prev_bad_distr = self.prev[m].bad_traj_distr\n\n actual_laplace = \\\n self.traj_opt.estimate_cost(current_distr, traj_info)\n self.logger.info('actual_laplace: %r' % actual_laplace.sum())\n\n prev_laplace = \\\n self.traj_opt.estimate_cost(prev_distr, traj_info)\n self.logger.info('prev_laplace: %r' % prev_laplace.sum())\n\n bad_laplace = \\\n self.traj_opt.estimate_cost(bad_distr, traj_info)\n\n self.logger.info('actual_bad: %r' % bad_laplace.sum())\n\n 
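
# [editor's sketch] This method goes on to compare Gaussian trajectory
# distributions via KL divergence (traj_distr_kl_alt). For reference, the
# closed-form KL between two multivariate Gaussians N(mu0, S0) || N(mu1, S1) --
# the textbook formula, not robolearn's exact implementation:
import numpy as np

def gaussian_kl(mu0, S0, mu1, S1):
    k = mu0.shape[0]
    S1_inv = np.linalg.inv(S1)
    diff = mu1 - mu0
    return 0.5 * (np.trace(S1_inv.dot(S0)) + diff.dot(S1_inv).dot(diff) - k
                  + np.log(np.linalg.det(S1) / np.linalg.det(S0)))
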
prev_bad_laplace = \\\n self.traj_opt.estimate_cost(prev_bad_distr, traj_info)\n\n self.logger.info('prev_bad: %r' % prev_bad_laplace.sum())\n\n\n # THIS WAS BEFORE 08/02\n rel_difference = (1 + (bad_laplace.sum() - actual_laplace.sum())/actual_laplace.sum())\n self.logger.info('Actual/Bad REL difference %r' % rel_difference)\n\n mu_to_check, sigma_to_check = \\\n self.traj_opt.forward(current_distr, traj_info)\n\n kl_div_bad = traj_distr_kl_alt(mu_to_check, sigma_to_check,\n current_distr, bad_distr,\n tot=True)\n print('Current bad_div:', kl_div_bad)\n\n prev_mu_to_check, prev_sigma_to_check = \\\n self.traj_opt.forward(prev_distr, traj_info)\n\n prev_kl_div_bad = \\\n traj_distr_kl_alt(prev_mu_to_check, prev_sigma_to_check,\n prev_distr, prev_bad_distr, tot=True)\n\n rel_kl = max(0,\n 1 + (prev_kl_div_bad - kl_div_bad)/prev_kl_div_bad)\n\n print('#$'*30)\n print('MULTIPLY REL_DIFFERENCE EEEEEEE!!!!!!')\n min_rel_diff = self._hyperparams['algo_hyperparams']['min_bad_rel_diff']\n max_rel_diff = self._hyperparams['algo_hyperparams']['max_bad_rel_diff']\n mult_rel_diff = self._hyperparams['algo_hyperparams']['mult_bad_rel_diff']\n\n rel_difference = min(max(rel_difference, min_rel_diff),\n max_rel_diff)\n rel_difference = mult_rel_diff*rel_difference\n\n self.logger.info('ACTUAL/BAD MULT %r, %r, %r'\n % (min_rel_diff, max_rel_diff, mult_rel_diff))\n self.logger.info('Actual/Bad difference %r' % rel_difference)\n\n # bad_mult = rel_difference*self.cur[m].step_mult\n\n print('BAD REL_DIFFERENCE IS WRT PREV_BAD_MULT!!!!!!')\n bad_mult = rel_difference*1\n\n new_bad = max(\n min(bad_mult,\n self._hyperparams['algo_hyperparams']['max_bad_mult']),\n self._hyperparams['algo_hyperparams']['min_bad_mult']\n )\n self.cur[m].bad_step_mult = new_bad\n\n\n # if not hasattr(self, 'bad_discount'):\n # self.bad_discount = self.kl_bad/self.max_iterations\n # self.cur[m].bad_step_mult = new_bad\n\n def _take_dualist_sample(self, bad_or_good, itr, train_or_test='train'):\n \"\"\"\n Collect a sample from the environment.\n :param traj_or_pol: Use trajectory distributions or current policy.\n 'traj' or 'pol'\n :param itr: Current TrajOpt iteration\n :return:\n \"\"\"\n # If 'pol' sampling, do it with zero noise\n zero_noise = np.zeros((self.T, self.dU))\n\n self.logger.info(\"Sampling with dualism:'%s'\" % bad_or_good)\n\n if train_or_test == 'train':\n conditions = self._train_cond_idx\n elif train_or_test == 'test':\n conditions = self._test_cond_idx\n else:\n raise ValueError(\"Wrong train_or_test option %s\" % train_or_test)\n\n on_policy = False\n total_samples = 1\n save = False # Add sample to agent sample list TODO: CHECK THIS\n\n # A list of SampleList for each condition\n sample_lists = list()\n\n for cc, cond in enumerate(conditions):\n samples = list()\n\n # On-policy or Off-policy\n if on_policy and (self.iteration_count > 0 or\n ('sample_pol_first_itr' in self._hyperparams\n and self._hyperparams['sample_pol_first_itr'])):\n policy = None\n self.logger.info(\"On-policy sampling: %s!\"\n % type(self.agent.policy).__name__)\n else:\n policy = self.cur[cond].traj_distr\n self.logger.info(\"Off-policy sampling: %s!\"\n % type(policy).__name__)\n if bad_or_good == 'bad':\n policy = self.cur[cond].bad_traj_distr\n elif bad_or_good == 'good':\n policy = self.cur[cond].good_traj_distr\n else:\n raise ValueError(\"Wrong bad_or_good option %s\" % bad_or_good)\n self.logger.info(\"Off-policy sampling with dualism: %s (%s)!\"\n % (bad_or_good, type(policy).__name__))\n\n for i in range(total_samples):\n noise 
= zero_noise\n\n self.env.reset(condition=cond)\n sample_text = \"'%s' sampling | itr:%d/%d, cond:%d/%d, s:%d/%d\" \\\n % (bad_or_good, itr+1, self.max_iterations,\n cond+1, len(conditions),\n i+1, total_samples)\n self.logger.info(sample_text)\n sample = self.agent.sample(self.env, cond, self.T,\n self.dt, noise, policy=policy,\n save=save,\n real_time=self._hyperparams['sample_real_time'])\n samples.append(sample)\n\n sample_lists.append(SampleList(samples))\n\n return sample_lists\n" }, { "alpha_fraction": 0.6187989711761475, "alphanum_fraction": 0.6579634547233582, "avg_line_length": 24.53333282470703, "blob_id": "7530ba5f64645f8b58b51ccb6bd76bb1811ef877", "content_id": "84b58f8fcd37725e311f77a76eac6952c1815ac0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "permissive", "max_line_length": 49, "num_lines": 15, "path": "/examples/miscellaneous/subplots_test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nblock = True\ncols = 3\nactions = np.ones([7, 20])\ndU = actions.shape[0]\nfig, ax = plt.subplots(dU/cols, cols)\nfor ii in range(dU):\n actions[ii, :] = ii\n plt.subplot(dU/cols+1, cols, ii+1)\n fig.canvas.set_window_title(\"Action\"+str(ii))\n fig.set_facecolor((0.5922, 0.6, 1))\n plt.plot(actions[ii])\nplt.show(block=block)\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 22.799999237060547, "blob_id": "62fc3484dcc0e54f2d4a5ec138210488fbdeac69", "content_id": "0e0afadc960f686b8232c2cae8747798c66f7d8e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "permissive", "max_line_length": 66, "num_lines": 5, "path": "/robolearn/utils/stdout/notebook_utils.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from IPython import get_ipython\n\n\ndef is_ipython():\n return type(get_ipython()).__module__.startswith('ipykernel.')\n" }, { "alpha_fraction": 0.8550724387168884, "alphanum_fraction": 0.8550724387168884, "avg_line_length": 69, "blob_id": "316d037d22bf64073e7ccf9e6340b939f524e7ec", "content_id": "867e926f09e5f1a046588c2c7f304987cd7e79a7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "permissive", "max_line_length": 69, "num_lines": 1, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/README.md", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "This algorithm is in process of being migrated completeley to pytorch" }, { "alpha_fraction": 0.5943747758865356, "alphanum_fraction": 0.6064046025276184, "avg_line_length": 35.88750076293945, "blob_id": "2e9d818c09bdf6d86d94e9c8afdc9665ecdc49dd", "content_id": "86e26bd17a31cbc4e6be7c632dc1659606b57446", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5902, "license_type": "permissive", "max_line_length": 80, "num_lines": 160, "path": "/robolearn/envs/rllab_envs/pusher.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os.path as osp\n\nimport numpy as np\n\nfrom rllab.core.serializable import Serializable\nfrom rllab.envs.mujoco.mujoco_env import MujocoEnv\nfrom rllab.misc import logger\nfrom rllab.misc.overrides import overrides\n\n\nclass 
PusherEnv(MujocoEnv, Serializable):\n \"\"\"Pusher environment\n\n Pusher is a two-dimensional 3-DoF manipulator. Task is to slide a cylinder-\n shaped object, or a 'puck', to a target coordinates.\n \"\"\"\n # FILE_PATH = osp.abspath(osp.join(PROJECT_PATH, 'models', 'pusher.xml'))\n FILE_PATH = osp.join(\n osp.dirname(osp.abspath(__file__)),\n 'pusher.xml'\n )\n\n JOINT_INDS = list(range(0, 3))\n PUCK_INDS = list(range(3, 5))\n TARGET_INDS = list(range(5, 7))\n\n # TODO.before_release Fix target visualization (right now the target is\n # always drawn in (-1, 0), regardless of the actual goal.\n\n def __init__(self, goal=(0, -1), arm_distance_coeff=0):\n \"\"\"\n goal (`list`): List of two elements denoting the x and y coordinates of\n the goal location. Either of the coordinate can also be a string\n 'any' to make the reward not to depend on the corresponding\n coordinate.\n arm_distance_coeff ('float'): Coefficient for the arm-to-object distance\n cost.\n \"\"\"\n super(PusherEnv, self).__init__(file_path=self.FILE_PATH)\n Serializable.quick_init(self, locals())\n\n self._goal_mask = [coordinate != 'any' for coordinate in goal]\n self._goal = np.array(goal)[self._goal_mask].astype(np.float32)\n\n self._arm_distance_coeff = arm_distance_coeff\n self._action_cost_coeff = 0.1\n\n # Make the the complete robot visible when visualizing.\n self.model.stat.extent = 10\n\n def step(self, action):\n reward, info = self.compute_reward(self.get_current_obs(), action)\n\n self.forward_dynamics(action)\n observation = self.get_current_obs()\n done = False\n\n return observation, reward, done, info\n\n def compute_reward(self, observations, actions):\n is_batch = False\n if observations.ndim == 1:\n observations = observations[None]\n actions = actions[None]\n is_batch = True\n\n arm_pos = observations[:, -6:-3]\n obj_pos = observations[:, -3:]\n obj_pos_masked = obj_pos[:, :2][:, self._goal_mask]\n\n goal_dists = np.linalg.norm(self._goal[None] - obj_pos_masked, axis=1)\n arm_dists = np.linalg.norm(arm_pos - obj_pos, axis=1)\n ctrl_costs = np.sum(actions**2, axis=1)\n\n rewards = -self._action_cost_coeff * ctrl_costs - goal_dists\n rewards -= self._arm_distance_coeff * arm_dists\n\n if not is_batch:\n rewards = rewards.squeeze()\n arm_dists = arm_dists.squeeze()\n goal_dists = goal_dists.squeeze()\n\n return rewards, {\n 'arm_distance': arm_dists,\n 'goal_distance': goal_dists,\n 'reward_vector': rewards,\n 'reward_multigoal': [rewards for _ in range(2)],\n }\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n self.viewer.cam.distance = 4.0\n rotation_angle = np.random.uniform(low=-0, high=360)\n if hasattr(self, \"_kwargs\") and 'vp' in self._kwargs:\n rotation_angle = self._kwargs['vp']\n cam_dist = 4\n cam_pos = np.array([0, 0, 0, cam_dist, -45, rotation_angle])\n for i in range(3):\n self.viewer.cam.lookat[i] = cam_pos[i]\n self.viewer.cam.distance = cam_pos[3]\n self.viewer.cam.elevation = cam_pos[4]\n self.viewer.cam.azimuth = cam_pos[5]\n self.viewer.cam.trackbodyid = -1\n\n def reset(self, init_state=None):\n if init_state:\n super(PusherEnv, self).reset(init_state)\n return\n\n qpos = np.random.uniform(\n low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos.squeeze()\n qpos[self.TARGET_INDS] = self.init_qpos.squeeze()[self.TARGET_INDS]\n\n # TODO.before_release: Hack for reproducing the exact results we have in\n # paper, remove before release.\n while True:\n puck_position = np.random.uniform(\n low=[0.3, -1.0], high=[1.0, -0.4]),\n\n bottom_right_corner = np.array([1, 
-1])\n if np.linalg.norm(puck_position - bottom_right_corner) > 0.45:\n break\n\n qpos[self.PUCK_INDS] = puck_position\n\n qvel = self.init_qvel.copy().squeeze()\n qvel[self.PUCK_INDS] = 0\n qvel[self.TARGET_INDS] = 0\n\n qacc = np.zeros(self.model.data.qacc.shape[0])\n ctrl = np.zeros(self.model.data.ctrl.shape[0])\n\n full_state = np.concatenate((qpos, qvel, qacc, ctrl))\n super(PusherEnv, self).reset(full_state)\n\n return self.get_current_obs()\n\n @overrides\n def get_current_obs(self):\n return np.concatenate([\n self.model.data.qpos.flat[self.JOINT_INDS],\n self.model.data.qvel.flat[self.JOINT_INDS],\n self.get_body_com(\"distal_4\"),\n self.get_body_com(\"object\"),\n ]).reshape(-1)\n\n @overrides\n def log_diagnostics(self, paths):\n arm_dists = [p['env_infos'][-1]['arm_distance'] for p in paths]\n goal_dists = [p['env_infos'][-1]['goal_distance'] for p in paths]\n\n logger.record_tabular('FinalArmDistanceAvg', np.mean(arm_dists))\n logger.record_tabular('FinalArmDistanceMax', np.max(arm_dists))\n logger.record_tabular('FinalArmDistanceMin', np.min(arm_dists))\n logger.record_tabular('FinalArmDistanceStd', np.std(arm_dists))\n\n logger.record_tabular('FinalGoalDistanceAvg', np.mean(goal_dists))\n logger.record_tabular('FinalGoalDistanceMax', np.max(goal_dists))\n logger.record_tabular('FinalGoalDistanceMin', np.min(goal_dists))\n logger.record_tabular('FinalGoalDistanceStd', np.std(goal_dists))\n" }, { "alpha_fraction": 0.5486447215080261, "alphanum_fraction": 0.5907551050186157, "avg_line_length": 27.496551513671875, "blob_id": "a8e57b333cfe03260847d41d78118c06a2bcaf26", "content_id": "f1528d90d8b26cd7e13fb1be0620424fbaceaf40", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4132, "license_type": "permissive", "max_line_length": 78, "num_lines": 145, "path": "/examples/v010/discrete_envs/dualist_test/simple_bandit.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nnp.set_printoptions(precision=2)\nnp.random.seed(0)\n\n\nk = 4 # N bandits\ntime_steps = 10 # T\nmax_iters = 2\n\nactions = np.arange(k)\nrewards = np.array([10, -2, -10, 2])\navg_rewards = np.zeros_like(actions)\naction_counter = np.zeros_like(actions)\nsum_rewards = np.zeros_like(actions)\n\ntemp1 = 5\ntemp2 = 5\n\nw1 = 0.5\nw2 = 0.5\n\nmin_reward = np.inf\nmax_reward = -np.inf\n\nexpected = rewards.mean()\nq_vals1 = np.zeros_like(actions, dtype=np.float64)\nq_vals2 = np.zeros_like(actions, dtype=np.float64)\nq_vals = np.zeros_like(actions, dtype=np.float64)\np1 = np.ones_like(actions)/len(actions)\np2 = np.ones_like(actions)/len(actions)\npolicy = (w1*p1 + w2*p2)/(w1 + w2)\n\nwidth = 0.25\nfig = plt.figure(1)\nax1 = fig.add_subplot(3, 1, 1)\nbar1 = ax1.bar(actions, p1, color='b')\ntext1 = [ax1.text(nn, p1[nn] + .05, str(p1[nn]), color='b',\n horizontalalignment='center') for nn in range(len(policy))]\nax1.set_title('Initial policy')\nax1.set_ylim([0, 1])\nax1.set_xticks(actions)\nax1.set_xticklabels(actions.astype(str))\n\nax2 = fig.add_subplot(3, 1, 2)\nbar2 = ax2.bar(actions, p2, color='r')\n# ax2.set_title('Policy 2')\ntext2 = [ax2.text(nn, p2[nn] + .05, str(p2[nn]), color='r',\n horizontalalignment='center') for nn in range(len(policy))]\nax2.set_ylim([0, 1])\nax2.set_xticks(actions)\nax2.set_xticklabels(actions.astype(str))\n\nax = fig.add_subplot(3, 1, 3)\nbar = ax.bar(actions, policy, color='k')\n# ax2.set_title('Policy 2')\ntext = [ax.text(nn, policy[nn] 
+ .05, str(policy[nn]), color='k',\n horizontalalignment='center') for nn in range(len(policy))]\nax.set_ylim([0, 1])\nax.set_xticks(actions)\nax.set_xticklabels(actions.astype(str))\n\n# fig2 = plt.figure(2)\n# ax2 = fig2.add_subplot(1, 1, 1)\n# bar2 = ax2.bar(actions, p2, color='r')\n# ax2.set_title('Policy 2')\n# ax2.set_ylim([0,1])\n\nplt.subplots_adjust(wspace=0, hspace=0)\nplt.show(block=False)\n\nuser_input = 'whatever'\n\nfor itr in range(max_iters):\n fig.canvas.set_window_title(\"'%d'-Bandit problem | itr:%02d\" % (k, itr))\n\n user_input = input(\"Start itr %02d:\" % itr)\n ax1.set_title('Policy at itr = %02d' % itr)\n\n for t in range(time_steps):\n ax1.set_title('t = %02d' % t)\n\n\n # ACTION SELECTION\n\n # Energy-based policy\n # temp = 0.1*t + 1.e-1\n # temp1 = temp2 = temp\n\n p1 = np.exp(q_vals1/temp1)/np.exp(q_vals1/temp1).sum()\n p2 = np.exp(q_vals2/temp2)/np.exp(q_vals2/temp2).sum()\n\n policy = (w1*p1 + w2*p2)/(w1 + w2)\n action = np.random.choice(actions, p=policy)\n\n # Bandit reward\n # reward = rewards[action]\n reward = np.random.normal(rewards[action], 0.5)\n\n # ax1.set_title('Action: %d | reward: %.2f' % (action, reward))\n\n # POLICY EVALUATION\n user_input = input(\"t=%d | a=%d | Q_vals=%s || Press 'f' to finish:\" %\n (t, action, q_vals))\n if user_input.lower() in ['f']:\n break\n\n # Sample-average Q estimation\n # ix = np.where(action_counter > 0)\n # q_vals[ix] = sum_rewards[ix]/action_counter[ix] # Non-efficient way\n # action_counter[action] += 1\n\n # Incremental formula\n action_counter[action] += 1\n q_error = reward - q_vals[action]\n alpha = 1./action_counter[action] # Step size\n q_vals[action] = q_vals[action] + alpha * q_error\n\n # q_vals = rewards\n\n q_vals1 = q_vals2 = q_vals\n\n sum_rewards[action] += reward\n t += 1\n\n\n\n # Visualize\n for nn in range(len(policy)):\n bar1[nn].set_height(p1[nn])\n bar2[nn].set_height(p2[nn])\n bar[nn].set_height(policy[nn])\n text1[nn].set_position((nn, p1[nn]+0.05))\n text2[nn].set_position((nn, p2[nn]+0.05))\n text[nn].set_position((nn, policy[nn]+0.05))\n text1[nn].set_text(str(np.around(p1[nn], 2)))\n text2[nn].set_text(str(np.around(p2[nn], 2)))\n text[nn].set_text(str(np.around(policy[nn], 2)))\n\n # print(action)\n fig.canvas.draw_idle()\n\n\nprint(\"Script has finished\")\n" }, { "alpha_fraction": 0.4847424328327179, "alphanum_fraction": 0.4908454716205597, "avg_line_length": 34.60942077636719, "blob_id": "a3182220a75ef1010198ed5b80648c18b571b21c", "content_id": "f735615c2319bdff0ca979793672f7234acf31cf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23431, "license_type": "permissive", "max_line_length": 84, "num_lines": 658, "path": "/robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\n# LOG_SIG_MAX = 2\n# LOG_SIG_MIN = -3.0\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n# SIG_MAX = 7.38905609893065\n# SIG_MIN = 0.049787068367863944\n\n# LOG_MIX_COEFF_MIN = -10\n# LOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5\n# LOG_MIX_COEFF_MIN = 
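
# [editor's sketch] The bandit loop above combines Boltzmann (softmax) action
# selection with the incremental sample-average update
# Q(a) <- Q(a) + (r - Q(a)) / N(a). The same two steps in isolation; the
# max-subtraction is a numerical-stability tweak not present in the script.
import numpy as np

def softmax_policy(q_vals, temperature):
    logits = q_vals / temperature
    probs = np.exp(logits - logits.max())
    return probs / probs.sum()

def incremental_q_update(q_vals, counts, action, reward):
    counts[action] += 1
    q_vals[action] += (reward - q_vals[action]) / counts[action]
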
-1\n# LOG_MIX_COEFF_MAX = 1 #-4.5e-5\n\n# EPS = 1e-12\nEPS = 1e-8\n\n\nclass TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianPrompMultiPolicy(...)\n action, policy_dict = policy(obs)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_w_init='xavier_normal',\n output_b_init_val=0,\n pol_output_activation='linear',\n mix_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n softmax_weights=False,\n **kwargs\n ):\n self.save_init_params(locals())\n PyTorchModule.__init__(self)\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. 
Mixing Layers\n # self.mfc_last = None # Below is instantiated\n\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n mixture_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # Multi-Policy Log-Stds Last Layers\n self.stds = stds\n self.log_std = list()\n if stds is None:\n self._pfc_log_std_lasts = list()\n for pol_idx in range(self._n_subpolicies):\n last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc_log_std,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std)\n self._pfc_log_std_lasts.append(last_pfc_log_std)\n self.add_policies_module(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std, idx=pol_idx)\n\n else:\n for std in stds:\n self.log_std.append(torch.log(stds))\n assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX\n\n # 
############# #\n # Mixing Layers #\n # ############# #\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n self.mfc_sigmoid = nn.Sigmoid()\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n\n def get_action(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool): True for using mean. 
False, sample from dist.\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._shared_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n # Last Mean Layers\n means = torch.cat(\n [(\n self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # Last Log-Std Layers\n if self.stds is None:\n log_stds = torch.cat(\n [(\n self._pol_output_activation(\n self._pfc_log_std_lasts[pp](hs[pp])\n )\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # # log_std option 1:\n # log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n # log_std option 2:\n log_stds = torch.tanh(log_stds)\n log_stds = \\\n LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)\n\n stds = torch.exp(log_stds)\n variances = stds**2\n\n else:\n log_stds = self.log_std\n stds = self.stds\n variances = stds**2\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = h.clone()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mixture_coeff = \\\n self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)\n\n mixture_coeff = self.mfc_sigmoid(mixture_coeff)\n\n # if torch.isnan(mixture_coeff).any():\n # raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %\n # mixture_coeff)\n #\n # if torch.isnan(means).any():\n # raise ValueError('Some means are NAN: %s' %\n # means)\n #\n # if torch.isnan(stds).any():\n # raise ValueError('Some stds are NAN: %s' %\n # stds)\n\n if pol_idx is None:\n # Calculate weighted means and stds (and log_stds)\n if optimize_policies:\n sig_invs = mixture_coeff/variances\n else:\n sig_invs = mixture_coeff/variances.detach()\n\n variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)\n\n if optimize_policies:\n mean = variance*torch.sum(\n means*sig_invs,\n dim=1,\n keepdim=False\n )\n else:\n mean = variance*torch.sum(\n means.detach()*sig_invs,\n dim=1,\n keepdim=False\n )\n\n # log_std option 1:\n std = torch.sqrt(variance)\n std = torch.clamp(std,\n min=math.exp(LOG_SIG_MIN),\n max=math.exp(LOG_SIG_MAX))\n log_std = torch.log(std)\n # # log_std option 2:\n # variance = torch.tanh(variance)\n # variance = (\n # math.exp(LOG_SIG_MIN)**2 +\n # 0.5*(math.exp(LOG_SIG_MAX)**2 - math.exp(LOG_SIG_MIN)**2) *\n # (variance + 1)\n # )\n # std = 
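
# [editor's sketch] The fused mean/variance computed above is a precision-
# weighted product of Gaussians: each sub-policy head enters with weight
# w_i / sigma_i^2. The same composition in NumPy; arrays are assumed to be
# shaped (n_policies, action_dim).
import numpy as np

def fuse_gaussian_heads(means, variances, weights):
    precisions = weights / variances                  # w_i / sigma_i^2
    fused_var = 1.0 / precisions.sum(axis=0)
    fused_mean = fused_var * (means * precisions).sum(axis=0)
    return fused_mean, fused_var
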
torch.sqrt(variance)\n # log_std = torch.log(std)\n\n # TODO: Remove the following?\n # log_std = torch.logsumexp(\n # log_stds + log_mixture_coeff.reshape(-1,\n # self.action_dim,\n # self._n_subpolicies),\n # dim=-1,\n # keepdim=False\n # ) - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)\n\n # log_std = torch.log(std)\n\n else:\n index = self._pols_idxs[pol_idx]\n mean = \\\n torch.index_select(means, dim=1, index=index).squeeze(1)\n std = \\\n torch.index_select(stds, dim=1, index=index).squeeze(1)\n log_std = \\\n torch.index_select(log_stds, dim=1, index=index).squeeze(1)\n variance = \\\n torch.index_select(variances, dim=1, index=index).squeeze(1)\n\n pre_tanh_value = None\n log_prob = None\n pre_tanh_values = None\n log_probs = None\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n # # Using this distribution instead of TanhMultivariateNormal\n # # because it has Diagonal Covariance.\n # # Then, a collection of n independent Gaussian r.v.\n # tanh_normal = TanhNormal(mean, std)\n #\n # # # It is the Lower-triangular factor of covariance because it is\n # # # Diagonal Covariance\n # # scale_trils = torch.stack([torch.diag(m) for m in std])\n # # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)\n #\n # if return_log_prob:\n # log_prob = tanh_normal.log_prob(\n # action,\n # pre_tanh_value=pre_tanh_value\n # )\n # log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n noise = self._normal_dist.sample((nbatch,))\n\n pre_tanh_value = std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # Log probability: Main Policy\n log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \\\n - log_std - math.log(math.sqrt(2*math.pi))\n log_prob -= torch.log(\n # torch.clamp(1. - action**2, 0, 1)\n clip_but_pass_gradient(1. - action**2, 0, 1)\n + 1.e-6\n )\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n # Log probability: Sub-Policies\n log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\\\n - log_stds - math.log(math.sqrt(2*math.pi))\n log_probs -= torch.log(\n # torch.clamp(1. - actions**2, 0, 1)\n clip_but_pass_gradient(1. 
- actions**2, 0, 1)\n + 1.e-6\n )\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n # if torch.isnan(action).any():\n # raise ValueError('ACTION NAN')\n #\n # if torch.isnan(actions).any():\n # raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n std=std,\n log_std=log_std,\n log_prob=log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n\n\ndef clip_but_pass_gradient(x, l=-1., u=1.):\n clip_up = (x > u).to(ptu.device, dtype=torch.float32)\n clip_low = (x < l).to(ptu.device, dtype=torch.float32)\n return x + ((u - x)*clip_up + (l - x)*clip_low).detach()\n" }, { 
"alpha_fraction": 0.6307888627052307, "alphanum_fraction": 0.6598531007766724, "avg_line_length": 36.72289276123047, "blob_id": "817ef633af99484b0b11a61ff2531f1edb3b8320", "content_id": "7a9be6699e9935f84605871e372fca5f6a044eb6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3131, "license_type": "permissive", "max_line_length": 117, "num_lines": 83, "path": "/examples/miscellaneous/noise-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis script helps to tune the noise hyperparameters.\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage as sp_ndimage\nfrom robolearn.old_utils.plot_utils import plot_multi_info\n\n# Noise hyperparams\n#noise_var_scale = 1.0e-0 # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n#noise_var_scale = np.array([5.0e-1, 5.0e-1, 5.0e-1, 5.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])\n#init_noise_var_scale = np.array([1.5e+0, 1.5e+0, 1.5e+0, 1.5e+0, 5.0e-1, 5.0e-1, 5.0e-1])\ninit_noise_var_scale = np.ones(7)\nfinal_noise_var_scale = None # np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])\nsmooth_noise = True # Apply Gaussian filter to noise generated\nsmooth_noise_var = 5.0e-0 # Variance to apply to Gaussian Filter\nsmooth_noise_renormalize = True # Renormalize smooth noise to have variance=1\nsmoth_noise_renormalize_val = init_noise_var_scale\nT = 500\ndU = 7\n\nif not issubclass(type(init_noise_var_scale), list) and not issubclass(type(init_noise_var_scale), np.ndarray):\n initial_noise = np.tile(init_noise_var_scale, dU)\nelif len(init_noise_var_scale) == dU:\n initial_noise = init_noise_var_scale\nelse:\n raise TypeError(\"init_noise_var_scale size (%d) does not match dU (%d)\" % (len(init_noise_var_scale), dU))\n\nif final_noise_var_scale is not None:\n if not issubclass(type(final_noise_var_scale), list) and not issubclass(type(final_noise_var_scale), np.ndarray):\n final_noise = np.tile(final_noise_var_scale, dU)\n elif len(final_noise_var_scale) == dU:\n final_noise = final_noise_var_scale\n else:\n raise TypeError(\"final_noise_var_scale size (%d) does not match dU (%d)\" % (len(final_noise_var_scale), dU))\n\n scale = np.zeros([T, dU])\n\n for u in range(dU):\n scale[:, u] = np.linspace(initial_noise[u], final_noise[u], T)\n\nelse:\n scale = initial_noise\n\n\n# Generate noise and scale\n#noise = np.random.randn(T, dU)*np.sqrt(scale)\nnoise = np.random.randn(T, dU)\n\ntemp_noise_list = list()\nlabels = list()\ntemp_noise_list.append(noise.copy())\nlabels.append('Noise')\n\nprint(\"*\"*30)\nprint(\"Noise Max:%s\" % np.max(noise, axis=0))\nprint(\"Noise Min:%s\" % np.min(noise, axis=0))\nif smooth_noise:\n # Smooth noise. 
This violates the controller assumption, but\n # might produce smoother motions.\n for i in range(dU):\n noise[:, i] = sp_ndimage.filters.gaussian_filter(noise[:, i], sigma=smooth_noise_var)\n temp_noise_list.append(noise.copy())\n labels.append('Smooth noise')\n print('')\n print(\"*\"*20)\n print(\"Smooth noise Max:%s\" % np.max(noise, axis=0))\n print(\"Smooth noise Min:%s\" % np.min(noise, axis=0))\n\n if smooth_noise_renormalize:\n variance = np.var(noise, axis=0)\n noise = noise * smoth_noise_renormalize_val / np.sqrt(variance)\n\n temp_noise_list.append(noise.copy())\n labels.append('Smooth noise renormalized')\n\n print('')\n print(\"*\"*20)\n print(\"Smooth noise renormalized Max:%s\" % np.max(noise, axis=0))\n print(\"Smooth noise renormalized Min:%s\" % np.min(noise, axis=0))\n\n\nplot_multi_info(temp_noise_list, block=True, cols=3, legend=True, labels=labels)\n" }, { "alpha_fraction": 0.6054564714431763, "alphanum_fraction": 0.6225453615188599, "avg_line_length": 26.912134170532227, "blob_id": "e3e998096c2fa9f32ebdd36979ae12ce9f2e2df6", "content_id": "d9910c94a7e7305a9276972e7f89bcf2de103643", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6671, "license_type": "permissive", "max_line_length": 89, "num_lines": 239, "path": "/examples/rl_algos/ppo/pusher_ppo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch SAC on Pusher2D3DofGoalCompoEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport os\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn.utils.data_management import SimpleReplayBuffer\n\nfrom robolearn_gym_envs.pybullet import Pusher2D3DofGoalCompoEnv\n\nfrom robolearn.torch.algorithms.rl_algos.ppo import PPO\n\nfrom robolearn.torch.models import NNQFunction\n\nfrom robolearn.torch.policies import TanhGaussianPolicy\n\nimport argparse\nimport joblib\n\nnp.set_printoptions(suppress=True, precision=4)\n# np.seterr(all='raise') # WARNING RAISE ERROR IN NUMPY\n\nTend = 3.0 # Seconds\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 5 #10\nPATHS_PER_EVAL = 3 #3\nBATCH_SIZE = 256\n\n# SEED = 10\nSEED = 110\n# NP_THREADS = 6\n\nPOLICY = TanhGaussianPolicy\n\n\nexpt_params = dict(\n algo_name=PPO.__name__,\n policy_name=POLICY.__name__,\n algo_params=dict(\n # Common RL algorithm params\n rollouts_per_epoch=PATHS_PER_EPOCH,\n num_steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n num_epochs=5000, # n_epochs\n num_updates_per_train_call=1, # How to many run algorithm train fcn\n num_steps_per_eval=PATHS_PER_EVAL * PATH_LENGTH,\n min_steps_start_train=BATCH_SIZE, # Min nsteps to start to train (or batch_size)\n # min_start_eval=PATHS_PER_EPOCH * PATH_LENGTH, # Min nsteps to start to eval\n min_start_eval=1, # Min nsteps to start to eval\n # EnvSampler params\n max_path_length=PATH_LENGTH, # max_path_length\n render=False,\n # SAC params\n action_prior='uniform',\n entropy_scale=1.0e-0,\n\n policy_lr=1e-4,\n qf_lr=1e-4,\n # soft_target_tau=1.e-3,\n\n policy_mean_regu_weight=1e-3,\n policy_std_regu_weight=1e-3,\n policy_pre_activation_weight=0.,\n\n discount=0.99,\n reward_scale=5.0e+1,\n ),\n net_size=64,\n replay_buffer_size=1e6,\n)\n\n\nenv_params = dict(\n is_render=False,\n obs_with_img=False,\n goal_poses=None, # It 
will be setted later\n rdn_goal_pose=True,\n tgt_pose=None, # It will be setted later\n rdn_tgt_object_pose=True,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n # obs_distances=False, # If True obs contain 'distance' vectors instead poses\n obs_distances=True, # If True obs contain 'distance' vectors instead poses\n tgt_cost_weight=2.0, #1.5\n goal_cost_weight=1.0,\n ctrl_cost_weight=1.0e-3,\n use_log_distances=True,\n # use_log_distances=False,\n log_alpha=1.e-1, # In case use_log_distances=True\n # max_time=PATH_LENGTH*DT,\n max_time=None,\n subtask=None,\n seed=SEED,\n)\n\n\ndef experiment(variant):\n\n # os.environ['OMP_NUM_THREADS'] = str(NP_THREADS)\n\n np.random.seed(SEED)\n\n ptu.set_gpu_mode(variant['gpu'])\n ptu.seed(SEED)\n\n goal = variant['env_params'].get('goal')\n variant['env_params']['goal_poses'] = \\\n [goal, (goal[0], 'any'), ('any', goal[1])]\n variant['env_params'].pop('goal')\n\n env = NormalizedBoxEnv(\n Pusher2D3DofGoalCompoEnv(**variant['env_params']),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n if variant['log_dir']:\n params_file = os.path.join(variant['log_dir'], 'params.pkl')\n data = joblib.load(params_file)\n start_epoch = data['epoch']\n qf = data['qf']\n policy = data['policy']\n env._obs_mean = data['obs_mean']\n env._obs_var = data['obs_var']\n else:\n start_epoch = 0\n net_size = variant['net_size']\n\n qf = NNQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size]\n )\n policy = POLICY(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size],\n )\n\n # Clamp model parameters\n qf.clamp_all_params(min=-0.003, max=0.003)\n policy.clamp_all_params(min=-0.003, max=0.003)\n\n replay_buffer = SimpleReplayBuffer(\n max_replay_buffer_size=variant['replay_buffer_size'],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n\n algorithm = PPO(\n env=env,\n policy=policy,\n qf=qf,\n # replay_buffer=replay_buffer,\n # batch_size=BATCH_SIZE,\n eval_env=env,\n save_environment=False,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n # algorithm.pretrain(PATH_LENGTH*2)\n algorithm.train(start_epoch=start_epoch)\n\n return algorithm\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=25)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n # GPU arguments\n parser.add_argument('--gpu', action=\"store_true\")\n # Other arguments\n parser.add_argument('--render', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n expt_variant = expt_params\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'pusher'\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n expt_variant['env_params'] = env_params\n 
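    # Illustrative note (added; not part of the original script): experiment()
    # above expands this scalar goal into three goal poses,
    #     [goal, (goal[0], 'any'), ('any', goal[1])]
    # so with goal = (0.65, 0.65) as set below, the env would receive
    #     [(0.65, 0.65), (0.65, 'any'), ('any', 0.65)]
    # where 'any' appears to mark a coordinate the corresponding subtask
    # ignores (an assumption from the surrounding code, not documented here).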
expt_variant['env_params']['is_render'] = args.render\n\n # TODO: MAKE THIS A SCRIPT ARGUMENT\n expt_variant['env_params']['goal'] = (0.65, 0.65)\n expt_variant['env_params']['tgt_pose'] = (0.5, 0.25, 1.4660)\n\n expt_variant['log_dir'] = args.log_dir\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algo = experiment(expt_variant)\n\n # input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.49926382303237915, "alphanum_fraction": 0.5066255927085876, "avg_line_length": 29.004016876220703, "blob_id": "19be3c4d158e68db3d398517ac31014b426f1719", "content_id": "46b9e4a704845f72bf6cb8e3fbc039cc683a64c2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7471, "license_type": "permissive", "max_line_length": 84, "num_lines": 249, "path": "/robolearn/torch/policies/tanh_gaussian_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis code is based on: https://github.com/vitchyr/rlkit\n\"\"\"\nimport math\nimport numpy as np\nimport torch\nfrom torch import nn as nn\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.torch.utils.nn import Mlp\nfrom robolearn.models.policies import ExplorationPolicy\nfrom torch.distributions import Normal\nfrom robolearn.torch.utils.distributions import TanhNormal\nfrom robolearn.torch.utils.distributions import TanhMultivariateNormal\n\n# LOG_SIG_MAX = 0.0 # 2\n# LOG_SIG_MIN = -3.0 # 20\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\nEPS = 1e-8\n\n\nclass TanhGaussianPolicy(Mlp, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n action, policy_dict = policy(obs)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n hidden_sizes,\n std=None,\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_w_init='xavier_normal',\n output_b_init_val=0,\n **kwargs\n ):\n \"\"\"\n\n Args:\n obs_dim:\n action_dim:\n hidden_sizes:\n std:\n hidden_w_init:\n hidden_b_init_val:\n output_w_init:\n output_b_init_val:\n **kwargs:\n \"\"\"\n self.save_init_params(locals())\n super(TanhGaussianPolicy, self).__init__(\n hidden_sizes,\n input_size=obs_dim,\n output_size=action_dim,\n hidden_w_init=hidden_w_init,\n hidden_b_init_val=hidden_b_init_val,\n output_w_init=output_w_init,\n output_b_init_val=output_b_init_val,\n **kwargs\n )\n ExplorationPolicy.__init__(self, action_dim)\n\n self.log_std = None\n self.std = std\n if std is None:\n last_hidden_size = obs_dim\n if len(hidden_sizes) > 0:\n last_hidden_size = hidden_sizes[-1]\n self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)\n ptu.layer_init(\n layer=self.last_fc_log_std,\n option=output_w_init,\n activation='linear',\n b=output_b_init_val\n )\n else:\n self.log_std = math.log(std)\n assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n\n def get_action(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n if isinstance(val, np.ndarray):\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it 
has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n return self.eval_np(obs_np, **kwargs)\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool): True for using mean. False, sample from dist.\n return_log_prob (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n mean = self.last_fc(h)\n\n if self.std is None:\n log_std = self.last_fc_log_std(h)\n # # log_std option 1:\n # log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n # log_std option 2:\n log_std = torch.tanh(log_std)\n log_std = \\\n LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN) * (log_std + 1)\n\n std = torch.exp(log_std)\n else:\n std = self.std\n log_std = self.log_std\n\n pre_tanh_value = None\n log_prob = None\n\n if deterministic:\n action = torch.tanh(mean)\n else:\n \"\"\"\n # Using this distribution instead of TanhMultivariateNormal\n # because it has Diagonal Covariance.\n # Then, a collection of n independent Gaussian r.v.\n tanh_normal = TanhNormal(mean, std)\n\n # # It is the Lower-triangular factor of covariance because it is\n # # Diagonal Covariance\n # scale_trils = torch.stack([torch.diag(m) for m in std])\n # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)\n\n if return_log_prob:\n log_prob = tanh_normal.log_prob(\n action,\n pre_tanh_value=pre_tanh_value\n )\n\n # THE FOLLOWING ONLY WITH TanhNormal\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n \"\"\"\n\n noise = self._normal_dist.sample((nbatch,))\n\n pre_tanh_value = std*noise + mean\n\n action = torch.tanh(pre_tanh_value)\n\n if return_log_prob:\n log_prob = -0.5 * (((pre_tanh_value-mean)/(std+EPS))**2\n + 2*log_std + math.log(2*math.pi))\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n log_prob -= (\n torch.log(\n # torch.clamp(1. - action**2, 0, 1)\n clip_but_pass_gradient(1. 
- action**2, 0, 1)\n + 1.e-6\n )\n ).sum(dim=-1, keepdim=True)\n\n info_dict = dict(\n mean=mean,\n std=std,\n log_std=log_std,\n log_prob=log_prob,\n pre_tanh_value=pre_tanh_value,\n )\n return action, info_dict\n\n def log_action(self, action, obs):\n \"\"\"\n\n Args:\n action:\n obs:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n # #TODO: CHECK THIS FUNCTION\n # h = obs\n # for i, fc in enumerate(self.fcs):\n # h = self.hidden_activation(fc(h))\n #\n # mean = self.last_fc(h)\n #\n # if self.std is None:\n # log_std = self.last_fc_log_std(h)\n # log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n # std = torch.exp(log_std)\n # else:\n # std = self.std\n #\n # # tanh_normal = TanhNormal(mean, std)\n # # log_prob = torch.sum(tanh_normal.log_prob(action), dim=-1, keepdim=True)\n #\n # scale_trils = torch.stack([torch.diag(m) for m in std])\n # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)\n # log_prob = tanh_normal.log_prob(action).unsqueeze_(-1)\n #\n # return log_prob\n #\n # # z = (action - mean)/stds\n # # return -0.5 * torch.sum(torch.mul(z, z), dim=-1, keepdim=True)\n\n\ndef clip_but_pass_gradient(x, l=-1., u=1.):\n clip_up = (x > u).to(ptu.device, dtype=torch.float32)\n clip_low = (x < l).to(ptu.device, dtype=torch.float32)\n return x + ((u - x)*clip_up + (l - x)*clip_low).detach()\n" }, { "alpha_fraction": 0.8166286945343018, "alphanum_fraction": 0.8166286945343018, "avg_line_length": 35.54166793823242, "blob_id": "cb545f6bb502d3be9c9b3ffe6bf6822534f0ed19", "content_id": "b39239bfab6eb38ae25db7d35a543031cb1ad85c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "permissive", "max_line_length": 77, "num_lines": 24, "path": "/robolearn/utils/plots/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .canvas_draw import canvas_draw\nfrom .plt_pause import plt_pause\n\n# General plots\nfrom .core import subplots\nfrom .core import set_latex_plot\nfrom .core import get_csv_data\n\n# Rollout Plots\nfrom .rollout_plots import plot_reward_composition\nfrom .rollout_plots import plot_reward_iu\nfrom .rollout_plots import plot_weigths_unintentionals\nfrom .rollout_plots import plot_q_vals\n\n# Training Plots\nfrom .learning_process_plots import plot_process_iu_returns\nfrom .learning_process_plots import plot_process_iu_avg_rewards\nfrom .learning_process_plots import plot_process_iu_policies\nfrom .learning_process_plots import plot_process_iu_values_errors\nfrom .learning_process_plots import plot_process_iu_alphas\nfrom .learning_process_plots import plot_process_general_data\n\n# Multiple Training plots\nfrom .multiple_learning_process_plots import plot_multiple_process_iu_returns\n\n" }, { "alpha_fraction": 0.6194531321525574, "alphanum_fraction": 0.6234872341156006, "avg_line_length": 37.44827651977539, "blob_id": "1add7e3cc93ccdff857e91133d1c7054ad62d7bd", "content_id": "b6530cc21d0c2dadb08788716682d70dc7d96adc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2231, "license_type": "permissive", "max_line_length": 95, "num_lines": 58, "path": "/robolearn/torch/utils/data_management/normalizer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nimport numbers\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import Module\n# from robolearn.utils.data_management import Normalizer\nimport 
robolearn.torch.utils.pytorch_util as ptu\n\n\nclass Normalizer(Module):\n def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):\n super(Normalizer, self).__init__()\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = (normalized_shape,)\n self.normalized_shape = torch.Size(normalized_shape)\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n if self.elementwise_affine:\n self.weight = Parameter(torch.Tensor(*normalized_shape))\n self.bias = Parameter(torch.Tensor(*normalized_shape))\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.elementwise_affine:\n self.weight.data.fill_(1)\n self.bias.data.zero_()\n\n def forward(self, input):\n return torch.batch_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)\n\n def extra_repr(self):\n return '{normalized_shape}, eps={eps}, ' \\\n 'elementwise_affine={elementwise_affine}'.format(**self.__dict__)\n\n def normalize(self, v, clip_range=None):\n if not self.synchronized:\n self.synchronize()\n if clip_range is None:\n clip_range = self.default_clip_range\n mean = torch.tensor(self.mean, requires_grad=False)\n std = torch.tensor(self.std, requires_grad=False)\n if v.dim() == 2:\n # Unsqueeze along the batch use automatic broadcasting\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return torch.clamp((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n if not self.synchronized:\n self.synchronize()\n mean = torch.tensor(self.mean, requires_grad=False)\n std = torch.tensor(self.std, requires_grad=False)\n if v.dim() == 2:\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return mean + v * std\n\n" }, { "alpha_fraction": 0.484856516122818, "alphanum_fraction": 0.4886975586414337, "avg_line_length": 35.70988464355469, "blob_id": "0434d3125d16b85fd7807aee6ab6f7741fe30bba", "content_id": "dafda025d0a834185a8322c9e08f45776a57a828", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22650, "license_type": "permissive", "max_line_length": 113, "num_lines": 617, "path": "/robolearn/torch/policies/tanh_gaussian_mixture_multi_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom torch.distributions import MultivariateNormal\nfrom torch.distributions import Multinomial\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\nLOG_SIG_MAX = 2\n# LOG_SIG_MIN = -20\nLOG_SIG_MIN = -3.0\n\nLOG_MIX_COEFF_MIN = -10\nLOG_MIX_COEFF_MAX = -4.5e-5\n\nEPS = 1e-12\n\n\nclass TanhGaussianMixtureMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n\n \"\"\"\n def __init__(self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=1e-2,\n output_w_init='xavier_normal',\n output_b_init_val=1e-2,\n pol_output_activation='linear',\n mix_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n 
policies_layer_norm=False,\n mixture_layer_norm=False,\n epsilon=1e-6,\n ):\n self.save_init_params(locals())\n super(TanhGaussianMixtureMultiPolicy, self).__init__()\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. Mixing Layers\n # self.mfc_last = None # Below is instantiated\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n mixture_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, 
action_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # Multi-Policy Log-Stds Last Layers\n self.stds = stds\n self.log_std = list()\n if stds is None:\n self._pfc_log_std_lasts = list()\n for pol_idx in range(self._n_subpolicies):\n last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc_log_std,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std)\n self._pfc_log_std_lasts.append(last_pfc_log_std)\n self.add_policies_module(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std, idx=pol_idx)\n\n else:\n for std in stds:\n self.log_std.append(torch.log(stds))\n assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX\n\n # ############# #\n # Mixing Layers #\n # ############# #\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n softmax_weights = True\n if softmax_weights:\n self.mfc_softmax = nn.Softmax(dim=1)\n else:\n self.mfc_softmax = None\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n self._epsilon = epsilon\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n\n def get_action(self, obs_np, **kwargs):\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool):\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n # pol_idx = int(0)\n\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = 
self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._mixture_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n # Last Mean Layers\n means_list = \\\n [(self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)]\n\n means = torch.cat(means_list, dim=1)\n\n # Last Log-Std Layers\n if self.stds is None:\n log_stds_list = [\n (self._pol_output_activation(\n self._pfc_log_std_lasts[pp](hs[pp])\n )\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)]\n\n log_stds = torch.cat(log_stds_list, dim=1)\n log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n stds = torch.exp(log_stds)\n variances = stds**2\n\n else:\n stds = self.stds\n variances = stds**2\n log_stds = self.log_std\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = h.clone()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n log_mixture_coeff = \\\n self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)\n\n # log_mixture_coeff = torch.clamp(log_mixture_coeff,\n # min=LOG_MIX_COEFF_MIN,\n # max=LOG_MIX_COEFF_MAX) # NxK\n\n # if self.mfc_softmax is not None:\n mixture_coeff = self.mfc_softmax(log_mixture_coeff)\n\n # mixture_coeff = torch.exp(log_mixture_coeff) \\\n # / torch.sum(torch.exp(log_mixture_coeff), dim=-1,\n # keepdim=True)\n\n if torch.isnan(log_mixture_coeff).any():\n raise ValueError('Some mixture coeff(s) is(are) NAN:',\n log_mixture_coeff)\n\n if torch.isnan(means).any():\n raise ValueError('Some means are NAN:',\n means)\n\n if torch.isnan(stds).any():\n raise ValueError('Some stds are NAN:',\n stds)\n\n if pol_idx is None:\n # TODO: CHECK IF NOT PROPAGATING GRADIENTS HERE IS A PROBLEM\n # Sample latent variables\n z = Multinomial(\n logits=log_mixture_coeff.transpose(-2, -1)\n ).sample().transpose(-2, -1) # NxK\n\n # Choose mixture component corresponding\n\n mean = torch.sum(means*z, dim=-2)\n std = torch.sum(stds*z, dim=-2)\n log_std = torch.sum(log_stds*z, dim=-2)\n variance = torch.sum(variances*z, dim=-2)\n\n else:\n index = self._pols_idxs[pol_idx]\n mean = \\\n torch.index_select(means, dim=1, index=index).squeeze(1)\n std = \\\n torch.index_select(stds, dim=1, index=index).squeeze(1)\n log_std = \\\n torch.index_select(log_stds, dim=1, index=index).squeeze(1)\n variance = \\\n torch.index_select(variances, dim=1, index=index).squeeze(1)\n\n pre_tanh_value = None\n log_prob = None\n entropy = None\n mean_action_log_prob = None\n log_probs = None\n pre_tanh_values = None\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n\n noise = 
self._normal_dist.sample((nbatch,))\n pre_tanh_value = std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # temp_pre_tanh_vals = pre_tanh_values\n # temp_actions = actions\n temp_pre_tanh_vals = pre_tanh_value.unsqueeze(-2).expand((nbatch, self.n_heads, self.action_dim))\n temp_actions = action.unsqueeze(-2).expand((nbatch, self.n_heads, self.action_dim))\n\n # Log probability: Sub-Policies | log(x|z)\n # log_probs = -((pre_tanh_values - means) ** 2) / (2 * variances) \\\n temp_log_probs = -((temp_pre_tanh_vals - means) ** 2) / (2 * variances) \\\n - log_stds - math.log(math.sqrt(2 * math.pi))\n # log_probs -= torch.log(1. - temp_actions**2 + self._epsilon)\n\n # Log probability: Main Policy\n log_prob = (torch.logsumexp(temp_log_probs.detach() + log_mixture_coeff,\n dim=-2, keepdim=True)\n - torch.logsumexp(log_mixture_coeff, dim=-2,\n keepdim=True)\n ).squeeze(-2)\n log_prob -= torch.log(1. - action**2 + self._epsilon)\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n log_probs = -((pre_tanh_values - means) ** 2) / (2 * variances) \\\n - log_stds - math.log(math.sqrt(2 * math.pi))\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n if (torch.abs(log_probs) > 1e5).any():\n print('---MEAN0--')\n print(means[:, 0, :])\n print('-----')\n print('-----')\n print('-----')\n print('---MEAN1--')\n print(means[:, 1, :])\n print('-----')\n print('-----')\n print('-----')\n print('--STD---')\n print(stds[:, 1, :])\n print('-----')\n print('-----')\n print('-----')\n print('--PRE_TANH---')\n print(temp_pre_tanh_vals[:, 1, :])\n print('-----')\n print('-----')\n print('-----')\n print('--LOG_PROB---')\n print(log_probs[:, 1])\n raise ValueError\n\n if torch.isnan(log_prob).any():\n raise ValueError('LOG_PROB NAN')\n\n if torch.isnan(log_probs).any():\n raise ValueError('LOG_PROBS NAN')\n\n if torch.isnan(action).any():\n raise ValueError('ACTION NAN')\n\n if torch.isnan(actions).any():\n raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n log_std=log_std,\n log_prob=log_prob,\n entropy=entropy,\n std=std,\n mean_action_log_prob=mean_action_log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx 
is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 32, "blob_id": "721a6609999e83a9017c79904c28277898cb049f", "content_id": "226fe1c966c438055fdd5b04fdeafafbb028f37c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "permissive", "max_line_length": 32, "num_lines": 1, "path": "/robolearn/torch/algorithms/rl_algos/reinforce/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .reinforce import Reinforce\n" }, { "alpha_fraction": 0.5415664315223694, "alphanum_fraction": 0.5454691052436829, "avg_line_length": 37.329917907714844, "blob_id": "fd995bb8cb1761179bb9ac1d31ea76bda9bb68be", "content_id": "bb16ff279cb4bea288ce2e8f7b085962b4e11c03", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18705, "license_type": "permissive", "max_line_length": 92, "num_lines": 488, "path": "/robolearn/torch/algorithms/rl_algos/sql/iu_sql.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Haarnoja's TensorFlow SQL implementation\n\nhttps://github.com/haarnoja/softqlearning\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom collections import OrderedDict\n\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfrom robolearn.algorithms.rl_algos import IncrementalRLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils.samplers import InPlacePathSampler\nfrom robolearn.torch.sql.policies import MakeDeterministic\nfrom robolearn.torch.sql.kernel import adaptive_isotropic_gaussian_kernel\nfrom 
robolearn.torch.utils.ops import log_sum_exp\n\nEPS = 1e-6\n\n\ndef assert_shape(tensor, expected_shape):\n tensor_shape = list(tensor.shape)\n assert len(tensor_shape) == len(expected_shape)\n assert all([a == b for a, b in zip(tensor_shape, expected_shape)])\n\n\nclass IUSQL(IncrementalRLAlgorithm, TorchAlgorithm):\n \"\"\"Intentional-Unintentional Soft Q-learning (IU-SQL).\n\n \"\"\"\n def __init__(self,\n env,\n u_qfs,\n u_policies,\n i_qf=None,\n i_policy=None,\n exploration_pol_id=0,\n iu_mode='composition',\n\n qf_lr=1e-3,\n policy_lr=1e-3,\n optimizer_class=optim.Adam,\n use_hard_updates=False,\n hard_update_period=1000,\n soft_target_tau=0.001,\n\n value_n_particles=16,\n kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=16,\n kernel_update_ratio=0.5,\n plotter=None,\n eval_deterministic=True,\n **kwargs):\n \"\"\"\n\n Args:\n env:\n qf (`robolearn.PyTorchModule`): Q-function approximator.\n policy (`robolearn.PyTorchModule`):\n qf_lr (`float`): Learning rate used for the Q-function approximator.\n use_hard_updates (`bool`): Use a hard rather than soft update.\n hard_update_period (`int`): How many gradient steps before copying\n the parameters over. Used if `use_hard_updates` is True.\n soft_target_tau (`float`): Soft target tau to update target QF.\n Used if `use_hard_updates` is False.\n value_n_particles (`int`): The number of action samples used for\n estimating the value of next state.\n kernel_fn (function object): A function object that represents\n a kernel function.\n kernel_n_particles (`int`): Total number of particles per state\n used in SVGD updates.\n plotter (`MultiQFPolicyPlotter`): Plotter instance to be used for\n visualizing Q-function during training.\n eval_deterministic: Evaluate with deterministic version of current\n _i_policy.\n **kwargs:\n \"\"\"\n self._n_unintentional = len(u_qfs)\n\n if i_policy is None:\n self._i_policy = u_policies[exploration_pol_id]\n else:\n self._i_policy = i_policy\n\n if eval_deterministic:\n eval_policy = MakeDeterministic(self._i_policy)\n else:\n eval_policy = self._i_policy\n\n if i_qf is None:\n self._i_qf = u_qfs[exploration_pol_id]\n else:\n self._i_qf = i_qf\n\n self._iu_mode = iu_mode\n if iu_mode == 'composition':\n self._i_target_qf = None\n else:\n self._i_target_qf = self._i_qf.copy()\n\n super(IUSQL, self).__init__(\n env=env,\n exploration_policy=self._i_policy,\n eval_policy=eval_policy,\n **kwargs\n )\n\n # Unintentional Tasks\n self._u_policies = u_policies\n self._u_qfs = u_qfs\n self._u_target_qfs = [qf.copy() for qf in self._u_qfs]\n\n # Plotter\n self._epoch_plotter = plotter\n\n # Env data\n self._action_dim = self.explo_env.action_space.low.size\n self._obs_dim = self.explo_env.observation_space.low.size\n\n # Optimize Q-fcn\n self._u_qf_optimizers = [optimizer_class(qf.parameters(), lr=qf_lr, )\n for qf in self._u_qfs]\n self._value_n_particles = value_n_particles\n\n if iu_mode == 'composition':\n self._i_qf_optimizer = None\n else:\n self._i_qf_optimizer = optimizer_class(self._i_qf.parameters(),\n lr=qf_lr,)\n\n # Optimize Sampling Policy\n self._u_policy_optimizers = [optimizer_class(policy.parameters(),\n lr=policy_lr, )\n for policy in self._u_policies]\n if iu_mode == 'composition':\n self._i_policy_optimizer = \\\n optimizer_class(self._i_policy.parameters(),\n lr=policy_lr, )\n else:\n self._i_policy_optimizer = None\n\n self._kernel_n_particles = kernel_n_particles\n self._kernel_update_ratio = kernel_update_ratio\n self._kernel_fn = kernel_fn\n\n # Optimize target Q-fcn\n 
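        # Clarifying sketch (added comment; not in the original source): the
        # two flags below select between the target-network update rules
        # implemented in _update_target_softq_fcn further down:
        #   hard:  if step % hard_update_period == 0: target <- current
        #   soft:  target <- soft_target_tau * current
        #                    + (1 - soft_target_tau) * target
        # i.e. Polyak averaging with rate tau, which is presumably what
        # ptu.soft_update_from_to computes.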
self.use_hard_updates = use_hard_updates\n self.hard_update_period = hard_update_period\n self.soft_target_tau = soft_target_tau\n\n # Evaluation Sampler (One for each unintentional\n self.eval_samplers = [\n InPlacePathSampler(env=env, policy=eval_policy,\n total_samples=self.num_steps_per_eval + self.max_path_length,\n max_path_length=self.max_path_length, )\n for eval_policy in self._u_policies\n ]\n\n def pretrain(self):\n # Match target Qfcn with current one\n for unint_idx in range(self._n_unintentional):\n self._update_target_softq_fcn(unint_idx=unint_idx)\n\n if self._iu_mode == 'composition':\n pass\n else:\n self._update_target_softq_fcn(unint_idx=None)\n\n def _do_training(self):\n batch = self.get_batch()\n\n # Update Unintentional Networks\n for unint_idx in range(self._n_unintentional):\n bellman_residual = self._update_softq_fcn(batch, unint_idx)\n surrogate_cost = self._update_sampling_policy(batch, unint_idx)\n self._update_target_softq_fcn(unint_idx=unint_idx)\n\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n self.eval_statistics['[%d] Bellman Residual (QFcn)' % unint_idx] = \\\n np.mean(ptu.get_numpy(bellman_residual))\n self.eval_statistics['[%d] Surrogate Reward (Policy)' % unint_idx] = \\\n np.mean(ptu.get_numpy(surrogate_cost))\n\n # Update Intentional Networks\n if self._iu_mode == 'composition':\n pass\n else:\n bellman_residual = self._update_softq_fcn(batch, unint_idx=None)\n self.eval_statistics['Bellman Residual (QFcn)'] = \\\n np.mean(ptu.get_numpy(bellman_residual))\n\n if self._iu_mode == 'composition':\n surrogate_cost = self._update_sampling_policy(batch, unint_idx=None)\n self.eval_statistics['Surrogate Reward (Intentional Policy)'] = \\\n np.mean(ptu.get_numpy(surrogate_cost))\n else:\n pass\n\n if self._iu_mode == 'composition':\n pass\n else:\n self._update_target_softq_fcn(unint_idx=None)\n\n # SoftQ Functions\n def _update_softq_fcn(self, batch, unint_idx=None):\n \"\"\"\n Q-fcn update\n Args:\n batch:\n\n Returns:\n\n \"\"\"\n\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n if unint_idx is None:\n rewards = batch['rewards']\n else:\n rewards = batch['reward_vectors'][:, unint_idx].unsqueeze(-1) \\\n * self.reward_scale\n terminals = batch['terminals']\n n_batch = obs.shape[0]\n\n if unint_idx is None:\n target_q_fcn = self._i_target_qf\n q_fcn = self._i_qf\n q_fcn_optimizer = self._i_qf_optimizer\n else:\n target_q_fcn = self._u_target_qfs[unint_idx]\n q_fcn = self._u_qfs[unint_idx]\n q_fcn_optimizer = self._u_qf_optimizers[unint_idx]\n\n # The value of the next state is approximated with uniform act. 
samples.\n uniform_dist = torch.distributions.Uniform(ptu.FloatTensor([-1.0]),\n ptu.FloatTensor([1.0]))\n target_actions = uniform_dist.sample((self._value_n_particles,\n self._action_dim)).squeeze()\n q_value_targets = \\\n target_q_fcn(\n next_obs.unsqueeze(1).expand(n_batch,\n self._value_n_particles,\n self._obs_dim),\n target_actions.unsqueeze(0).expand(n_batch,\n self._value_n_particles,\n self._action_dim)\n ).squeeze()\n assert_shape(q_value_targets, [n_batch, self._value_n_particles])\n\n q_values = q_fcn(obs, actions).squeeze()\n assert_shape(q_values, [n_batch])\n\n # Equation 10: Vsoft: 'Empirical' mean from q_vals_tgts particles\n next_value = log_sum_exp(q_value_targets.squeeze(), dim=1)\n assert_shape(next_value, [n_batch])\n\n # Importance _weights add just a constant to the value.\n next_value -= torch.log(ptu.FloatTensor([self._value_n_particles]))\n next_value += self._action_dim * np.log(2)\n\n # \\hat Q in Equation 11\n # ys = (self.reward_scale * rewards.squeeze() + # Current reward\n ys = (rewards.squeeze() + # Scale reward is already done by base class\n (1 - terminals.squeeze()) * self.discount * next_value\n ).detach() # TODO: CHECK IF I AM DETACHING GRADIENT!!!\n assert_shape(ys, [n_batch])\n\n # Equation 11: Soft-Bellman error\n bellman_residual = 0.5 * torch.mean((ys - q_values) ** 2)\n\n # Gradient descent on _i_policy parameters\n q_fcn_optimizer.zero_grad() # Zero all model var grads\n bellman_residual.backward() # Compute gradient of surrogate_loss\n q_fcn_optimizer.step() # Update model vars\n\n return bellman_residual\n\n # Sampling Policy\n def _update_sampling_policy(self, batch, unint_idx=None):\n \"\"\"\n Policy update: SVGD\n Returns:\n\n \"\"\"\n obs = batch['observations']\n next_obs = batch['next_observations']\n n_batch = obs.shape[0]\n\n if unint_idx is None:\n policy = self._i_policy\n q_fcn = self._i_qf\n pol_optimizer = self._i_policy_optimizer\n else:\n policy = self._u_policies[unint_idx]\n q_fcn = self._u_qfs[unint_idx]\n pol_optimizer = self._u_policy_optimizers[unint_idx]\n\n actions = policy(\n obs.unsqueeze(1).expand(n_batch,\n self._kernel_n_particles,\n self._obs_dim)\n )\n # actions = actions[0] # For policies that return tuple\n assert_shape(actions,\n [n_batch, self._kernel_n_particles, self._action_dim])\n\n # SVGD requires computing two empirical expectations over actions\n # (see Appendix C1.1.). To that end, we first sample a single set of\n # actions, and later split them into two sets: `fixed_actions` are used\n # to evaluate the expectation indexed by `j` and `updated_actions`\n # the expectation indexed by `i`.\n n_updated_actions = \\\n int(self._kernel_n_particles*self._kernel_update_ratio)\n n_fixed_actions = self._kernel_n_particles - n_updated_actions\n\n fixed_actions, updated_actions \\\n = torch.split(actions, [n_fixed_actions, n_updated_actions], dim=1)\n # Equiv: fixed_actions = tf.stop_gradient(fixed_actions)\n fixed_actions = torch.tensor(fixed_actions.detach(), requires_grad=True)\n assert_shape(fixed_actions,\n [n_batch, n_fixed_actions, self._action_dim])\n assert_shape(updated_actions,\n [n_batch, n_updated_actions, self._action_dim])\n\n svgd_target_values = \\\n (q_fcn(next_obs.unsqueeze(1).expand(n_batch,\n n_fixed_actions,\n self._obs_dim),\n fixed_actions)).squeeze()\n\n # Target log-density. 
Q_soft in Equation 13:\n squash_correction = torch.sum(torch.log(1 - fixed_actions**2 + EPS),\n dim=-1)\n log_p = svgd_target_values + squash_correction\n\n # Backward log_p\n grad_log_p = torch.autograd.grad(log_p,\n fixed_actions,\n grad_outputs=torch.ones_like(log_p),\n create_graph=False)[0]\n grad_log_p = torch.unsqueeze(grad_log_p, dim=2)\n assert_shape(grad_log_p,\n [n_batch, n_fixed_actions, 1, self._action_dim])\n\n kernel_dict = self._kernel_fn(xs=fixed_actions,\n ys=updated_actions)\n\n # Kernel function in Eq. 13:\n kappa = torch.unsqueeze(kernel_dict['output'], dim=3)\n assert_shape(kappa,\n [n_batch, n_fixed_actions, n_updated_actions, 1])\n\n # Stein Variational Gradient in Eq. 13:\n action_gradients = \\\n torch.mean(kappa * grad_log_p + kernel_dict['gradient'], dim=1)\n assert_shape(action_gradients,\n [n_batch, n_updated_actions, self._action_dim])\n\n # Propagate the gradient through the _i_policy network (Equation 14).\n gradients = torch.autograd.grad(updated_actions,\n policy.parameters(),\n grad_outputs=action_gradients,\n create_graph=False)\n\n # TODO: Check a better way to do this\n for pp, (w, g) in enumerate(zip(policy.parameters(),\n gradients)):\n if pp == 0:\n surrogate_loss = torch.sum(w*g)\n else:\n surrogate_loss += torch.sum(w*g)\n\n # Gradient descent on _i_policy parameters\n pol_optimizer.zero_grad() # Zero all model var grads\n (-surrogate_loss).backward() # Compute gradient of surrogate_loss\n pol_optimizer.step() # Update model vars\n\n return -surrogate_loss\n\n # Target Q-Functions\n def _update_target_softq_fcn(self, unint_idx=None):\n \"\"\"\n Update the Target SoftQ function\n Args:\n unint_idx: ID of the unintentional task.\n None updates for the intentional one.\n\n Returns: None\n\n \"\"\"\n if unint_idx is None:\n target_q_fcn = self._i_target_qf\n q_fcn = self._i_qf\n else:\n target_q_fcn = self._u_target_qfs[unint_idx]\n q_fcn = self._u_qfs[unint_idx]\n\n if self.use_hard_updates:\n # print(self._n_total_train_steps, self.hard_update_period)\n if self._n_total_train_steps % self.hard_update_period == 0:\n ptu.copy_model_params_from_to(q_fcn,\n target_q_fcn)\n else:\n ptu.soft_update_from_to(q_fcn, target_q_fcn,\n self.soft_target_tau)\n\n @property\n def torch_models(self):\n if self._i_target_qf is None:\n target_i_q_fcn = []\n else:\n target_i_q_fcn = [self._i_target_qf]\n\n return [self._i_policy] + self._u_policies + \\\n [self._i_qf] + self._u_qfs + \\\n target_i_q_fcn + self._u_target_qfs\n\n def get_epoch_snapshot(self, epoch):\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n snapshot = super(IUSQL, self).get_epoch_snapshot(epoch)\n snapshot.update(\n policy=self._i_policy,\n qf=self._i_qf,\n target_qf=self._i_target_qf,\n u_policies=self._u_policies,\n u_qfs=self._u_qfs,\n target_uqfs=self._u_target_qfs,\n )\n return snapshot\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n test_paths = [None for _ in range(self._n_unintentional)]\n for demon in range(self._n_unintentional):\n logger.log(\"[U-%02d] Collecting samples for evaluation\" % demon)\n test_paths[demon] = self.eval_samplers[demon].obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths[demon], stat_prefix=\"[U-%02d] Test\" % demon,\n ))\n average_returns = eval_util.get_average_returns(test_paths[demon])\n statistics['[U-%02d] AverageReturn' % demon] = average_returns\n\n logger.log(\"[I] 
Collecting samples for evaluation\")\n i_test_path = self.eval_sampler.obtain_samples()\n statistics.update(eval_util.get_generic_path_information(\n i_test_path, stat_prefix=\"[I] Test\",\n ))\n\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n if hasattr(self.explo_env, \"log_diagnostics\"):\n # TODO: CHECK ENV LOG_DIAGNOSTICS\n print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n # self.env.log_diagnostics(test_paths[demon])\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n for demon in range(self._n_unintentional):\n if self.render_eval_paths:\n # TODO: CHECK ENV RENDER_PATHS\n print('TODO: RENDER_PATHS')\n pass\n\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n" }, { "alpha_fraction": 0.8471337556838989, "alphanum_fraction": 0.8598726391792297, "avg_line_length": 38, "blob_id": "5a2510b33472aa27c1cfee7b07c75382b0a86a01", "content_id": "92a1e7e389c122cdcaf5519a7fafb7c63897236e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "permissive", "max_line_length": 50, "num_lines": 4, "path": "/robolearn/envs/simple_envs/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .cliff import CliffEnv\nfrom .crawler import CrawlingRobotEnv\nfrom .frozen_lake import FrozenLakeEnv\nfrom .navigation2d import Navigation2dGoalCompoEnv\n\n" }, { "alpha_fraction": 0.5558704733848572, "alphanum_fraction": 0.5623481869697571, "avg_line_length": 30.653846740722656, "blob_id": "2630e21c0ae1861b8a642960c8297b0ebe5c1ce0", "content_id": "01fcd4a767e608507d3b7d63e83d31b8f930528a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2470, "license_type": "permissive", "max_line_length": 81, "num_lines": 78, "path": "/robolearn/torch/models/transitions/linear_regression.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.utils.serializable import Serializable\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models import Transition\nfrom robolearn.torch.utils.ops.gauss_fit_joint_prior import gauss_fit_joint_prior\n\n\nclass TVLGDynamics(PyTorchModule, Transition):\n def __init__(self, horizon, obs_dim, action_dim):\n self._T = horizon\n Transition.__init__(self, obs_dim=obs_dim, action_dim=action_dim)\n\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n super(TVLGDynamics, self).__init__()\n\n self.Fm = nn.Parameter(ptu.zeros(horizon, obs_dim, obs_dim+action_dim))\n self.fv = nn.Parameter(ptu.ones(horizon, obs_dim))\n self.dyn_covar = nn.Parameter(ptu.zeros(horizon, obs_dim, obs_dim))\n\n # Prior\n self._prior = None\n\n def get_next(self, observation, action):\n pass\n\n def forward(self, obs, act, time=None, stochastic=False):\n if time is None:\n raise NotImplementedError\n\n obs_and_act = torch.cat((obs, act), dim=-1)\n\n batch = obs.shape[:-1]\n\n mean = obs_and_act.mm(torch.t(self.Fm[time])) + self.fv[time]\n cov = self.dyn_covar[time]\n\n next_obs = mean\n\n return next_obs\n\n def get_prior(self):\n return self._prior\n\n def fit(self, States, Actions, regularization=1e-6):\n \"\"\" Fit dynamics. 
\"\"\"\n N, T, dS = States.shape\n dA = Actions.shape[2]\n\n if N == 1:\n raise ValueError(\"Cannot fit dynamics on 1 sample\")\n\n it = slice(dS+dA)\n\n # Fit dynamics with least squares regression.\n dwts = (1.0 / N) * ptu.ones(N)\n\n for t in range(T - 1):\n Ys = torch.cat((States[:, t, :], Actions[:, t, :],\n States[:, t + 1, :]),\n dim=-1)\n\n # Obtain Normal-inverse-Wishart prior.\n mu0, Phi, mm, n0 = self._prior.eval(dS, dA, Ys)\n sig_reg = ptu.zeros((dS+dA+dS, dS+dA+dS))\n sig_reg[it, it] = regularization\n\n Fm, fv, dyn_covar = \\\n gauss_fit_joint_prior(Ys, mu0, Phi, mm, n0,\n dwts, dS+dA, dS, sig_reg)\n self.Fm[t, :, :] = Fm\n self.fv[t, :] = fv\n self.dyn_covar[t, :, :] = dyn_covar\n\n def set_prior(self, prior):\n self._prior = prior\n\n" }, { "alpha_fraction": 0.8731707334518433, "alphanum_fraction": 0.8731707334518433, "avg_line_length": 50.25, "blob_id": "0cfbec31a7ecc9951784cc0faff29724c51af49d", "content_id": "3476073614e05c1d66d03f4a923cf4a3d3fb2c42", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "permissive", "max_line_length": 58, "num_lines": 4, "path": "/robolearn/torch/utils/data_management/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .normalizer_old import TorchNormalizer\nfrom .normalizer_old import TorchFixedNormalizer\nfrom .simple_replay_buffer import SimpleReplayBuffer\nfrom .multigoal_replay_buffer import MultiGoalReplayBuffer\n" }, { "alpha_fraction": 0.5189179182052612, "alphanum_fraction": 0.5200146436691284, "avg_line_length": 33.626583099365234, "blob_id": "5326dc2e6719ceabd8a2cc8275e396ba50f18b5d", "content_id": "895252dbba31b28ffff237be182a6d0fc5945273", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5471, "license_type": "permissive", "max_line_length": 76, "num_lines": 158, "path": "/robolearn/torch/models/values/nn_multi_vfunction.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.utils.serializable import Serializable\nfrom robolearn.torch.utils.pytorch_util import np_ify\nimport robolearn.torch.utils.pytorch_util as ptu\nimport torch.nn as nn\n\nfrom robolearn.torch.utils.nn import LayerNorm\nfrom robolearn.models import VFunction\n\n\nclass NNMultiVFunction(PyTorchModule, VFunction):\n def __init__(self,\n obs_dim,\n n_vs,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_activation='linear',\n output_w_init='xavier_normal',\n output_b_init_val=0,\n shared_layer_norm=False,\n unshared_layer_norm=False,\n layer_norm_kwargs=None,\n ):\n\n VFunction.__init__(self, obs_dim=obs_dim)\n\n self._n_vs = n_vs\n\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n super(NNMultiVFunction, self).__init__()\n\n if layer_norm_kwargs is None:\n layer_norm_kwargs = dict()\n\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._output_activation = ptu.get_activation(output_activation)\n self._shared_layer_norm = shared_layer_norm\n self._unshared_layer_norm = unshared_layer_norm\n self._sfcs = []\n self._sfc_norms = []\n self._ufcs = [list() for _ in range(self._n_vs)]\n self._ufc_norms = [list() for _ in range(self._n_vs)]\n self._ufcs_lasts = []\n\n in_size = obs_dim\n # Shared 
Layers\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(layer=sfc,\n activation=hidden_activation,\n b=hidden_b_init_val)\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n in_size = next_size\n\n # Unshared Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for q_idx in range(self._n_vs):\n ufc = nn.Linear(in_size, next_size)\n ptu.layer_init(layer=ufc,\n activation=hidden_activation,\n b=hidden_b_init_val)\n self.__setattr__(\"ufc{}_{}\".format(q_idx, ii), ufc)\n self._ufcs[q_idx].append(ufc)\n\n if self._unshared_layer_norm:\n ln = LayerNorm(next_size)\n tmp_txt = \"ufc{}_{}_norm\".format(q_idx, ii)\n self.__setattr__(tmp_txt, ln)\n self._ufc_norms[q_idx].append(ln)\n in_size = next_size\n\n for q_idx in range(self._n_vs):\n last_ufc = nn.Linear(in_size, 1)\n ptu.layer_init(layer=last_ufc,\n activation=output_activation,\n b=output_b_init_val)\n self.__setattr__(\"ufc_last{}\".format(q_idx), last_ufc)\n self._ufcs_lasts.append(last_ufc)\n\n def forward(self, obs, val_idxs=None):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n val_idxs (iterable):\n\n Returns:\n values (list)\n info (dict): empty dictionary\n\n \"\"\"\n if val_idxs is None:\n val_idxs = list(range(self._n_vs))\n\n h = obs\n # Shared Layers\n for i, fc in enumerate(self._sfcs):\n h = self._hidden_activation(fc(h))\n\n hs = [h.clone() for _ in val_idxs]\n # Unshared Layers\n if len(self._ufcs) > 0:\n for ii, idx in enumerate(val_idxs):\n for i, fc in enumerate(self._ufcs[idx]):\n hs[ii] = self._hidden_activation(fc(hs[ii]))\n\n values = [self._output_activation(self._ufcs_lasts[idx](hs[ii]))\n for ii, idx in enumerate(val_idxs)]\n\n return values, dict()\n\n def get_value(self, obs_np, val_idxs=None):\n if val_idxs is None:\n val_idxs = list(range(self._n_qs))\n\n values, info_dict = self.get_values(obs_np[None], val_idxs=val_idxs)\n\n values = [value[0, :] for value in values]\n\n for key, vals in info_dict.items():\n info_dict[key] = [val[0, :] if isinstance(val, np.ndarray)\n else None for val in vals]\n\n return values, info_dict\n\n def get_values(self, obs_np, val_idxs=None):\n if val_idxs is None:\n val_idxs = list(range(self._n_qs))\n\n values, info_dict = self.eval_np(obs_np, val_idxs=val_idxs)\n\n values = [np_ify(tensor) for tensor in values]\n\n for key, vals in info_dict.items():\n info_dict[key] = [np_ify(val) for val in vals]\n\n return values, info_dict\n\n @property\n def n_heads(self):\n return self._n_vs\n\n @property\n def n_values(self):\n return self._n_vs\n" }, { "alpha_fraction": 0.5371992588043213, "alphanum_fraction": 0.5399136543273926, "avg_line_length": 26.658702850341797, "blob_id": "33493205e9e78ef8b98186db2b06029645c203d1", "content_id": "e86ef3e434fc05f39924fc34cf7fbdd6d8c4a0ee", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8105, "license_type": "permissive", "max_line_length": 92, "num_lines": 293, "path": "/robolearn/torch/algorithms/rl_algos/ppo/ppo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on ...\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom collections import OrderedDict\nfrom itertools import chain\n\nimport 
robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils import eval_util\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.utils.data_management.normalizer import RunningNormalizer\n\nimport tensorboardX\n\n\nclass PPO(RLAlgorithm, TorchAlgorithm):\n \"\"\"\n Proximal Policy Optimization\n \"\"\"\n\n def __init__(\n self,\n env,\n policy,\n qf,\n\n replay_buffer,\n normalize_obs=False,\n eval_env=None,\n\n action_prior='uniform',\n\n entropy_scale=1.,\n\n policy_lr=1e-4,\n qf_lr=1e-3,\n\n policy_weight_decay=0,\n qf_weight_decay=0,\n\n residual_gradient_weight=0,\n epoch_discount_schedule=None,\n policy_mean_regu_weight=1e-3,\n policy_std_regu_weight=1e-3,\n policy_pre_activation_weight=0.,\n\n optimizer='adam',\n # optimizer='rmsprop',\n # optimizer='sgd',\n optimizer_kwargs=None,\n\n target_hard_update_period=1000,\n tau=1e-2,\n use_soft_update=False,\n\n save_replay_buffer=False,\n eval_deterministic=True,\n log_tensorboard=False,\n **kwargs\n ):\n\n # ###### #\n # Models #\n # ###### #\n\n # Exploration Policy\n self._policy = policy\n\n # Evaluation Policy\n if eval_deterministic:\n eval_policy = MakeDeterministic(self._policy)\n else:\n eval_policy = self._policy\n\n # Observation Normalizer\n if normalize_obs:\n self._obs_normalizer = RunningNormalizer(shape=env.obs_dim)\n else:\n self._obs_normalizer = None\n\n RLAlgorithm.__init__(\n self,\n env=env,\n exploration_policy=self._policy,\n eval_env=eval_env,\n eval_policy=eval_policy,\n obs_normalizer=self._obs_normalizer,\n **kwargs\n )\n\n # Important algorithm hyperparameters\n self._action_prior = action_prior\n self._entropy_scale = entropy_scale\n\n # Q-function\n self._qf = qf\n\n # ########## #\n # Optimizers #\n # ########## #\n if optimizer.lower() == 'adam':\n optimizer_class = optim.Adam\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n amsgrad=True,\n # amsgrad=False,\n )\n elif optimizer.lower() == 'rmsprop':\n optimizer_class = optim.RMSprop\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n\n )\n else:\n raise ValueError('Wrong optimizer')\n\n # Q-function(s) optimizer(s)\n self._qf_optimizer = optimizer_class(\n self._qf.parameters(),\n lr=qf_lr,\n weight_decay=0,\n **optimizer_kwargs\n )\n\n # Policy optimizer\n self._policy_optimizer = optimizer_class(\n self._policy.parameters(),\n lr=policy_lr,\n weight_decay=0,\n **optimizer_kwargs\n )\n\n # Policy regularization coefficients (weights)\n self._policy_mean_regu_weight = policy_mean_regu_weight\n self._policy_std_regu_weight = policy_std_regu_weight\n self._policy_pre_activation_weight = policy_pre_activation_weight\n\n # Useful Variables for logging\n self.logging_pol_kl_loss = np.zeros(self.num_train_steps_per_epoch)\n self.logging_qf_loss = np.zeros(self.num_train_steps_per_epoch)\n self.logging_rewards = np.zeros(self.num_train_steps_per_epoch)\n self.logging_policy_entropy = np.zeros(self.num_train_steps_per_epoch)\n self.logging_policy_log_std = np.zeros((self.num_train_steps_per_epoch,\n self.explo_env.action_dim))\n self.logging_policy_mean = np.zeros((self.num_train_steps_per_epoch,\n self.explo_env.action_dim))\n\n self._log_tensorboard = log_tensorboard\n self._summary_writer = tensorboardX.SummaryWriter(log_dir=logger.get_snapshot_dir())\n\n def pretrain(self, n_pretrain_samples):\n # We do not require any pretrain (I think...)\n 
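        # (PPO is on-policy; the replay-buffer warm-up used by the\n        # off-policy algorithms in this repo is not needed here.)\n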
pass\n\n def _do_training(self):\n # Get batch of samples\n # batch = self.get_batch()\n cosa = self.get_exploration_paths()\n\n # # Get common data from batch\n # rewards = batch['rewards']\n # terminals = batch['terminals']\n # obs = batch['observations']\n # actions = batch['actions']\n # next_obs = batch['next_observations']\n\n # ########################### #\n # LOG Useful Intentional Data #\n # ########################### #\n\n if self._log_tensorboard:\n pass\n\n def _not_do_training(self):\n return\n\n @property\n def torch_models(self):\n networks_list = list()\n return networks_list\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n snapshot = RLAlgorithm.get_epoch_snapshot(self, epoch)\n\n snapshot.update(\n policy=self._policy,\n qf=self._qf,\n )\n\n if self.explo_env.online_normalization or self.explo_env.normalize_obs:\n snapshot.update(\n obs_mean=self.explo_env.obs_mean,\n obs_var=self.explo_env.obs_var,\n )\n\n # Observation Normalizer\n snapshot.update(\n obs_normalizer=self._obs_normalizer,\n )\n\n # # Replay Buffer\n # if self.save_replay_buffer:\n # snapshot.update(\n # replay_buffer=self.replay_buffer,\n # )\n\n return snapshot\n\n def _update_logging_data(self):\n max_step = max(self._n_epoch_train_steps, 1)\n\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n\n def evaluate(self, epoch):\n RLAlgorithm.evaluate(self, epoch)\n\n def get_batch(self):\n pass\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n # # Add to replay buffer\n # self.replay_buffer.add_sample(\n # observation=observation,\n # action=action,\n # reward=reward,\n # terminal=terminal,\n # next_observation=next_observation,\n # agent_info=agent_info,\n # env_info=env_info,\n # )\n\n # Update observation normalizer (if applicable)\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n RLAlgorithm._handle_step(\n self,\n observation=observation,\n action=action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _end_rollout(self):\n \"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n\n # self.replay_buffer.terminate_episode()\n\n RLAlgorithm._end_rollout(self)\n\n" }, { "alpha_fraction": 0.4626433253288269, "alphanum_fraction": 0.4785183370113373, "avg_line_length": 37.342994689941406, "blob_id": "309c33d3c4bdb33b6557072d76eeb3475fa6615f", "content_id": "47203fa7c33fc677cc16727be0f488403627b0b4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7937, "license_type": "permissive", "max_line_length": 107, "num_lines": 207, "path": "/robolearn/utils/plots/multiple_learning_process_plots.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import subplots\nfrom robolearn.utils.plots import get_csv_data\nfrom robolearn.utils.plots import set_latex_plot\nfrom builtins import input\nimport os\nimport 
json\n\nLOG_FILE = 'progress.csv'\nPARAMS_FILE = 'params.pkl'\nVARIANT_FILE = 'variant.json'\n\nN_UNINTENTIONS = 2\n\n\ndef plot_multiple_process_iu_returns(\n csv_file_dict, block=False,\n max_iter=500,\n steps_per_iter=None,\n latex_plot=True,\n fig_name_prefix=None,\n):\n \"\"\"\n It plots the 'Test Returns Mean' label of the progress file.\n If algorithm of experiment is HIU, the unintentional data is considered an\n independent experiment.\n The keys of the categories dict are used for the axis title.\n The keys of the experiments dict are used for the labels in the legend.\n Args:\n csv_file_dict (dict): A dictionary of categories.\n - Category (dict): One figure per category.\n - Experiment (list): One for each figure.\n - Seed (String): Experiment run with a specific seed.\n\n dict(\n Criteria1 = dict(\n Experiment1 = list(\n ('full_path_of_experiment_with_seed_X', [-1])\n ('full_path_of_experiment_with_seed_Y', [-1])\n ('full_path_of_experiment_with_seed_Z', [-1])\n )\n Experiment2 = list(\n 'full_path_of_experiment_with_seed_X', [-1, 0, 1])\n 'full_path_of_experiment_with_seed_Y', [-1, 0, 1])\n 'full_path_of_experiment_with_seed_Z', [-1, 0, 1])\n )\n )\n Criteria2 = dict(\n Experiment1 = list(\n ('full_path_of_experiment_with_seed_X', [-1])\n ('full_path_of_experiment_with_seed_Y', [-1])\n ('full_path_of_experiment_with_seed_Z', [-1])\n )\n Experiment2 = list(\n 'full_path_of_experiment_with_seed_X', [-1, 0, 1])\n 'full_path_of_experiment_with_seed_Y', [-1, 0, 1])\n 'full_path_of_experiment_with_seed_Z', [-1, 0, 1])\n )\n )\n )\n\n block (bool): Block the figure\n max_iter:\n steps_per_iter:\n latex_plot:\n\n Returns:\n\n \"\"\"\n labels_to_plot = ['Test Returns Mean']\n labels_y_axis = ['Average Return']\n\n if latex_plot:\n set_latex_plot()\n\n i_labels = list()\n u_labels = list()\n for ll, label in enumerate(labels_to_plot):\n for uu in range(N_UNINTENTIONS):\n new_string = ('[U-%02d] ' % uu) + label\n u_labels.append(new_string)\n intent_string = '[I] ' + label\n i_labels.append(intent_string)\n\n categories = list(csv_file_dict.keys())\n\n if steps_per_iter is None:\n x_data = np.arange(0, max_iter)\n x_label = 'Iterations'\n else:\n x_data = np.arange(0, max_iter) * steps_per_iter\n x_label = 'Time steps (%s)' % '{:.0e}'.format(steps_per_iter)\n\n for cc, cate in enumerate(categories):\n # ######## #\n # Get data #\n # ######## #\n catego_dict = csv_file_dict[cate]\n n_subplots = len(i_labels)\n expts = list(catego_dict.keys())\n\n nexpts = len(expts)\n nseeds = len(catego_dict[expts[-1]])\n niters = max_iter\n nunint = N_UNINTENTIONS\n\n all_data = [np.zeros((nexpts, nseeds, nunint+1, niters))\n for _ in i_labels]\n\n algos = list()\n infos = list()\n\n for ee, expt in enumerate(expts):\n seeds = catego_dict[expt]\n algos.append(list())\n\n for ss, seed in enumerate(seeds):\n data_dir = catego_dict[expt][ss][0]\n info = [ii + 1 for ii in catego_dict[expt][ss][1]] # Because Main is 0 not -1\n\n variant_file = os.path.join(data_dir,\n VARIANT_FILE)\n with open(variant_file) as json_data:\n algo_name = json.load(json_data)['algo_name']\n algos[-1].append(algo_name)\n if ss == 0:\n infos.append(info)\n\n csv_file = os.path.join(data_dir, LOG_FILE)\n # print(csv_file)\n if algo_name.upper() in ['HIUSAC', 'HIUSACNEW', 'HIUDDPG']:\n data_csv = get_csv_data(csv_file, i_labels + u_labels)\n else:\n data_csv = get_csv_data(csv_file, i_labels)\n\n for dd in range(n_subplots):\n if data_csv.shape[-1] < max_iter:\n raise ValueError('por ahora hay solo %02d iters. 
En %s'\n % (data_csv.shape[-1],\n csv_file))\n n_data = data_csv.shape[0]\n all_data[dd][ee, ss, :n_data, :] = data_csv[:, :max_iter]\n\n # TODO: Assuming only AvgReturn\n rew_scales = catego_dict[expt][ss][2]\n for ii, rew_scale in zip(info, rew_scales):\n all_data[-1][ee, ss, ii, :] *= 1 / rew_scale\n\n # ############# #\n # Plot the data #\n # ############# #\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n # fig.suptitle('Expected Return - '+str(cate), fontweight='bold')\n if fig_name_prefix is None:\n fig_name_prefix = \"\"\n fig_title = (fig_name_prefix + 'Expected Return '+str(cate)).replace(\" \", \"_\")\n fig.canvas.set_window_title(fig_title)\n lines = list()\n labels = list()\n\n for aa, ax in enumerate(axs):\n for ee, expt in enumerate(expts):\n print('----> cat:', cate, '|', expt, all_data[aa][ee, :, :, :].shape, '| info:', infos[ee])\n for ii, iu_idx in enumerate(infos[ee]):\n # for ii in range(max_unint):\n data_mean = np.mean(all_data[aa][ee, :, iu_idx, :], axis=0)\n data_std = np.std(all_data[aa][ee, :, iu_idx, :], axis=0)\n # 85:1.440, 90:1.645, 95:1.960, 99:2.576\n ax.fill_between(\n x_data,\n (data_mean - 0.5 * data_std),\n (data_mean + 0.5 * data_std), alpha=.3)\n mean_plot = ax.plot(x_data, data_mean)[0]\n\n if aa == 0:\n lines.append(mean_plot)\n if algos[ee][ii].upper() == 'HIUSAC':\n if iu_idx == 0:\n i_suffix = ' [I]'\n else:\n i_suffix = ' [U-%02d]' % iu_idx\n labels.append(expt + i_suffix)\n else:\n labels.append(expt)\n\n xdiff = x_data[1] - x_data[0]\n ax.set_xlim(x_data[0]-xdiff, x_data[-1] + xdiff)\n ax.set_ylabel(labels_y_axis[aa], fontsize=50)\n plt.setp(ax.get_xticklabels(), visible=False)\n ax.xaxis.set_major_locator(plt.MultipleLocator(50))\n ax.xaxis.set_minor_locator(plt.MultipleLocator(10))\n\n axs[-1].set_xlabel(x_label, fontsize=50)\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n legend = fig.legend(lines, labels, loc='lower right', ncol=1,\n # legend = fig.legend(lines, labels, loc=(-1, 0), ncol=1,\n labelspacing=0., prop={'size': 40})\n fig.set_size_inches(19, 11) # 1920 x 1080\n fig.tight_layout()\n legend.draggable(True)\n\n plt.show(block=block)\n" }, { "alpha_fraction": 0.4656716287136078, "alphanum_fraction": 0.482587069272995, "avg_line_length": 29.454545974731445, "blob_id": "6a8f1a885b09b50c1674023a71f5e8f0d57945ea", "content_id": "85edd587e5f391cd8bff95ea840af9b63dcaa5b4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1005, "license_type": "permissive", "max_line_length": 77, "num_lines": 33, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/costs/cost_action.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass CostAction(object):\n def __init__(self, wu, target=None):\n self._wu = wu\n self._target = target\n\n def eval(self, path):\n actions = path['actions']\n T = len(actions)\n Du = path['actions'][-1].shape[0]\n Dx = path['observations'][-1].shape[0]\n\n # Code from superball_gps\n if self._target is None:\n target = 0\n else:\n target = np.tile(self._target, (T, 1))\n # target = np.tile(self._hyperparams['target'], (Du, 1))\n\n # l = 0.5 * np.sum(self._hyperparams['wu'] * (sample_u ** 2), axis=1)\n l = 0.5 * np.sum(self._wu * ((actions - target) ** 2),\n axis=1)\n\n # lu = self._hyperparams['wu'] * sample_u\n lu = self._wu * (actions - target)\n lx = np.zeros((T, Dx))\n luu = np.tile(np.diag(self._wu), [T, 1, 
1])\n lxx = np.zeros((T, Dx, Dx))\n lux = np.zeros((T, Du, Dx))\n\n return l, lx, lu, lxx, luu, lux\n" }, { "alpha_fraction": 0.5726277232170105, "alphanum_fraction": 0.5805097818374634, "avg_line_length": 27.23410415649414, "blob_id": "b3aa3a7febed29c398beebfb7d2f3e6b8aef68b4", "content_id": "b29ee0a10f5a4d5706c5de9d080ca30968028103", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9769, "license_type": "permissive", "max_line_length": 93, "num_lines": 346, "path": "/robolearn/utils/plots/learning_process_plots.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots import subplots\nfrom robolearn.utils.plots import get_csv_data\nfrom builtins import input\n\n\ndef plot_process_iu_returns(csv_file, n_unintentional=None, block=False):\n labels_to_plot = ['Test Returns Mean']\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n new_string = '[I] ' + label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Avg Return', fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n print('total_iters:', len(data[-1]))\n plt.show(block=block)\n\n\ndef plot_process_iu_avg_rewards(csv_file, n_unintentional=None, block=False):\n labels_to_plot = ['Test Rewards Mean']\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n new_string = '[I] ' + label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Rewards Mean', fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n print('total_iters:', len(data[-1]))\n plt.show(block=block)\n\n\ndef plot_process_iu_policies(csv_file, n_unintentional=None, block=False,\n plot_initial=False, plot_intentional=False,\n deterministic=False):\n if deterministic:\n labels_to_plot = [\n 'Mixing Weights',\n 'Policy Loss',\n # 'Raw Policy Loss',\n 'Rewards',\n ]\n else:\n labels_to_plot = [\n 'Mixing Weights',\n 'Pol KL Loss',\n 'Rewards',\n 'Policy Entropy',\n # 'Log Policy Target',\n # 'Policy Mean',\n # 'Policy Std'\n ]\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n if plot_initial:\n idx0 = 0\n else:\n idx0 = 1\n\n # Add Intentional-Unintentional Label\n 
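    # e.g. 'Test Returns Mean' -> ['[U-00] Test Returns Mean', ...,\n    #      '[I] Test Returns Mean']: one label per unintentional head\n    #      plus one for the intentional policy.\n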
new_labels = list()\n for ll, label in enumerate(labels_to_plot):\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n if ll > 0 and plot_intentional:\n new_string = '[I] ' + label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Policy Properties',\n fontweight='bold')\n\n idx_counter = 0\n lines = list()\n labels = list()\n\n for aa, ax in enumerate(axs):\n for uu in range(n_unintentional):\n line, = ax.plot(data[idx_counter, idx0:], label='[U-%02d] ' % uu)\n idx_counter += 1\n if aa == 1:\n lines.append(line)\n labels.append('[U-%02d] ' % uu)\n\n if aa > 0 and plot_intentional:\n line, = ax.plot(data[idx_counter, idx0:], label='[I]')\n idx_counter += 1\n if aa == 1:\n lines.append(line)\n labels.append('[I]')\n\n ax.set_ylabel(labels_to_plot[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n fig.legend(lines, labels, loc='right', ncol=1, labelspacing=0.)\n\n plt.show(block=block)\n\n\ndef plot_process_iu_values_errors(csv_file, n_unintentional=None, block=False):\n labels_to_plot = ['Qf Loss', 'Vf Loss']\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n new_string = '[I] ' + label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Value Functions Errors',\n fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n\n\ndef plot_process_iu_alphas(csv_file, n_unintentional=None, block=False):\n labels_to_plot = ['Alphas']\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n new_string = '[I] ' + label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n try:\n data = get_csv_data(csv_file, new_labels)\n except:\n print(\"There is no alphas data to show!!\")\n return\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Alphas', fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n\n\ndef plot_process_general_data(csv_file, block=False):\n labels_to_plot = [\n # 'mean-sq-bellman-error',\n # 'Bellman Residual (QFcn)',\n # 'Surrogate Reward (Policy)',\n # 'return-average',\n 
'Exploration Returns Mean',\n 'Test Returns Mean',\n # 'episode-length-min',\n # 'episode-length-max',\n\n # 'Log Pis'\n ]\n\n # if n_unintentional is None:\n # n_unintentional = 0\n # else:\n # n_unintentional += 1\n n_unintentional = 0\n #\n # # Add Intentional-Unintentional Label\n # new_labels = list()\n # for label in labels_to_plot:\n # for uu in range(n_unintentional):\n # new_string = ('[U-%02d] ' % uu) + label\n # new_labels.append(new_string)\n #\n # new_string = '[I] ' + label\n # new_labels.append(new_string)\n\n new_labels = labels_to_plot\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('General Info',\n fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n\n\ndef plot_process_haarnoja(csv_file, n_unintentional=None, block=False):\n labels_to_plot = ['return-average', 'episode-length-avg', 'log-pi-mean', 'log-sigs-mean']\n\n if n_unintentional is None:\n n_unintentional = 0\n else:\n n_unintentional += 1\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_unintentional):\n new_string = ('[U-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n # new_string = '[I] ' + label\n new_string = label\n new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_unintentional + 1)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Avg Return and Avg Reward',\n fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n" }, { "alpha_fraction": 0.5926076173782349, "alphanum_fraction": 0.6092607378959656, "avg_line_length": 27.627906799316406, "blob_id": "012b90c3bc1a4c8451bf50c50c1f8b0ce2a3eb86", "content_id": "bd1f0738e600eeabc2b0b22dcd9684d81ebf371f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4924, "license_type": "permissive", "max_line_length": 80, "num_lines": 172, "path": "/examples/rl_algos/gps/reacher_gps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Reinforce on Pusher2D3DofGoalCompoEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofBulletEnv\n\nfrom robolearn.algorithms.rl_algos import MDGPS\n\nfrom robolearn.torch.policies import MlpPolicy\nfrom robolearn.torch.policies import LinearGaussianPolicy\n\nimport argparse\n\nN_LOCAL_POLS = 3\nPATH_LENGTH = 100\nPATHS_PER_LOCAL_POL = 5\nPATHS_PER_EVAL = 1\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\n\ndef experiment(variant):\n ptu.set_gpu_mode(variant['gpu'])\n\n # env = NormalizedBoxEnv(\n # Reacher2D3DofBulletEnv(**variant['env_params'])\n # )\n env = 
Reacher2D3DofBulletEnv(**variant['env_params'])\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n initial_conds = [\n [10, 5, 20, 0.2, 0.5, 0],\n [10, 5, 20, 0.1, 0.1, 0],\n [10, 5, 20, 0.15, 0.8, 0],\n ]\n\n for init_cond in initial_conds:\n env.add_initial_condition(robot_config=np.deg2rad(init_cond[:3]),\n tgt_state=init_cond[-3:])\n\n net_size = variant['net_size']\n # global_policy = TanhGaussianPolicy(\n global_policy = MlpPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n local_policies = [LinearGaussianPolicy(obs_dim=obs_dim,\n action_dim=action_dim,\n T=PATH_LENGTH,\n )\n for _ in range(N_LOCAL_POLS)]\n #\n # replay_buffer = FakeReplayBuffer()\n # variant['algo_params']['replay_buffer'] = replay_buffer\n #\n # # QF Plot\n # # variant['algo_params']['epoch_plotter'] = None\n\n algorithm = MDGPS(\n env=env,\n eval_env=env,\n save_environment=False,\n local_policies=local_policies,\n global_policy=global_policy,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n return algorithm\n\n\nexpt_params = dict(\n algo_name=MDGPS.__name__,\n algo_params=dict(\n # Common RLAlgo params\n num_epochs=10, # n_epochs\n rollouts_per_epoch=N_LOCAL_POLS * PATHS_PER_LOCAL_POL,\n num_steps_per_epoch=N_LOCAL_POLS * PATHS_PER_LOCAL_POL * PATH_LENGTH,\n num_updates_per_train_call=1, # How to many run algorithm train fcn\n num_steps_per_eval=N_LOCAL_POLS * PATHS_PER_EVAL * PATH_LENGTH,\n # EnvSampler params\n max_path_length=PATH_LENGTH, # max_path_length\n render=False,\n # MDGPS params\n traj_opt_inner_iters=1,\n train_cond_idxs=[0, 1, 2],\n test_cond_idxs=[0, 1, 2],\n ),\n net_size=64\n)\n\n\nenv_params = dict(\n is_render=False,\n obs_with_img=False,\n rdn_tgt_pos=True,\n tgt_pose=None,\n rdn_robot_config=True,\n robot_config=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n obs_distances=False, # If True obs contain 'distance' vectors instead poses\n tgt_cost_weight=1.0,\n ctrl_cost_weight=1.0e-2,\n use_log_distances=False,\n # use_log_distances=False,\n log_alpha=1e-6,\n tgt_tolerance=0.05,\n max_time=10,\n # max_time=PATH_LENGTH*DT,\n half_env=False,\n)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=50)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n expt_variant = expt_params\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'reacher_gps'\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n expt_variant['env_params'] = env_params\n expt_variant['env_params']['is_render'] = args.render\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algo = 
experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.5599187612533569, "alphanum_fraction": 0.5890318155288696, "avg_line_length": 26.867923736572266, "blob_id": "810e418cec9b651b9f95ae2946275d0ea44b82b2", "content_id": "eeb851c23146056f337cdd474cf63931429705cf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1477, "license_type": "permissive", "max_line_length": 113, "num_lines": 53, "path": "/examples/rl_algos/ddpg/scripts/navigation2d_hiu_ddpg.sh", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# #### #\n# VARS #\n# #### #\nexpt_name='objective_test'\nenv_name='navigation2d'\nalgo_name='hiu_ddpg'\n\npython_script=${env_name}'_'${algo_name}\nlog_dir_path='/home/desteban/logs/'${expt_name}'/'${env_name}'/'\n\n#seeds=(0 50 100)\n#init_index=0\n#end_index=3\n#seeds=(\"${seeds[@]:${init_index}:${end_index}}\")\n\n#default_seeds=(0 1 2 3 4 5 6 7 8 9)\ndefault_seeds=(610 710 810 910 1010)\n#default_seeds=(510)\nseeds=(\"${@:-${default_seeds[@]}}\")\ntotal_seeds=${#seeds[@]}\n\n#default_subtasks=(0 1 -1)\ndefault_subtasks=(-1)\nsubtasks=(\"${@:-${default_subtasks[@]}}\")\ntotal_subtasks=${#subtasks[@]}\n\ntotal_scripts=$(($total_seeds * $total_subtasks))\n\necho \"Robolearn DRL script\"\necho \"Total seeds: ${#seeds[@]}\"\necho \"Experiment seeds: ${seeds[@]}\"\necho \"\"\n\nfor seed_idx in ${!seeds[@]}; do\nfor subtask_idx in ${!subtasks[@]}; do\n seed=${seeds[seed_idx]}\n subtask=${subtasks[subtask_idx]}\n# script_index=$((index+init_index))\n script_index=$(((seed_idx)*total_subtasks + subtask_idx))\n echo \"********************************************************\"\n echo \"Running '${python_script}.py' $((script_index+1))/${total_scripts} | Seed: ${seed} Subtask: ${subtask}\"\n\n expt_name='sub'${subtask}_${algo_name}_${seed}\n echo \"Log_dir '${log_dir_path}'\"\n\n log_dir=${log_dir_path}'sub'${subtask}'/'${algo_name}_${seed}\n\n python ../${python_script}.py --seed ${seed} --subtask ${subtask} \\\n --log_dir ${log_dir} --expt_name ${env_name} --gpu\ndone\ndone\n" }, { "alpha_fraction": 0.521242082118988, "alphanum_fraction": 0.5261847376823425, "avg_line_length": 32.591888427734375, "blob_id": "5830626c409b2fd1ef577be156544f2fbcf42bda", "content_id": "1e7827d6bf095a6e16a6ffc2bada99507648f002", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26504, "license_type": "permissive", "max_line_length": 86, "num_lines": 789, "path": "/robolearn/torch/algorithms/rl_algos/ddpg/hiu_ddpg.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Pong's SAC implementation\n\nhttps://github.com/vitchyr/rlkit\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nimport torch.optim as optim\n\nfrom collections import OrderedDict\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.samplers import InPlacePathSampler\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.utils.data_management.normalizer import RunningNormalizer\n\nimport tensorboardX\n\n\nclass HIUDDPG(RLAlgorithm, TorchAlgorithm):\n \"\"\"\n Hierarchical Intentional-Unintentional 
Deep Deterministic Policy Gradient\n (HIU-DDPG).\n \"\"\"\n def __init__(\n self,\n env,\n policy,\n explo_policy,\n u_qf,\n\n replay_buffer,\n batch_size=1024,\n normalize_obs=False,\n eval_env=None,\n\n i_qf=None,\n\n action_prior='uniform',\n\n policy_lr=3e-4,\n qf_lr=1e-4,\n\n i_policy_pre_activation_weight=0.,\n i_policy_mixing_coeff_weight=1e-3,\n u_policy_pre_activation_weight=None,\n\n policy_weight_decay=0.,\n qf_weight_decay=0.,\n\n optimizer='adam',\n # optimizer='rmsprop',\n # optimizer='sgd',\n optimizer_kwargs=None,\n\n i_soft_target_tau=1e-2,\n u_soft_target_tau=1e-2,\n i_target_update_interval=1,\n u_target_update_interval=1,\n\n reward_scale=1.,\n u_reward_scales=None,\n\n min_q_value=-np.inf,\n max_q_value=np.inf,\n\n residual_gradient_weight=0,\n\n eval_with_target_policy=False,\n save_replay_buffer=False,\n log_tensorboard=False,\n **kwargs\n ):\n\n # ###### #\n # Models #\n # ###### #\n\n # Deterministic Policies\n self._policy = policy\n self._target_policy = policy.copy()\n\n # Exploration Policy\n self._exploration_policy = explo_policy\n\n # Evaluation Policy\n if eval_with_target_policy:\n eval_policy = self._target_policy\n else:\n eval_policy = self._policy\n\n # Observation Normalizer\n if normalize_obs:\n self._obs_normalizer = RunningNormalizer(shape=env.obs_dim)\n else:\n self._obs_normalizer = None\n\n RLAlgorithm.__init__(\n self,\n explo_env=env,\n explo_policy=self._exploration_policy,\n eval_env=eval_env,\n eval_policy=eval_policy,\n obs_normalizer=self._obs_normalizer,\n **kwargs\n )\n\n # Number of Unintentional Tasks (Composable Tasks)\n self._n_unintentional = self._policy.n_heads\n\n # Evaluation Sampler (One for each unintentional)\n self.eval_u_samplers = [\n InPlacePathSampler(\n env=env,\n policy=WeightedMultiPolicySelector(eval_policy, idx),\n total_samples=self.num_steps_per_eval,\n max_path_length=self.max_path_length,\n deterministic=None,\n )\n for idx in range(self._n_unintentional)\n ]\n\n # Important algorithm hyperparameters\n self._action_prior = action_prior\n\n # Intentional (Main Task) Q-function\n self._i_qf = i_qf\n self._i_target_qf = i_qf.copy()\n\n # Unintentional (Composable Tasks) Q-functions\n self._u_qf = u_qf\n self._u_target_qf = u_qf.copy()\n\n self._min_q_value = min_q_value\n self._max_q_value = max_q_value\n self._residual_gradient_weight = residual_gradient_weight\n\n # Soft-update rate for target V-functions\n self._i_soft_target_tau = i_soft_target_tau\n self._u_soft_target_tau = u_soft_target_tau\n self._i_target_update_interval = i_target_update_interval\n self._u_target_update_interval = u_target_update_interval\n\n # Reward Scales\n self.reward_scale = reward_scale\n if u_reward_scales is None:\n reward_scale = kwargs['reward_scale']\n u_reward_scales = [reward_scale\n for _ in range(self._n_unintentional)]\n self._u_reward_scales = ptu.FloatTensor(u_reward_scales)\n\n # Replay Buffer\n self.replay_buffer = replay_buffer\n self.batch_size = batch_size\n self.save_replay_buffer = save_replay_buffer\n\n # ########## #\n # Optimizers #\n # ########## #\n if optimizer.lower() == 'adam':\n optimizer_class = optim.Adam\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n amsgrad=True,\n # amsgrad=False,\n )\n elif optimizer.lower() == 'rmsprop':\n optimizer_class = optim.RMSprop\n if optimizer_kwargs is None:\n optimizer_kwargs = dict(\n\n )\n else:\n raise ValueError('Wrong optimizer')\n self._qf_lr = qf_lr\n self._policy_lr = policy_lr\n\n # Q-function and V-function Optimization Criteria\n 
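        # (mean-squared Bellman error between the target backup and the\n        # current Q prediction, cf. the losses computed in _do_training)\n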
self._u_qf_criterion = nn.MSELoss()\n self._i_qf_criterion = nn.MSELoss()\n\n # Q-function(s) optimizers(s)\n self._u_qf_optimizer = optimizer_class(\n self._u_qf.parameters(),\n lr=qf_lr,\n weight_decay=qf_weight_decay,\n **optimizer_kwargs\n )\n self._i_qf_optimizer = optimizer_class(\n self._i_qf.parameters(),\n lr=qf_lr,\n weight_decay=qf_weight_decay,\n **optimizer_kwargs\n )\n\n # Policy optimizer\n self._policy_optimizer = optimizer_class(\n self._policy.parameters(),\n lr=policy_lr,\n weight_decay=policy_weight_decay,\n **optimizer_kwargs\n )\n\n # Policy regularization coefficients (weights)\n self._i_pol_pre_activ_weight = i_policy_pre_activation_weight\n self._i_pol_mixing_coeff_weight = i_policy_mixing_coeff_weight\n\n if u_policy_pre_activation_weight is None:\n u_policy_pre_activation_weight = [\n i_policy_pre_activation_weight\n for _ in range(self._n_unintentional)\n ]\n self._u_policy_pre_activ_weight = \\\n ptu.FloatTensor(u_policy_pre_activation_weight)\n\n # Useful Variables for logging\n self.log_data = dict()\n self.log_data['Raw Pol Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Pol Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Qf Loss'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Rewards'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n ))\n self.log_data['Policy Action'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional + 1,\n self.explo_env.action_dim,\n ))\n self.log_data['Mixing Weights'] = np.zeros((\n self.num_train_steps_per_epoch,\n self._n_unintentional,\n self.explo_env.action_dim,\n ))\n\n # Tensorboard-like Logging\n self._log_tensorboard = log_tensorboard\n if log_tensorboard:\n self._summary_writer = \\\n tensorboardX.SummaryWriter(log_dir=logger.get_snapshot_dir())\n else:\n self._summary_writer = None\n\n def pretrain(self, n_pretrain_samples):\n # We do not require any pretrain (I think...)\n observation = self.explo_env.reset()\n for ii in range(n_pretrain_samples):\n action = self.explo_env.action_space.sample()\n # Interact with environment\n next_ob, reward, terminal, env_info = (\n self.explo_env.step(action)\n )\n agent_info = None\n\n # Increase counter\n self._n_env_steps_total += 1\n # Create np.array of obtained terminal and reward\n terminal = np.array([terminal])\n reward = np.array([reward])\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_ob,\n agent_info=agent_info,\n env_info=env_info,\n )\n observation = next_ob\n\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n if terminal:\n self.explo_env.reset()\n\n def _do_training(self):\n # Get batch of samples\n batch = self.get_batch()\n\n # Get common data from batch\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n # Get the idx for logging\n step_idx = self._n_epoch_train_steps\n\n # ######################## #\n # Get Next Obs Policy Info #\n # ######################## #\n i_next_actions, policy_info = self._target_policy(\n next_obs,\n pol_idx=None,\n optimize_policies=False,\n )\n u_next_actions = policy_info['pol_actions'].detach()\n\n # ########################## #\n # Unintentional Critics Step #\n # ########################## #\n u_rewards = \\\n 
(batch['reward_vectors'] * self._u_reward_scales).unsqueeze(-1)\n u_terminals = (batch['terminal_vectors']).unsqueeze(-1)\n\n # Unintentional Q Values: Q(s', a')\n u_next_q = torch.cat(\n [\n self._u_target_qf(next_obs, u_next_actions[:, uu, :])[0][uu].unsqueeze(1)\n for uu in range(self._n_unintentional)\n ],\n dim=1\n )\n\n # Calculate Bellman Backup for Unintentional Q-values\n u_q_backup = u_rewards + (1. - u_terminals) * self.discount * u_next_q\n u_q_backup = u_q_backup.detach()\n u_q_backup = torch.clamp(u_q_backup, self._min_q_value, self._max_q_value)\n # Hack for ICLR rebuttal\n if hasattr(self, 'reward_type') and self.reward_type == 'indicator':\n # TODO: CHECK THIS\n u_q_backup = \\\n torch.clamp(u_q_backup, -self.reward_scale/(1-self.discount), 0)\n\n u_q_pred = torch.cat([qq.unsqueeze(1)\n for qq in self._u_qf(obs, actions)[0]],\n dim=1)\n\n # Unintentional QF Loss: Mean Squared Bellman Equation (MSBE)\n u_qf_loss = \\\n 0.5*torch.mean((u_q_backup - u_q_pred)**2, dim=0).squeeze(-1)\n # MSBE Q Loss For all unintentional policies\n total_u_qf_loss = torch.sum(u_qf_loss)\n\n if self._residual_gradient_weight > 0:\n raise NotImplementedError\n\n # Update Unintentional Q-value functions\n self._u_qf_optimizer.zero_grad()\n total_u_qf_loss.backward()\n self._u_qf_optimizer.step()\n\n # ####################### #\n # Intentional Critic Step #\n # ####################### #\n # Get Intentional rewards and terminals\n i_rewards = batch['rewards'] * self.reward_scale\n i_terminals = batch['terminals']\n\n # Intentional target Q Values: Q(s', a')\n i_next_q = self._i_target_qf(next_obs, i_next_actions)[0]\n\n # Calculate Intentional QF Losses (Bellman Eq.)\n i_q_backup = i_rewards + (1. - i_terminals) * self.discount * i_next_q\n i_q_backup = i_q_backup.detach()\n i_q_backup = torch.clamp(i_q_backup, self._min_q_value, self._max_q_value)\n # Hack for ICLR rebuttal\n if hasattr(self, 'reward_type') and self.reward_type == 'indicator':\n # TODO: CHECK THIS\n i_q_backup = \\\n torch.clamp(i_q_backup, -self.reward_scale/(1-self.discount), 0)\n\n i_q_pred = self._i_qf(obs, actions)[0]\n\n i_qf_loss = \\\n 0.5*torch.mean((i_q_backup - i_q_pred)**2, dim=0)\n\n if self._residual_gradient_weight > 0:\n raise NotImplementedError\n\n # Update Intentional Q-value model parameters\n self._i_qf_optimizer.zero_grad()\n i_qf_loss.backward()\n self._i_qf_optimizer.step()\n\n # #################### #\n # Unintentional Actors #\n # #################### #\n\n # Get Obs Policy Info #\n i_new_actions, policy_info = self._policy(\n obs,\n pol_idx=None,\n optimize_policies=False,\n )\n u_new_actions = policy_info['pol_actions']\n\n i_new_pre_tanh_value = policy_info['pre_tanh_value']\n u_new_pre_tanh_values = policy_info['pol_pre_tanh_values']\n new_mixing_coeff = policy_info['mixing_coeff']\n\n if self._action_prior == 'normal':\n raise NotImplementedError\n else:\n u_policy_prior_log_probs = 0.0 # Uniform prior\n\n # Get Unintentional Q1(s, a)\n u_q_new_actions = torch.cat(\n [self._u_qf(obs, u_new_actions[:, uu, :])[0][uu].unsqueeze(1)\n for uu in range(self._n_unintentional)\n ],\n dim=1\n )\n\n # Unintentional Policies KL loss: - (E_a[Q(s, a)])\n u_raw_policy_loss = -u_q_new_actions.mean(dim=0).squeeze(-1)\n\n # Get Unintentional Policies regularization loss\n u_pre_activation_reg_loss = \\\n self._u_policy_pre_activ_weight * \\\n (u_new_pre_tanh_values**2).sum(dim=-1).mean(dim=0).mean(dim=-1)\n u_policy_regu_loss = u_pre_activation_reg_loss + 0\n\n # Get Unintentional Policies Total loss\n u_policy_loss = 
(u_raw_policy_loss + u_policy_regu_loss)\n total_u_policy_loss = torch.sum(u_policy_loss)\n\n # ################# #\n # Intentional Actor #\n # ################# #\n if self._action_prior == 'normal':\n raise NotImplementedError\n else:\n i_policy_prior_log_probs = 0.0 # Uniform prior\n\n # Intentional Q(s, a)\n i_q_new_actions = self._i_qf(obs, i_new_actions)[0]\n\n # Intentional KL loss: - (E_a[Q(s, a)])\n i_raw_policy_loss = -i_q_new_actions.mean()\n\n # Intentional policy regularization loss\n i_pre_activation_reg_loss = \\\n self._i_pol_pre_activ_weight * \\\n (i_new_pre_tanh_value**2).sum(dim=-1).mean()\n # TODO: Check the mixing coeff loss:\n mixing_coeff_loss = self._i_pol_mixing_coeff_weight * \\\n 0.5*((new_mixing_coeff ** 2).sum(dim=-1)).mean()\n i_policy_regu_loss = (i_pre_activation_reg_loss + mixing_coeff_loss)\n\n # Intentional Policy Total loss\n i_policy_loss = (i_raw_policy_loss + i_policy_regu_loss)\n\n # Update both Intentional and Unintentional Policies at the same time\n self._policy_optimizer.zero_grad()\n total_iu_loss = total_u_policy_loss + i_policy_loss\n total_iu_loss.backward()\n self._policy_optimizer.step()\n\n # ###################### #\n # Update Target Networks #\n # ###################### #\n if self._n_total_train_steps % self._u_target_update_interval == 0:\n ptu.soft_update_from_to(\n source=self._u_qf,\n target=self._u_target_qf,\n tau=self._u_soft_target_tau\n )\n if self._n_total_train_steps % self._i_target_update_interval == 0:\n ptu.soft_update_from_to(\n source=self._i_qf,\n target=self._i_target_qf,\n tau=self._i_soft_target_tau\n )\n if self._n_total_train_steps % self._i_target_update_interval == 0:\n ptu.soft_update_from_to(\n source=self._policy,\n target=self._target_policy,\n tau=self._i_soft_target_tau\n )\n\n # ############### #\n # LOG Useful Data #\n # ############### #\n self.log_data['Raw Pol Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_raw_policy_loss.squeeze(-1))\n self.log_data['Raw Pol Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_raw_policy_loss)\n\n self.log_data['Pol Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_policy_loss.squeeze(-1))\n self.log_data['Pol Loss'][step_idx, -1] = \\\n ptu.get_numpy(i_policy_loss)\n\n self.log_data['Qf Loss'][step_idx, :-1] = \\\n ptu.get_numpy(u_qf_loss.squeeze(-1))\n self.log_data['Qf Loss'][step_idx, -1] = ptu.get_numpy(i_qf_loss)\n\n self.log_data['Rewards'][step_idx, :-1] = \\\n ptu.get_numpy(u_rewards.mean(dim=0).squeeze(-1))\n self.log_data['Rewards'][step_idx, -1] = \\\n ptu.get_numpy(i_rewards.mean(dim=0).squeeze(-1))\n\n self.log_data['Mixing Weights'][step_idx, :, :] = \\\n ptu.get_numpy(new_mixing_coeff.mean(dim=0))\n\n self.log_data['Policy Action'][step_idx, :-1, :] = \\\n ptu.get_numpy(u_new_actions.mean(dim=0))\n self.log_data['Policy Action'][step_idx, -1, :] = \\\n ptu.get_numpy(i_new_actions.mean(dim=0))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'TrainingI/qf_loss',\n ptu.get_numpy(i_qf_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/avg_reward',\n ptu.get_numpy(i_rewards.mean()),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/policy_loss',\n ptu.get_numpy(i_policy_loss),\n self._n_env_steps_total\n )\n self._summary_writer.add_scalar(\n 'TrainingI/q_vals',\n ptu.get_numpy(i_q_new_actions.mean()),\n self._n_env_steps_total\n )\n\n def _not_do_training(self):\n return\n\n @property\n def torch_models(self):\n networks_list = [\n self._policy,\n self._target_policy,\n self._i_qf,\n 
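            # (target copies are listed too; this list is assumed to be\n            # what TorchAlgorithm uses for device moves and snapshots)\n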
self._i_target_qf,\n self._u_qf,\n self._u_target_qf,\n ]\n\n return networks_list\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Stuff to save in file.\n Args:\n epoch:\n\n Returns:\n\n \"\"\"\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n self._epoch_plotter.save_figure(epoch)\n\n snapshot = RLAlgorithm.get_epoch_snapshot(self, epoch)\n\n snapshot.update(\n policy=self._policy,\n target_policy=self._target_policy,\n exploration_policy=self._exploration_policy,\n qf=self._i_qf,\n target_qf=self._i_target_qf,\n u_qf=self._u_qf,\n target_u_qf=self._u_target_qf,\n )\n\n if self.explo_env.online_normalization or self.explo_env.normalize_obs:\n snapshot.update(\n obs_mean=self.explo_env.obs_mean,\n obs_var=self.explo_env.obs_var,\n )\n\n # Observation Normalizer\n snapshot.update(\n obs_normalizer=self._obs_normalizer,\n )\n\n # Replay Buffer\n if self.save_replay_buffer:\n snapshot.update(\n replay_buffer=self.replay_buffer,\n )\n\n return snapshot\n\n def _update_logging_data(self):\n max_step = max(self._n_epoch_train_steps, 1)\n\n if self.eval_statistics is None:\n self.eval_statistics = OrderedDict()\n\n # Unintentional info\n for uu in range(self._n_unintentional):\n self.eval_statistics['[U-%02d] Qf Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Raw Policy Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Raw Pol Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Policy Loss' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol Loss'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Rewards' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Rewards'][:max_step, uu]\n ))\n self.eval_statistics['[U-%02d] Mixing Weights' % uu] = \\\n np.nan_to_num(np.mean(\n self.log_data['Mixing Weights'][:max_step, uu]\n ))\n\n for aa in range(self.explo_env.action_dim):\n self.eval_statistics['[U-%02d] Policy Action [%02d]' % (uu, aa)] = \\\n np.nan_to_num(np.mean(\n self.log_data['Policy Action'][:max_step, uu, aa]\n ))\n\n # Intentional info\n self.eval_statistics['[I] Qf Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Qf Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Raw Policy Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Raw Pol Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Policy Loss'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Pol Loss'][:max_step, -1]\n ))\n self.eval_statistics['[I] Rewards'] = \\\n np.nan_to_num(np.mean(\n self.log_data['Rewards'][:max_step, -1]\n ))\n\n def evaluate(self, epoch):\n statistics = OrderedDict()\n self._update_logging_data()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n # Interaction Paths for each unintentional policy\n test_paths = [None for _ in range(self._n_unintentional)]\n for unint_idx in range(self._n_unintentional):\n logger.log(\"[U-%02d] Collecting samples for evaluation\" % unint_idx)\n test_paths[unint_idx] = \\\n self.eval_u_samplers[unint_idx].obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths[unint_idx], stat_prefix=\"[U-%02d] Test\" % unint_idx,\n ))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'EvaluationU%02d/avg_return' % unint_idx,\n statistics['[U-%02d] Test Returns Mean' % unint_idx],\n self._n_epochs\n )\n\n self._summary_writer.add_scalar(\n 'EvaluationU%02d/avg_reward' % unint_idx,\n statistics['[U-%02d] Test Rewards Mean' % unint_idx],\n self._n_epochs\n )\n\n # Interaction Paths 
for the intentional policy\n logger.log(\"[I] Collecting samples for evaluation\")\n i_test_paths = self.eval_sampler.obtain_samples()\n statistics.update(eval_util.get_generic_path_information(\n i_test_paths, stat_prefix=\"[I] Test\",\n ))\n\n if self._exploration_paths:\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n else:\n statistics.update(eval_util.get_generic_path_information(\n i_test_paths, stat_prefix=\"Exploration\",\n ))\n\n if self._log_tensorboard:\n self._summary_writer.add_scalar(\n 'EvaluationI/avg_return',\n statistics['[I] Test Returns Mean'],\n self._n_epochs\n )\n\n self._summary_writer.add_scalar(\n 'EvaluationI/avg_reward',\n statistics['[I] Test Rewards Mean'] * self.reward_scale,\n self._n_epochs\n )\n\n if hasattr(self.explo_env, \"log_diagnostics\"):\n pass\n # # TODO: CHECK ENV LOG_DIAGNOSTICS\n # print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n # Epoch Plotter\n if self._epoch_plotter is not None:\n self._epoch_plotter.draw()\n\n # Reset log_data\n for key in self.log_data.keys():\n self.log_data[key].fill(0)\n\n def get_batch(self):\n batch = self.replay_buffer.random_batch(self.batch_size)\n\n if self._obs_normalizer is not None:\n batch['observations'] = \\\n self._obs_normalizer.normalize(batch['observations'])\n batch['next_observations'] = \\\n self._obs_normalizer.normalize(batch['next_observations'])\n\n return batch\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n # Add to replay buffer\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n # Update observation normalizer (if applicable)\n if self._obs_normalizer is not None:\n self._obs_normalizer.update(np.array([observation]))\n\n RLAlgorithm._handle_step(\n self,\n observation=observation,\n action=action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _end_rollout(self):\n \"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n\n self.replay_buffer.terminate_episode()\n\n RLAlgorithm._end_rollout(self)\n" }, { "alpha_fraction": 0.7231759428977966, "alphanum_fraction": 0.731044352054596, "avg_line_length": 33.09756088256836, "blob_id": "93ecd662ffa1ce5ea083ab4edff2444627610930", "content_id": "30dc6c2f1a25683fcb7f3b810624dcd6b2f2cfce", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1398, "license_type": "permissive", "max_line_length": 212, "num_lines": 41, "path": "/README.md", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "# RoboLearn\n\"A *Python* package for Robot Learning\"\n\n<p align=\"center\">\n<img src=\"robolearn_logo2.png\" alt=\"robolearn_logo\" width=\"100\" height=\"100\" class=\"center\" />\n</p>\n\n**Robolearn** is a python package, mainly focused on learning control, that defines common interfaces\nbetween robot learning algorithms and real/simulated robots.\n\n**This package is ongoing**, so this version is still in development. 
Sorry for any inconvenience.\n\n![robolearn diagram](robolearn_diagram.png)\n\n\n# Installation\n\n```bash\ngit clone https://github.com/domingoesteban/robolearn\ncd robolearn\npip install -e .\n```\n\n# Citation\nIf you use this code or it gave you some ideas or it was useful for something else for your research,\nI would appreciate that you can cite:\n\n @misc{robolearn,\n author = {Esteban, Domingo},\n title = {RoboLearn},\n year = {2018},\n publisher = {GitHub},\n journal = {GitHub repository},\n howpublished = {\\url{https://github.com/domingoesteban/robolearn}},\n }\n\n<!--\n# Acknowledgements\n- Vitchyr Pong for rlkit repository ([rlkit repository](https://github.com/vitchyr/rlkit)). Some algorithms are based (or almost the same) the ones in rlkit. Many functionalities of robolearn use code from rlkit.\n- Tuomas Haarnoja for softqlearning repository ([softqlearning repository](https://github.com/haarnoja/softqlearning)). SoftQLearning is based in this TensorFlow implementation.\n-->\n" }, { "alpha_fraction": 0.5573863387107849, "alphanum_fraction": 0.5798295736312866, "avg_line_length": 30.711711883544922, "blob_id": "ba56fa0eb84f8ac67b40b2b977c154c124f71b9f", "content_id": "9b5b3a7505cb116439e96daa3decc6ccb9c203fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3520, "license_type": "permissive", "max_line_length": 112, "num_lines": 111, "path": "/examples/v010/continuous_envs/reacher/model_learning.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import argparse\n\nfrom robolearn.old_envs.reacher import ReacherBulletEnv\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--arg1', type=int, default=60)\n parser.add_argument('--arg2', type=str, default='string1')\n parser.add_argument('--env_with_img', action=\"store_true\", dest='env_with_img',\n default=False)\n args = parser.parse_args()\n\n EndTime = 1.0\n env_with_img = parser['env_with_img']\n obs_like_mjc = True\n ntargets = 2\n tgt_weights = [1.0, -1.0]\n rdn_tgt_pos = False\n tgt_positions = [(0.1, 0.2), (-0.1, -0.2)] # Values between [-0.2, 0.2]\n rdn_init_cfg = False\n\n # Initialize environment\n env = ReacherBulletEnv(render=False, obs_with_img=env_with_img, obs_mjc_gym=obs_like_mjc, ntargets=ntargets,\n rdn_tgt_pos=rdn_tgt_pos)\n env.seed(0)\n\n # Collect initial training data\n\n # Collect initial validation data\n\n #\n EndTime = 1.0\n env_with_img = False\n obs_like_mjc = True\n ntargets = 2\n tgt_weights = [1.0, -1.0]\n rdn_tgt_pos = False\n tgt_positions = [(0.1, 0.2), (-0.1, -0.2)] # Values between [-0.2, 0.2]\n rdn_init_cfg = False\n # env = gym.make('ReacherBullet-v5')\n env = ReacherBulletEnv(render=False, obs_with_img=env_with_img, obs_mjc_gym=obs_like_mjc, ntargets=ntargets,\n rdn_tgt_pos=rdn_tgt_pos)\n env.seed(0)\n\n episode_count = 5\n reward = 0\n done = False\n img_width = 256\n img_height = 256\n env.change_img_size(height=img_height, width=img_width)\n env.set_tgt_cost_weights(tgt_weights)\n env.set_tgt_pos(tgt_positions)\n\n ts = env.dt\n total_steps = int(EndTime/ts)\n\n # Agent\n agent = ILQRAgent(env.action_space.shape[0], env.observation_space.shape[0], total_steps)\n agent.seed(5)\n print(agent.obs_dim)\n print(agent.act_dim)\n input('saadsfsdhfkj')\n\n fig, ax = plt.subplots(1, 1)\n my_image = ax.imshow(np.zeros((img_width, img_height, 3)), interpolation='nearest', animated=True)\n fig.canvas.draw()\n background = fig.canvas.copy_from_bbox(ax.bbox) # cache the 
\n    plt.ion()\n    plt.show()\n\n    # env.render(mode='human')  # Only if we want at the beginning\n    for i in range(episode_count):\n        # input('Press key to reset episode %d/%d' % (i+1, episode_count))\n        ob = env.reset()\n        input('Press key to start episode %d/%d' % (i+1, episode_count))\n\n        steps_counter = 0\n\n        # while True:\n        while steps_counter < total_steps:\n            print('external_counter', steps_counter)\n            action = agent.act(ob, reward, done) * 0.001\n\n            ob, reward, done, _ = env.step(action)\n            if done:\n                print('ENVIRONMENT DONE!!!')\n                break\n            # env.render()\n\n            if env_with_img:\n                dim_img_data = img_width*img_height*3\n                rgb_image = ob[-dim_img_data:].astype(np.uint8).reshape(img_width, img_height, 3)\n            else:\n                rgb_image = env.render(mode='rgb_array')\n            my_image.set_data(rgb_image)\n            fig.canvas.restore_region(background)  # restore background\n            ax.draw_artist(my_image)\n            fig.canvas.blit(ax.bbox)  # redraw the axes rectangle\n            # fig.canvas.draw()\n\n            # plt.pause(1./100.)\n            # time.sleep(1./100.)\n            steps_counter += 1\n\n    env.close()\n    input('Press a key to finish the script...')\n\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.5106266736984253, "alphanum_fraction": 0.5166212320327759, "avg_line_length": 32.3636360168457, "blob_id": "6d063574284cf26b547586fa4c35affe7d3f82e7", "content_id": "69001e3a2d42fee077abb6be69b83e1c17bb9783", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "permissive", "max_line_length": 75, "num_lines": 55, "path": "/robolearn/torch/models/values/nn_qfunction.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.torch.utils.nn import FlattenMlp\nfrom robolearn.utils.serializable import Serializable\nfrom robolearn.models import QFunction\n\n\nclass NNQFunction(FlattenMlp, Serializable, QFunction):\n    def __init__(self,\n                 obs_dim,\n                 action_dim,\n                 hidden_sizes=(100, 100),\n                 **kwargs):\n        QFunction.__init__(self,\n                           obs_dim=obs_dim,\n                           action_dim=action_dim)\n\n        self._serializable_initialized = False\n        Serializable.quick_init(self, locals())\n        self.save_init_params(locals())\n        FlattenMlp.__init__(self,\n                            hidden_sizes=hidden_sizes,\n                            input_size=obs_dim+action_dim,\n                            output_size=1,\n                            **kwargs\n                            )\n\n    def get_value(self, obs_np, act_np, **kwargs):\n        values, info_dict = \\\n            self.get_values(obs_np[None], act_np[None], **kwargs)\n\n        for key, val in info_dict.items():\n            if isinstance(val, np.ndarray):\n                info_dict[key] = val[0, :]\n\n        return values[0, :], info_dict\n\n    def get_values(self, obs_np, act_np, **kwargs):\n        return self.eval_np(obs_np, act_np, **kwargs)\n\n    def forward(self, obs, action, return_preactivations=False):\n        nn_output = \\\n            FlattenMlp.forward(self, obs, action,\n                               return_preactivations=return_preactivations)\n\n        if return_preactivations:\n            value = nn_output[0]\n            pre_activations = nn_output[1]\n            info_dict = dict(\n                pre_activations=pre_activations,\n            )\n        else:\n            value = nn_output\n            info_dict = dict()\n\n        return value, info_dict\n" }, { "alpha_fraction": 0.4829931855201721, "alphanum_fraction": 0.4989215135574341, "avg_line_length": 32.483333587646484, "blob_id": "923359d20a33fb5d860982094b74c5fcf28c4bce", "content_id": "c7d0ba7f0c269b163b727eb69a514239a3d2b547", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6027, "license_type": "permissive", "max_line_length": 82, "num_lines": 180, "path": 
"/robolearn/envs/simple_envs/navigation2d/navigation2d_goalcompo_multiq_plot.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button, RadioButtons\nimport os\n# import robolearn.torch.pytorch_util as ptu\nfrom robolearn.utils.plots import canvas_draw\n\n\nclass MultiQFPolicyPlotter:\n def __init__(self, i_qf, i_policy, u_qf, u_policy, obs_lst,\n default_action, n_samples,\n render=False, save_path=None):\n self._i_qf = i_qf\n self._i_policy = i_policy\n self._u_qf = u_qf\n self._u_policy = u_policy\n\n # Convert to cuda if they didn't\n if ptu.gpu_enabled():\n self._i_qf.cuda()\n self._i_policy.cuda()\n self._u_qf.cuda()\n self._u_policy.cuda()\n\n self._obs_lst = np.array(obs_lst)\n self._default_action = default_action\n self._n_samples = n_samples\n\n self._var_inds = np.where(np.isnan(default_action))[0]\n assert len(self._var_inds) == 2\n\n self._n_demons = self._u_policy.n_heads\n\n n_plots = self._n_demons + 1\n\n x_size = 5 * n_plots\n y_size = 5\n\n self._fig = plt.figure(figsize=(x_size, y_size))\n self._ax_lst = []\n self._line_objects = list()\n\n # plt.subplots_adjust(left=0.3)\n plt.subplots_adjust(left=0.10)\n for i in range(n_plots):\n ax = self._fig.add_subplot(100 + n_plots * 10 + i + 1)\n ax.set_xlim((-1, 1))\n ax.set_ylim((-1, 1))\n ax.grid(True)\n if i == 0:\n ax.set_title('Intentional')\n else:\n ax.set_title('Unintentional %02d' % (i))\n ax.set_xlabel('Xvel')\n ax.set_ylabel('Yvel')\n self._ax_lst.append(ax)\n\n self._current_obs_idx = 1\n self._obs_labels = [str(obs) for obs in self._obs_lst]\n self._fig.canvas.set_window_title('Observation ' +\n self._obs_labels[self._current_obs_idx])\n self._plot_level_curves()\n self._plot_action_samples()\n\n self._radio_ax = \\\n self._fig.add_axes([0.01, 0.48, 0.06, 0.05*len(self._obs_lst)])\n self._radio_button = RadioButtons(self._radio_ax, self._obs_labels,\n active=self._current_obs_idx)\n self._radio_button.on_clicked(self.radio_update_plots)\n\n if save_path is None:\n self._save_path = '/home/desteban/logs/q_plots'\n else:\n self._save_path = save_path\n\n if render:\n plt.show(block=False)\n canvas_draw(self._fig.canvas, 0.05)\n\n def radio_update_plots(self, label):\n idx = self._obs_labels.index(label)\n self._current_obs_idx = idx\n\n self._fig.canvas.set_window_title('Observation ' +\n self._obs_labels[self._current_obs_idx])\n\n self.draw()\n\n def save_figure(self, itr=0):\n fig_title = self._fig.suptitle(\"Iteration %02d\" % itr, fontsize=14)\n prev_obs_idx = self._current_obs_idx\n self._radio_ax.set_visible(False)\n canvas_draw(self._fig.canvas, 0.01)\n\n for oo, label in enumerate(self._obs_labels):\n self._current_obs_idx = oo\n self.draw()\n\n fig_log_path = os.path.join(self._save_path,\n 'obs%02d' % oo,\n )\n\n if not os.path.isdir(fig_log_path):\n os.makedirs(fig_log_path)\n\n fig_log_name = os.path.join(fig_log_path,\n ('%02d' % itr).zfill(4)\n )\n\n self._fig.savefig(fig_log_name)\n\n self._current_obs_idx = prev_obs_idx\n self._radio_ax.set_visible(True)\n fig_title.set_visible(False)\n canvas_draw(self._fig.canvas, 0.01)\n\n def draw(self):\n # noinspection PyArgumentList\n [h.remove() for h in self._line_objects]\n self._line_objects = list()\n\n self._plot_level_curves()\n self._plot_action_samples()\n\n canvas_draw(self._fig.canvas, 0.01)\n\n def _plot_level_curves(self):\n # Create mesh grid.\n xs = np.linspace(-1, 1, 50)\n ys = np.linspace(-1, 1, 50)\n xgrid, ygrid = 
np.meshgrid(xs, ys)\n N = len(xs)*len(ys)\n\n # Copy default values along the first axis and replace nans with\n # the mesh grid points.\n actions = np.tile(self._default_action, (N, 1)).astype(np.float32)\n actions[:, self._var_inds[0]] = xgrid.ravel()\n actions[:, self._var_inds[1]] = ygrid.ravel()\n\n obs = self._obs_lst[self._current_obs_idx]\n for dd in range(self._n_demons+1):\n ax = self._ax_lst[dd]\n\n obs = obs.astype(np.float32)\n\n if dd == 0:\n qf = self._i_qf\n if qf is not None:\n qs = qf.get_values(np.tile(obs, (N, 1)), actions)[0]\n else:\n qs = np.zeros(N)\n else:\n qf = self._u_qf\n qs = qf.get_values(np.tile(obs, (N, 1)), actions,\n val_idxs=[dd-1])[0][-1]\n\n qs = qs.reshape(xgrid.shape)\n\n cs = ax.contour(xgrid, ygrid, qs, 20)\n self._line_objects += cs.collections\n self._line_objects += ax.clabel(\n cs, inline=1, fontsize=10, fmt='%.2f')\n\n def _plot_action_samples(self):\n obs = self._obs_lst[self._current_obs_idx]\n\n for dd in range(self._n_demons+1):\n ax = self._ax_lst[dd]\n\n if dd == 0:\n actions = self._i_policy.get_actions(\n np.ones((self._n_samples, 1)) * obs[None, :])[0]\n else:\n actions = self._u_policy.get_actions(\n np.ones((self._n_samples, 1)) * obs[None, :],\n pol_idxs=[dd-1]\n )[0]\n\n x, y = actions[:, 0], actions[:, 1]\n self._line_objects += ax.plot(x, y, 'b*')\n" }, { "alpha_fraction": 0.5786722302436829, "alphanum_fraction": 0.5961840152740479, "avg_line_length": 27.55223846435547, "blob_id": "91caa83b2d3e906d18d46ab42b65fc53dcecdfb3", "content_id": "5665845f309321ae3e2c13f552ff228071448a2d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3826, "license_type": "permissive", "max_line_length": 79, "num_lines": 134, "path": "/examples/rl_algos/spinningup/reacher/load_ppo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import time\nimport joblib\nimport os\nimport os.path as osp\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom spinup import EpochLogger\nfrom spinup.utils.logx import restore_tf_graph\n\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofGoalCompoEnv\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\n\ndef load_policy(fpath, itr='last', deterministic=False):\n # handle which epoch to load from\n if itr == 'last':\n saves = [int(x[11:])\n for x in os.listdir(fpath)\n if 'simple_save' in x and len(x) > 11]\n itr = '%d' % max(saves) if len(saves) > 0 else ''\n else:\n itr = '%d' % itr\n\n # load the things!\n sess = tf.Session()\n model = restore_tf_graph(sess, osp.join(fpath, 'simple_save'+itr))\n\n # get the correct op for executing actions\n if deterministic and 'mu' in model.keys():\n # 'deterministic' is only a valid option for SAC policies\n print('Using deterministic action op.')\n action_op = model['mu']\n else:\n print('Using default action op.')\n action_op = model['pi']\n\n # make function for producing an action given a single state\n get_action = lambda x: \\\n sess.run(action_op, feed_dict={model['x']: x[None, :]})[0]\n\n return get_action\n\n\ndef run_policy(env, policy, max_ep_len=None, num_episodes=100, render=True):\n\n logger = EpochLogger()\n obs, reward, done, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0\n while n < num_episodes:\n if render:\n env.render()\n time.sleep(1e-3)\n\n action = policy(obs)\n obs, reward, done, _ = env.step(action)\n ep_ret += reward\n ep_len += 1\n\n if done or (ep_len 
== max_ep_len):\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n print('Episode %d \\t EpRet %.3f \\t EpLen %d' % (n, ep_ret, ep_len))\n obs, reward, done, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n n += 1\n\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.dump_tabular()\n\n\ndef load_env(render=True):\n\n SEED = 660\n SUBTASK = None\n\n env_params = dict(\n is_render=render,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n obs_with_ori=False,\n obs_with_goal=True,\n # obs_with_goal=False,\n # goal_pose=(0.65, 0.65),\n goal_pose=(0.65, 0.35),\n # rdn_goal_pos=True,\n rdn_goal_pos=False,\n robot_config=None,\n rdn_robot_config=True,\n goal_cost_weight=4.0e0,\n ctrl_cost_weight=5.0e-1,\n goal_tolerance=0.01,\n use_log_distances=True,\n log_alpha=1e-6,\n # max_time=PATH_LENGTH*DT,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n half_env=True,\n subtask=SUBTASK,\n seed=SEED,\n )\n\n env = Reacher2D3DofGoalCompoEnv(**env_params)\n\n return env\n\n\ndef main(args):\n policy = load_policy(args.dir, deterministic=args.deterministic)\n env = load_env(render=not args.norender)\n\n run_policy(env, policy, args.horizon, args.episodes, not args.norender)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', type=str, default='.',\n help='path to the tf directory')\n parser.add_argument('--horizon', '-H', type=int, default=1000)\n parser.add_argument('--episodes', '-n', type=int, default=100)\n parser.add_argument('--deterministic', '-d', action='store_true')\n parser.add_argument('--norender', '-nr', action='store_true')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.6546192765235901, "alphanum_fraction": 0.6584771871566772, "avg_line_length": 31.38157844543457, "blob_id": "6c0aec4cdccd4f669ec76a41f9501915d1dee60d", "content_id": "54001a57249da6d15126e77b9afba5ab65924ee5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4925, "license_type": "permissive", "max_line_length": 140, "num_lines": 152, "path": "/scenarios/tests/ros-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\n# Threading modules\nimport threading\n\nfrom robolearn.old_envs.base import EnvInterface\n\nfrom std_msgs.msg import Float64\nfrom gazebo_msgs.msg import ModelStates\n\nfrom custom_effort_controllers.msg import CommandArrayStamped\nfrom sensor_msgs.msg import JointState as JointStateMsg\nfrom sensor_msgs.msg import Imu as ImuMsg\nfrom gazebo_msgs.msg import ContactState as ContactStateMsg\nfrom geometry_msgs.msg import WrenchStamped as WrenchStampedMsg\n\nfrom std_srvs.srv import Empty\n\nimport rospy\n\nimport subprocess\nimport os\nimport signal\nimport time\n\nROSInterface_mode = ['simulation', 'real']\n\n\nclass ROSEnvInterface(EnvInterface):\n\n def __init__(self, mode='simulation'):\n if mode != 'simulation' and mode != 'real':\n raise ValueError(\"Wrong ROSEnvInterface mode. 
Options: 'simulation' or 'real'.\")\n\n if mode == 'real':\n raise NotImplementedError(\"ROSEnvInterface 'real' mode not implemented yet.\")\n\n self.mode = mode\n\n print(\"ROSEnvInterface mode: '%s'.\" % self.mode)\n\n rospy.init_node('ROSEnvInterface')\n\n #self.reset_proxy = self.ros_service_proxy('/gazebo/reset_world', Empty)\n self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)\n\n self.last_obs = None\n self.last_act = None\n\n\n # ROS Publishers\n self.group_torque_pub = rospy.Publisher(\"/bigman/group_position_torque_controller/command\", CommandArrayStamped, queue_size=10)\n\n # ROS Subscribers\n self.subs_joint_state = rospy.Subscriber(\"/bigman/joint_states\", JointStateMsg, self.callback_joint_state)\n self.subs_imu1 = rospy.Subscriber(\"/bigman/sensor/IMU1\", ImuMsg, self.callback_imu1)\n self.subs_imu2 = rospy.Subscriber(\"/bigman/sensor/IMU2\", ImuMsg, self.callback_imu2)\n #self.subs_foot_bumper_l = rospy.Subscriber(\"/bigman/sensor/bumper/LFoot_bumper\", ContactStateMsg, self.callback_foot_bumper_l)\n #self.subs_foot_bumper_r = rospy.Subscriber(\"/bigman/sensor/bumper/RFoot_bumper\", ContactStateMsg, self.callback_foot_bumper_r)\n self.subs_ft_l = rospy.Subscriber(\"/bigman/sensor/ft_sensor/LAnkle\", WrenchStampedMsg, self.callback_ft_l)\n self.subs_ft_r = rospy.Subscriber(\"/bigman/sensor/ft_sensor/RAnkle\", WrenchStampedMsg, self.callback_ft_r)\n\n # Last sensor data\n self.last_joint_state = None\n self.last_imu1 = None\n self.last_imu2 = None\n self.last_foot_bumper_l = None\n self.last_foot_bumper_r = None\n self.last_ft_l = None\n self.last_ft_r = None\n\n\n def run(self):\n # self.\n pass\n\n\n def callback_joint_state(self, msg):\n self.last_joint_state = msg\n #print(\"Receiving joint state\")\n\n def callback_imu1(self, msg):\n self.last_imu1 = msg\n #print(\"Receiving imu1\")\n\n def callback_imu2(self, msg):\n self.last_imu2 = msg\n #print(\"Receiving imu2\")\n\n def callback_foot_bumper_l(self, msg):\n self.last_foot_bumper_l = msg\n #print(\"Receiving left foot bumper\")\n\n def callback_foot_bumper_r(self, msg):\n self.last_foot_bumper_r = msg\n #print(\"Receiving right foot bumper\")\n\n def callback_ft_l(self, msg):\n self.last_ft_l = msg\n #print(\"Receiving left FT sensor\")\n\n def callback_ft_r(self, msg):\n self.last_ft_r = msg\n #print(\"Receiving right FT sensor\")\n\n\n # def ros_service_proxy(self, srv_name=None, srv_type=None):\n # if srv_name is None:\n # raise ValueError(\"No service name has been specified.\")\n # if srv_type is None:\n # raise ValueError(\"No service type has been specified.\")\n #\n # print(\"Waiting for service '%s'...\" % srv_name)\n # rospy.wait_for_service(srv_name)\n # return rospy.ServiceProxy(srv_name, srv_type)\n\n\n def reset(self):\n print(\"Resetting gazebo...\")\n rospy.wait_for_service('/gazebo/reset_world')\n try:\n self.reset_proxy() # It does not response anything\n #print(\"/gazebo/reset_world service response: %s\" % str(reset_response))\n except rospy.ServiceException as exc:\n print(\"/gazebo/reset_world service call failed: %s\" % str(exc))\n\n\n\nif __name__ == '__main__':\n # Create a ROS EnvInterface\n ros_interface = ROSEnvInterface('simulation')\n\n while not rospy.is_shutdown():\n rospy.spin()\n\n #raw_input(\"Press a key...\")\n ros_interface.reset()\n\n while True:\n print(\"holaaa\")\n time.sleep(1)\n\n\n\n\n\n\n # #process_roscore = subprocess.Popen(\"roscore\")\n# roslaunch_file = 
\"/home/domingo/robotlearning-superbuild/catkin_ws/src/bigman/bigman_gazebo/launch/bigman_floating_base_whole_body.launch\"\n# process_roslaunch = subprocess.Popen([\"roslaunch\", roslaunch_file])#, preexec_fn=os.setsid)\n# raw_input(\"APRETAR PARA MATAR\")\n# os.killpg(os.getpgid(process_roslaunch.pid), signal.SIGTERM)\n\n\n\n" }, { "alpha_fraction": 0.5194961428642273, "alphanum_fraction": 0.5397701859474182, "avg_line_length": 43.47991180419922, "blob_id": "1a0256c17b7ae830f0e38ab503522286f4830486", "content_id": "78de19017c4dd1100606cf72b5775b278b134e66", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19927, "license_type": "permissive", "max_line_length": 154, "num_lines": 448, "path": "/scenarios/dualist_gps/reacher_pi2trajopt/scenario.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\nimport yaml\n\nimport numpy as np\nfrom robolearn.old_envs.pusher3dof import Pusher3DofBulletEnv\nfrom robolearn.old_utils.sample.sampler import Sampler\n\nfrom robolearn.old_agents import NoPolAgent\nfrom robolearn.old_algos.trajopt.pi2_trajopt import PI2TrajOpt\n# Costs\nfrom robolearn.old_costs.cost_action import CostAction\n# from robolearn.costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_safe_distance import CostSafeDistance\nfrom robolearn.old_costs.cost_state_difference import CostStateDifference\nfrom robolearn.old_costs.cost_safe_state_difference import CostSafeStateDifference\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_dual_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.transformations_utils import create_quat_pose\nfrom robolearn.old_utils.traj_opt.traj_opt_pi2 import TrajOptPI2\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n print(\"\\n\\033[1;31mThe script has been killed by the user!!\")\n os._exit(1)\n\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\nclass Scenario(object):\n def __init__(self, hyperparams):\n\n self.hyperparams = hyperparams\n\n # Task Parameters\n yaml_path = os.path.dirname(__file__) + '/task_parameters.yaml'\n assert(os.path.exists(yaml_path))\n with open(yaml_path, 'r') as f:\n self.task_params = yaml.load(f)\n\n Tend = self.task_params['Tend']\n Ts = self.task_params['Ts']\n self.task_params['T'] = int(Tend/Ts)\n\n if self.hyperparams['render']:\n self.task_params['render'] = self.hyperparams['render']\n\n\n # Numpy max\n os.environ['OMP_NUM_THREADS'] = str(self.task_params['np_threads'])\n\n # Environment\n self.env = self.create_environment()\n\n self.action_dim = self.env.action_dim\n self.state_dim = self.env.state_dim\n self.obs_dim = self.env.obs_dim\n\n # Agent\n self.agent = self.create_agent()\n\n # Costs\n self.cost 
= self.create_cost()\n\n # Initial Conditions\n self.init_cond = self.create_init_conditions()\n\n # Learning algo\n self.learn_algo = self.create_learning_algo()\n\n def create_environment(self):\n change_print_color.change('BLUE')\n print(\"\\nCreating Environment...\")\n\n # Environment parameters\n env_with_img = False\n rdn_tgt_pos = False\n render = self.task_params['render']\n obs_like_mjc = self.task_params['obs_like_mjc']\n ntargets = self.task_params['ntargets']\n tgt_weights = self.task_params['tgt_weights']\n tgt_positions = self.task_params['tgt_positions']\n tgt_types = self.task_params['tgt_types']\n env = Pusher3DofBulletEnv(render=render, obs_with_img=env_with_img,\n obs_mjc_gym=obs_like_mjc, ntargets=ntargets,\n rdn_tgt_pos=rdn_tgt_pos, tgt_types=tgt_types)\n\n env.set_tgt_cost_weights(tgt_weights)\n env.set_tgt_pos(tgt_positions)\n\n print(\"Environment:%s OK!.\" % type(env).__name__)\n\n return env\n\n def create_agent(self):\n change_print_color.change('CYAN')\n print(\"\\nCreating Agent...\")\n\n policy_params = {}\n\n policy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': policy_params\n }\n\n agent = NoPolAgent(act_dim=self.action_dim, obs_dim=self.obs_dim,\n state_dim=self.state_dim,\n agent_name='agent'+str('%02d' %\n self.hyperparams['run_num']))\n print(\"Agent:%s OK\\n\" % type(agent).__name__)\n\n return agent\n\n def create_cost(self):\n change_print_color.change('GRAY')\n print(\"\\nCreating Costs...\")\n\n # Action Cost\n weight = 1e0 # 1e-4\n target = None\n act_cost = {\n 'type': CostAction,\n 'wu': np.ones(self.action_dim) * weight,\n 'target': target, # Target action value\n }\n\n # # FK Cost\n # fk_l1_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # 'wp': np.array([3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n #\n # fk_l2_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n # #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n\n\n # State costs\n target_distance_object = np.zeros(2)\n # input(self.env.get_state_info())\n # input(self.env.get_state_info(name='tgt0')['idx'])\n state_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n state_final_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n cost_safe_distance = {\n 'type': CostSafeDistance,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt1': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'safe_distance': np.array([0.15, 0.15]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([1.0, 1.0]),\n 'data_idx': self.env.get_state_info(name='tgt1')['idx']\n },\n },\n }\n\n cost_state_difference = {\n 'type': CostStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'wp': np.array([1.0, 1.0, 1.0]), # State weights - must be set.\n 'target_state': 'tgt0', # Target state - must be set.\n 'average': None,\n 'tgt_idx': self.env.get_state_info(name='tgt0')['idx'],\n 'data_idx': self.env.get_state_info(name='ee')['idx'],\n 'idx_to_use': [0, 1, 2], # All: X, Y, theta\n },\n },\n }\n\n cost_safe_state_difference = {\n 'type': CostSafeStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'safe_distance': np.array([0.15, 0.15]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([1.0, 1.0]),\n 'target_state': 'tgt1', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt1')['idx'][:2],\n 'data_idx': self.env.get_state_info(name='ee')['idx'][:2],\n 'idx_to_use': [0, 1], # Only X and Y\n },\n },\n }\n\n\n # Sum costs\n # costs_and_weights = [(act_cost, 1.0e-1),\n costs_and_weights = [(act_cost, 1.0e-5),\n # # (fk_cost, 1.0e-0),\n # (fk_l1_cost, 1.5e-1),\n # (fk_l2_cost, 1.0e-0),\n # # (fk_final_cost, 1.0e-0),\n # (fk_l1_final_cost, 1.5e-1),\n # (fk_l2_final_cost, 1.0e-0),\n (cost_state_difference, 5.0e-0),\n (cost_safe_state_difference, 1.0e+1),\n # WORKING:\n # (cost_safe_distance, 1.0e+1),\n # (state_cost_distance, 5.0e-0),\n # (state_final_cost_distance, 1.0e+3),\n ]\n\n cost_sum = {\n 'type': CostSum,\n 'costs': [cw[0] for cw in costs_and_weights],\n 'weights': [cw[1] for cw in costs_and_weights],\n }\n\n return cost_sum\n\n def create_init_conditions(self):\n change_print_color.change('MAGENTA')\n print(\"\\nCreating Initial Conditions...\")\n initial_cond = self.task_params['init_cond']\n\n ddof = 3 # Data dof (file): x, y, theta\n pdof = 3 # Pose dof (env): x, y, theta\n ntgt = self.task_params['ntargets']\n\n for cc, cond in enumerate(initial_cond):\n condition = np.zeros(self.env.obs_dim)\n condition[:self.env.action_dim] = np.deg2rad(cond[:3])\n cond_idx = 2*self.env.action_dim + pdof # EE pose will be obtained from sim\n data_idx = self.env.action_dim\n for tt in range(self.task_params['ntargets']):\n tgt_data = cond[data_idx:data_idx+ddof]\n # tgt_pose = create_quat_pose(pos_x=tgt_data[0],\n # pos_y=tgt_data[1],\n # pos_z=z_fix,\n # rot_yaw=np.deg2rad(tgt_data[2]))\n # condition[cond_idx:cond_idx+pdof] = tgt_pose\n tgt_data[2] = np.deg2rad(tgt_data[2])\n condition[cond_idx:cond_idx+pdof] = tgt_data\n cond_idx += pdof\n data_idx += ddof\n\n self.env.add_init_cond(condition)\n\n return self.env.get_conditions()\n\n def create_learning_algo(self):\n change_print_color.change('YELLOW')\n print(\"\\nConfiguring learning algorithm...\\n\")\n\n # Learning params\n resume_training_itr = None # Resume from previous training iteration\n\n # Dynamics\n learned_dynamics = None\n\n init_traj_distr = {'type': init_pd,\n 'init_var': np.array([1.0, 1.0, 1.0])*5.0e-01,\n 'pos_gains': 0.001, # float or array\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': self.env.get_total_joints(), # Total joints in state\n 'state_to_pd': 'joints', # Joints\n 
'dDistance': 6,\n }\n\n # Trajectory Optimization Method\n traj_opt_method = {\n 'type': TrajOptPI2,\n 'kl_threshold': 1.0, # KL-divergence threshold between old and new policies.\n 'covariance_damping': 2.0, # If greater than zero, covariance is computed as a multiple of the old covariance.\n # Multiplier is taken to the power (1 / covariance_damping). If greater than one, slows\n # down convergence and keeps exploration noise high for more iterations.\n 'min_temperature': 0.001, # Minimum bound of the temperature optimization for the soft-max probabilities of the\n # policy samples.\n 'use_sumexp': False,\n 'pi2_use_dgd_eta': False,\n 'pi2_cons_per_step': True,\n 'min_eta': 1e-8,\n 'max_eta': 1e16,\n 'del0': 1e-4,\n }\n\n good_trajs = None\n bad_trajs = None\n pi2trajopt_hyperparams = {\n 'inner_iterations': self.task_params['inner_iterations'], # Times the trajectories are updated\n # Initial dual variables\n 'init_eta': 4.62,\n # KL step (epsilon)\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step)\n 'max_step_mult': 10.0, # Max possible value of step multiplier (multiplies kl_step)\n }\n\n gps_hyperparams = {\n 'T': self.task_params['T'], # Total points\n 'dt': self.task_params['Ts'],\n 'iterations': self.task_params['iterations'], # GPS episodes --> K iterations\n # Samples\n 'num_samples': self.task_params['num_samples'], # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n 'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': 5.e-0*np.ones(self.action_dim), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n # Cost\n 'cost': self.cost,\n # Conditions\n 'conditions': len(self.init_cond), # Total number of initial conditions\n 'train_conditions': self.task_params['train_cond'], # Indexes of conditions used for training\n # TrajDist\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': False,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, # Max value for x0sigma in trajectories\n # TrajOpt\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization #TODO: CHECK THIS VALUE\n # Others\n 'algo_hyperparams': pi2trajopt_hyperparams,\n 'data_files_dir': self.hyperparams['log_dir'],\n }\n\n return PI2TrajOpt(self.agent, self.env, **gps_hyperparams)\n\n def train(self, itr_load=None):\n return self.learn_algo.run(itr_load)\n\n def test_policy(self, pol_type=None, condition=0, iteration=-1):\n noise = np.zeros((self.task_params['T'], self.agent.act_dim))\n\n if iteration == -1:\n for rr in range(600):\n temp_path = self.hyperparams['log_dir'] + ('/itr_%02d' % rr)\n if os.path.exists(temp_path):\n iteration += 1\n\n if iteration == -1:\n print(\"There is not itr_XX data in '%s'\"\n % self.hyperparams['log_dir'])\n return False\n\n dir_path = 'itr_%02d/' % iteration\n\n itr_data_file = dir_path + 'iteration_data_itr_%02d.pkl' % iteration\n\n change_print_color.change('BLUE')\n print(\"\\nLoading iteration data '%s'...\" % itr_data_file)\n\n itr_data = self.learn_algo.data_logger.unpickle(itr_data_file)\n policy = itr_data[condition].traj_distr\n\n 
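# Reset the environment to the requested condition and roll out the loaded\n        # trajectory distribution once, with zero exploration noise.\n        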
self.env.reset(condition=condition)\n        input('Press a key to start sampling...')\n        sample = self.agent.sample(self.env, condition, self.task_params['T'],\n                                   self.task_params['Ts'], noise, policy=policy,\n                                   save=False)\n        return True\n" }, { "alpha_fraction": 0.5327956676483154, "alphanum_fraction": 0.5689437389373779, "avg_line_length": 47.50694274902344, "blob_id": "d67ee4260669d6fa8d1d67ffeaa9cf22b4e02d4b", "content_id": "4f4a28a4fad964655011e1f9a91b98fb1ec84637", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41911, "license_type": "permissive", "max_line_length": 174, "num_lines": 864, "path": "/scenarios/bigman-reach-lift-box.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\n\nimport numpy as np\nfrom robolearn.old_utils.sampler import Sampler\n\nfrom robolearn.old_agents import GPSAgent\nfrom robolearn.old_algos.gps.gps import GPS\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\nfrom robolearn.old_envs import BigmanEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior  # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.robot_model import RobotModel\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import Reset_condition_bigman_box_gazebo\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import create_bigman_box_condition\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import create_hand_relative_pose\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import spawn_box_gazebo\nfrom robolearn.old_utils.tasks.bigman.lift_box_utils import task_space_torque_control_demos, \\\n    load_task_space_torque_control_demos\nfrom robolearn.old_utils.traj_opt.traj_opt_lqr import TrajOptLQR\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n    print(\"\\n\\033[1;31mThe script has been killed by the user!!\")\n    os._exit(1)\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\n# Task parameters\nTs = 0.01\nTreach = 5\nTlift = 0  # 3.8\nTinter = 0  # 0.5\nTend = 0  # 0.7\n# EndTime = 4  # Using final time to define the horizon\nEndTime = Treach + Tinter + Tlift + Tend  # Using final time to define the horizon\ninit_with_demos = False\ndemos_dir = None  # 'TASKSPACE_TORQUE_CTRL_DEMO_2017-07-21_16:32:39'\nseed = 6\n\nrandom.seed(seed)\nnp.random.seed(seed)\n\n# BOX\nbox_x = 0.70\nbox_y = 0.00\nbox_z = 0.0184\nbox_yaw = 0  # Degrees\nbox_size = [0.4, 0.5, 0.3]\nfinal_box_height = 0.0\nbox_relative_pose = 
create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw)\n\n# Robot Model (It is used to calculate the IK cost)\n#robot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf_file)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\n\ntouching_box_config = np.array([0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.,\n 0., 0., 0.,\n 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633,\n #0., 0., 0., -1.5708, 0., 0., 0.,\n 0., 0.,\n 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\n #0., 0., 0., -1.5708, 0., 0., 0.])\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\nchange_print_color.change('BLUE')\nprint(\"\\nCreating Bigman environment...\")\n\n# Robot configuration\ninterface = 'ros'\nbody_part_active = 'RA'\nbody_part_sensed = 'RA'\ncommand_type = 'effort'\n\n\nleft_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0],\n hand_x=0.0, hand_y=box_size[1]/2-0.02, hand_z=0.0, hand_yaw=0)\n# left_hand_rel_pose[:] = left_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'\nright_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0],\n hand_x=0.0, hand_y=-box_size[1]/2+0.02, hand_z=0.0, hand_yaw=0)\n# right_hand_rel_pose[:] = right_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'\n\nreset_condition_bigman_box_gazebo_fcn = Reset_condition_bigman_box_gazebo()\n\n\nobservation_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'ros_topic': '/xbotcore/bigman/joint_states',\n # 'fields': ['link_position', 'link_velocity', 'effort'],\n 'fields': ['link_position', 'link_velocity'],\n # 'joints': bigman_params['joint_ids']['UB']},\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n {'name': 'distance_left_arm',\n 'type': 'fk_pose',\n 'body_name': LH_name,\n 'body_offset': l_soft_hand_offset,\n 'target_offset': left_hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n {'name': 'distance_right_arm',\n 'type': 'fk_pose',\n 'body_name': RH_name,\n 'body_offset': r_soft_hand_offset,\n 'target_offset': right_hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n # {'name': 'ft_left_arm',\n # 'type': 'fk_vel',\n # 'ros_topic': None,\n # 'body_name': LH_name,\n # 'body_offset': l_soft_hand_offset,\n # 'fields': ['orientation', 'position']},\n\n # {'name': 'ft_left_arm',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/l_arm_ft',\n # 'fields': ['force', 'torque']},\n\n # {'name': 'ft_right_arm',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/r_arm_ft',\n # 'fields': ['force', 'torque']},\n\n # {'name': 'ft_left_leg',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/l_leg_ft',\n # 'fields': ['force', 'torque']},\n\n # {'name': 'ft_right_leg',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/r_leg_ft',\n # 'fields': ['force', 'torque']},\n\n # {'name': 'imu1',\n # 'type': 'imu',\n # 'ros_topic': '/xbotcore/bigman/imu/imu_link',\n # 'fields': ['orientation', 'angular_velocity', 'linear_acceleration']},\n\n # {'name': 'optitrack',\n # 'type': 'optitrack',\n # 'ros_topic': 
'/optitrack/relative_poses',\n # 'fields': ['orientation', 'position'],\n # 'bodies': ['box']},\n ]\n\nstate_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'fields': ['link_position', 'link_velocity'],\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n {'name': 'distance_left_arm',\n 'type': 'fk_pose',\n 'body_name': LH_name,\n 'body_offset': l_soft_hand_offset,\n 'target_offset': left_hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n {'name': 'distance_right_arm',\n 'type': 'fk_pose',\n 'body_name': RH_name,\n 'body_offset': r_soft_hand_offset,\n 'target_offset': right_hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n # {'name': 'optitrack',\n # 'type': 'optitrack',\n # 'fields': ['orientation', 'position'],\n # 'bodies': ['box']} # check if it is better relative position with EE(EEs)\n ]\n\noptional_env_params = {\n 'temp_object_name': 'box'\n}\n\n# Spawn Box first because it is simulation\nspawn_box_gazebo(box_relative_pose, box_size=box_size)\n\n# Create a BIGMAN ROS EnvInterface\nbigman_env = BigmanEnv(interface=interface, mode='simulation',\n body_part_active=body_part_active, command_type=command_type,\n observation_active=observation_active,\n state_active=state_active,\n cmd_freq=int(1/Ts),\n robot_dyn_model=robot_model,\n optional_env_params=optional_env_params,\n reset_simulation_fcn=reset_condition_bigman_box_gazebo_fcn)\n # reset_simulation_fcn=reset_condition_bigman_box_gazebo)\n\naction_dim = bigman_env.action_dim\nstate_dim = bigman_env.state_dim\nobservation_dim = bigman_env.obs_dim\n\nprint(\"Bigman Environment OK. body_part_active:%s (action_dim=%d). Command_type:%s\" % (body_part_active, action_dim,\n command_type))\n\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nchange_print_color.change('CYAN')\nprint(\"\\nCreating Bigman Agent...\")\n\npolicy_params = {\n 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'network_params': {\n 'n_layers': 1, # Hidden layers??\n 'dim_hidden': [40], # List of size per n_layers\n 'obs_names': bigman_env.get_obs_info()['names'],\n 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n },\n # Initialization.\n 'init_var': 0.1, # Initial policy variance.\n 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # Solver hyperparameters.\n 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n 'batch_size': 15,\n 'lr': 0.001, # Base learning rate (by default it's fixed).\n 'lr_policy': 'fixed', # Learning rate policy.\n 'momentum': 0.9, # Momentum.\n 'weight_decay': 0.005, # Weight decay.\n 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # set gpu usage.\n 'use_gpu': 1, # Whether or not to use the GPU for training.\n 'gpu_id': 0,\n 'random_seed': 1,\n 'fc_only_iterations': 0, # TODO: Only forwardcontrol? 
if it is CNN??\n # 'weights_file_prefix': EXP_DIR + 'policy',\n}\npolicy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': policy_params\n }\n\nbigman_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt)\nprint(\"Bigman Agent:%s OK\\n\" % type(bigman_agent))\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n# Action Cost\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\ntarget_distance_right_arm = np.zeros(6)\n# state_cost_distance = {\n# 'type': CostState,\n# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'l1': 0.1, # Weight for l1 norm\n# 'l2': 1.0, # Weight for l2 norm\n# 'alpha': 1e-2, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 10.0, # Weight multiplier on final time step.\n# 'data_types': {\n# 'distance_left_arm': {\n# # 'wp': np.ones_like(target_state), # State weights - must be set.\n# 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.\n# 'target_state': target_distance_left_arm, # Target state - must be set.\n# 'average': None, # (12, 3),\n# 'data_idx': bigman_env.get_state_info(name='distance_left_arm')['idx']\n# },\n# 'distance_right_arm': {\n# # 'wp': np.ones_like(target_state), # State weights - must be set.\n# 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.\n# 'target_state': target_distance_right_arm, # Target state - must be set.\n# 'average': None, # (12, 3),\n# 'data_idx': bigman_env.get_state_info(name='distance_right_arm')['idx']\n# },\n# },\n# }\n\nRAfk_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nRAfk_l1_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nRAfk_l2_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nRAfk_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10,\n}\n\nRAfk_l1_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10,\n}\n\nRAfk_l2_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_right_arm,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n 'op_point_name': RH_name,\n 'op_point_offset': r_soft_hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],\n 'joint_ids': bigman_params['joint_ids']['RA'],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n 'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10,\n}\n\n\n\n\n\n\n\n# RAfk_cost = {\n# 'type': CostFK,\n# 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_pose': target_distance_right_arm,\n# 'tgt_data_type': 'state', # 'state' or 'observation'\n# 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n# 'op_point_name': RH_name,\n# 'op_point_offset': r_soft_hand_offset,\n# 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n# 'joint_ids': bigman_params['joint_ids']['RA'],\n# 'robot_model': robot_model,\n# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 1, # 10\n# }\n\n# RAfk_final_cost = {\n# 'type': CostFK,\n# 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_pose': target_distance_left_arm,\n# 'tgt_data_type': 'state', # 'state' or 'observation'\n# 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],\n# 'op_point_name': RH_name,\n# 'op_point_offset': r_soft_hand_offset,\n# 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n# 'joint_ids': bigman_params['joint_ids']['RA'],\n# 'robot_model': robot_model,\n# 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 10,\n# }\n\n\n# target_state_box = box_relative_pose.copy()\n# target_state_box[-1] += final_box_height\n# state_cost = {\n# 'type': CostState,\n# 'ramp_option': RAMP_LINEAR, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'l1': 0.0, # Weight for l1 norm\n# 'l2': 1.0, # Weight for l2 norm\n# 'alpha': 1e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 5.0, # Weight multiplier on final time step.\n# 'data_types': {\n# 'optitrack': {\n# # 'wp': np.ones_like(target_state), # State weights - must be set.\n# 'wp': np.array([1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.\n# 'target_state': target_state_box, # Target state - must be set.\n# 'average': None, # (12, 3),\n# 'data_idx': bigman_env.get_state_info(name='optitrack')['idx']\n# },\n# # 'link_position': {\n# # 'wp': np.ones_like(target_pos), # State weights - must be set.\n# # 'target_state': target_pos, # Target state - must be set.\n# # 'average': None, #(12, 3),\n# # 'data_idx': bigman_env.get_state_info(name='link_position')['idx']\n# # },\n# # 'link_velocity': {\n# # 'wp': np.ones_like(target_vel), # State weights - must be set.\n# # 'target_state': target_vel, # Target state - must be set.\n# # 'average': None, #(12, 3),\n# # 'data_idx': bigman_env.get_state_info(name='link_velocity')['idx']\n# # },\n# },\n# }\n\n# LAfk_cost = {\n# 'type': CostFKRelative,\n# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_rel_pose': left_hand_rel_pose,\n# 'rel_data_type': 'state', # 'state' or 'observation'\n# # 'rel_data_name': 'optitrack', # Name of the state/observation\n# # 'rel_idx': bigman_env.get_obs_info(name='optitrack')['idx'],\n# 'rel_idx': bigman_env.get_state_info(name='optitrack')['idx'],\n# 'data_idx': bigman_env.get_state_info(name='link_position')['idx'],\n# 'op_point_name': LH_name,\n# 'op_point_offset': l_soft_hand_offset,\n# 'joint_ids': bigman_params['joint_ids']['LA'],\n# 'robot_model': robot_model,\n# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'wp': np.array([0.5, 0.5, 0.5, 3.0, 3.0, 1.5]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'l1': 0.1, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 10,\n# }\n\n# RAfk_cost = {\n# 'type': CostFKRelative,\n# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT,LINEAR, QUADRATIC, FINAL_ONLY\n# 'target_rel_pose': right_hand_rel_pose,\n# 'rel_data_type': 'observation', # 'state' or 'observation'\n# # 'rel_data_name': 'optitrack', # Name of the state/observation\n# 'rel_idx': bigman_env.get_obs_info(name='optitrack')['idx'],\n# 'data_idx': bigman_env.get_state_info(name='link_position')['idx'],\n# 'op_point_name': RH_name,\n# 'op_point_offset': r_soft_hand_offset,\n# 'joint_ids': bigman_params['joint_ids']['RA'],\n# 'robot_model': robot_model,\n# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'wp': np.array([0.5, 0.5, 0.5, 3.0, 3.0, 1.5]), # one dim less because 'quat' error | 1)orient 2)pos\n# 'l1': 0.1, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n# 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 10,\n# }\n\ncost_sum = {\n 'type': CostSum,\n # 'costs': [act_cost, state_cost_distance],\n # 'weights': [1.0e-2, 1.0e-0],\n # 'costs': [act_cost, LAfk_cost, RAfk_cost, state_cost],\n # 'weights': [1.0e-2, 1.0e-0, 1.0e-0, 5.0e-1],\n #'costs': [act_cost, LAfk_cost, LAfk_final_cost],\n #'weights': [1.0e-1, 1.0e-0, 1.0e-0],\n 'costs': [act_cost, LAfk_l1_cost, LAfk_l2_cost, LAfk_l1_final_cost, LAfk_l2_final_cost, RAfk_l1_cost, RAfk_l2_cost, RAfk_l1_final_cost, RAfk_l2_final_cost],\n 'weights': [1.0e-2, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0],\n # 'costs': [act_cost, state_cost],#, LAfk_cost, RAfk_cost],\n # 'weights': [0.1, 5.0],\n}\n\n\n# ########## #\n# ########## #\n# Conditions #\n# ########## #\n# ########## #\nq0 = np.zeros(31)\nq0[15] = np.deg2rad(25)\nq0[16] = np.deg2rad(40)\nq0[18] = np.deg2rad(-75)\n#q0[15:15+7] = [0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633]\nq0[24] = np.deg2rad(25)\nq0[25] = np.deg2rad(-40)\nq0[27] = np.deg2rad(-75)\n#q0[24:24+7] = [0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633]\nbox_pose0 = box_relative_pose.copy()\ncondition0 = create_bigman_box_condition(q0, box_pose0, bigman_env.get_state_info(),\n 
joint_idxs=bigman_params['joint_ids'][body_part_sensed])\nbigman_env.add_condition(condition0)\nreset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose0)\n\n#q1 = np.zeros(31)\nq1 = q0.copy()\nq1[15] = np.deg2rad(25)\nq1[18] = np.deg2rad(-45)\nq1[24] = np.deg2rad(25)\nq1[27] = np.deg2rad(-45)\nbox_pose1 = create_box_relative_pose(box_x=box_x+0.02, box_y=box_y+0.02, box_z=box_z, box_yaw=box_yaw+5)\ncondition1 = create_bigman_box_condition(q1, box_pose1, bigman_env.get_state_info(),\n joint_idxs=bigman_params['joint_ids'][body_part_sensed])\nbigman_env.add_condition(condition1)\nreset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose1)\n\nq2 = q0.copy()\nq2[16] = np.deg2rad(50)\nq2[18] = np.deg2rad(-50)\nq2[25] = np.deg2rad(-50)\nq2[27] = np.deg2rad(-50)\nbox_pose2 = create_box_relative_pose(box_x=box_x-0.02, box_y=box_y-0.02, box_z=box_z, box_yaw=box_yaw-5)\ncondition2 = create_bigman_box_condition(q2, box_pose2, bigman_env.get_state_info(),\n joint_idxs=bigman_params['joint_ids'][body_part_sensed])\nbigman_env.add_condition(condition2)\nreset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose2)\n\n# q3 = q0.copy()\n# q3[16] = np.deg2rad(0)\n# q3[18] = np.deg2rad(0)\n# q3[25] = np.deg2rad(0)\n# q3[27] = np.deg2rad(0)\n# box_pose3 = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw+5)\n# condition3 = create_bigman_box_condition(q3, box_pose3, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition3)\n# reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose3)\n\n# q4 = q0.copy()\n# box_pose4 = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw-5)\n# condition4 = create_bigman_box_condition(q4, box_pose4, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition4)\n# reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose4)\n\n\n\n\n\n\n# #################### #\n# #################### #\n# ## DEMONSTRATIONS ## #\n# #################### #\n# #################### #\nif init_with_demos is True:\n change_print_color.change('GREEN')\n if demos_dir is None:\n task_space_torque_control_demos_params = {\n 'n_samples': 5,\n 'conditions_to_sample': range(len(bigman_env.get_conditions())),\n 'Treach': Treach,\n 'Tlift': Tlift,\n 'Tinter': Tinter,\n 'Tend': Tend,\n 'Ts': Ts,\n 'noisy': False,\n 'noise_hyperparams': {\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 
0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n },\n 'bigman_env': bigman_env,\n 'box_relative_pose': box_relative_pose,\n 'box_size': box_size,\n 'final_box_height': final_box_height,\n }\n demos_samples = task_space_torque_control_demos(**task_space_torque_control_demos_params)\n bigman_env.reset(time=2, cond=0)\n else:\n demos_samples = load_task_space_torque_control_demos(demos_dir)\n print('Demos samples has been obtained from directory %s' % demos_dir)\nelse:\n demos_samples = None\n\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\nchange_print_color.change('YELLOW')\nprint(\"\\nConfiguring learning algorithm...\\n\")\n\n# Learning params\nresume_training_itr = 91 # Resume from previous training iteration\ndata_files_dir = 'GPS_2017-08-14_10:35:40' # 'GPS_2017-08-04_09:40:59' # In case we want to resume from previous training\n\ntraj_opt_method = {'type': TrajOptLQR,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n# traj_opt_method = {'type': TrajOptPI2,\n# 'del0': 1e-4, # Dual variable updates for non-PD Q-function.\n# 'kl_threshold': 1.0, # KL-divergence threshold between old and new policies.\n# 'covariance_damping': 2.0, # If greater than zero, covariance is computed as a multiple of the old\n# # covariance. 
Multiplier is taken to the power (1 / covariance_damping).\n# # If greater than one, slows down convergence and keeps exploration noise\n# # high for more iterations.\n# 'min_temperature': 0.001, # Minimum bound of the temperature optimization for the soft-max\n# # probabilities of the policy samples.\n# 'use_sumexp': False,\n# 'pi2_use_dgd_eta': False,\n# 'pi2_cons_per_step': True,\n# }\n\nif demos_samples is None:\n# # init_traj_distr values can be lists if they are different for each condition\n# init_traj_distr = {'type': init_lqr,\n# # Parameters to calculate initial COST function based on stiffness\n# 'init_var': 3.0e-1, # Initial Variance\n# 'stiffness': 5.0e-1, # Stiffness (multiplies q)\n# 'stiffness_vel': 0.01, # 0.5, # Stiffness_vel*stiffness (multiplies qdot)\n# 'final_weight': 10.0, # Multiplies cost at T\n# # Parameters for guessing dynamics\n# 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.\n# #'init_gains': 1.0*np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n# #'init_gains': 1.0/np.array([5000.0, 8000.0, 5000.0, 5000.0, 300.0, 2000.0, 300.0]), # dU vector(np.array) of gains, default ones.\n# 'init_gains': np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n# }\n init_traj_distr = {'type': init_pd,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active]))*0.3e-1, # Initial variance (Default:10)\n 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,\n 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Initial variance (Default:10)\n # 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,\n # 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0, # Initial variance (Default:10)\n 'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': len(bigman_params['joint_ids'][body_part_sensed]), # Total joints in state\n }\nelse:\n init_traj_distr = {'type': init_demos,\n 'sample_lists': demos_samples\n }\n\nlearned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. 
number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\n# gps_algo = 'pigps'\n# gps_algo_hyperparams = {'init_pol_wt': 0.01,\n# 'policy_sample_mode': 'add'\n# }\ngps_algo = 'mdgps'\ngps_algo_hyperparams = {'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n }\n\ngps_hyperparams = {\n 'T': int(EndTime/Ts), # Total points\n 'dt': Ts,\n 'iterations': 91, # 100 # 2000 # GPS episodes, \"inner iterations\" --> K iterations\n 'test_after_iter': True, # If test the learned policy after an iteration in the RL algorithm\n 'test_samples': 2, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)\n # Samples\n 'num_samples': 5, # 20 # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'sample_on_policy': False, # Whether generate on-policy samples or off-policy samples\n #'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n #'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n 'smooth_noise_var': 5.0e+0, # Variance to apply to Gaussian Filter\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n 'cost': cost_sum,\n # Conditions\n 'conditions': len(bigman_env.get_conditions()), # Total number of initial conditions\n 'train_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for training\n 'test_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for testing\n # KL step (epsilon)\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)\n 'max_step_mult': 1.0, #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)\n # Others\n 'gps_algo': gps_algo,\n 'gps_algo_hyperparams': gps_algo_hyperparams,\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization\n 'data_files_dir': data_files_dir,\n}\n\n\nlearn_algo = GPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# import numpy as np\n# dX = bigman_env.state_dim\n# dU = bigman_env.action_dim\n# dO = bigman_env.obs_dim\n# T = gps_hyperparams['T']\n# all_actions = np.zeros((T, dU))\n# all_states = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dX))\n# all_obs = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dO))\n# sample = Sample(bigman_env, T)\n# sample.set_acts(all_actions) # Set all actions at the same time\n# sample.set_obs(all_obs) # Set all obs at the same time\n# sample.set_states(all_states) # Set all 
states at the same time\n# costs = learn_algo._eval_conditions_sample_list_cost([SampleList([sample])])\n# raw_input('zacataaaaaaaaa')\n\n\n# Optimize policy using learning algorithm\nprint(\"Running Learning Algorithm!!!\")\ntraining_successful = learn_algo.run(resume_training_itr)\nif training_successful:\n print(\"Learning Algorithm has finished SUCCESSFULLY!\")\nelse:\n print(\"Learning Algorithm has finished WITH ERRORS!\")\n\n\n# ############################## #\n# ############################## #\n# ## SAMPLE FROM FINAL POLICY ## #\n# ############################## #\n# ############################## #\nif training_successful:\n conditions_to_sample = gps_hyperparams['test_conditions']\n change_print_color.change('GREEN')\n n_samples = 1\n noisy = False\n sampler_hyperparams = {\n 'noisy': noisy,\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n 'T': int(EndTime/Ts)*10, # Total points\n 'dt': Ts\n }\n sampler = Sampler(bigman_agent.policy, bigman_env, **sampler_hyperparams)\n print(\"Sampling from final policy!!!\")\n sample_lists = list()\n for cond_idx in conditions_to_sample:\n raw_input(\"\\nSampling %d times from condition %d and with policy:%s (noisy:%s). \\n Press a key to continue...\" %\n (n_samples, cond_idx, type(bigman_agent.policy), noisy))\n sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)\n # costs = learn_algo._eval_conditions_sample_list_cost([sample_list])\n # # print(costs)\n # # raw_input('pppp')\n # sample_lists.append(sample_list)\n\n bigman_env.reset(time=1, cond=0)\n\n\n\n\nprint(\"The script has finished!\")\nos._exit(0)\n\n" }, { "alpha_fraction": 0.5389687418937683, "alphanum_fraction": 0.5445074439048767, "avg_line_length": 36.16666793823242, "blob_id": "286483f781f1779cd7adca6fcedaea3fb90c1a37", "content_id": "6e38c67813d7fb821a97b3c636a6f12f45af39ca", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7583, "license_type": "permissive", "max_line_length": 83, "num_lines": 204, "path": "/robolearn/torch/policies/tanh_gaussian_mixture_multi_policyBK.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom torch.distributions import Multinomial\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\n\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\nLOG_MIX_COEFF_MIN = -10\nLOG_MIX_COEFF_MAX = -4.5e-5\n\n\nclass TanhGaussianMixtureMultiPolicy(PyTorchModule, ExplorationPolicy):\n def __init__(self, multipolicy, mix_hidden_sizes, pol_idxs=None,\n mix_hidden_w_init=ptu.xavier_initOLD,\n mix_hidden_b_init_val=0,\n mix_hidden_activation=F.relu,\n optimize_multipolicy=False,\n reuse_shared=False):\n self.save_init_params(locals())\n super(TanhGaussianMixtureMultiPolicy, self).__init__()\n\n self._multipolicy = multipolicy\n\n self.input_size = self._multipolicy.input_size\n\n if pol_idxs is None:\n n_heads = self._multipolicy.n_heads\n pol_idxs = list(range(n_heads))\n self.pol_idxs = pol_idxs\n\n # TODO: ASSUMING SAME ACTION 
DIMENSION\n self._action_dim = self._multipolicy.action_dim\n\n # Mixture Coefficients\n # TODO: MAYBE WE CAN REUSE LATER THE SHARED LAYERS OF THE MULTIPOLICY\n self.mix_hidden_activation = mix_hidden_activation\n self.mixfcs = list()\n in_size = self.input_size\n for i, next_size in enumerate(mix_hidden_sizes):\n fc = nn.Linear(in_size, next_size)\n in_size = next_size\n # mix_hidden_w_init(fc.weight)\n nn.init.xavier_normal_(fc.weight.data,\n gain=nn.init.calculate_gain('relu'))\n ptu.fill(fc.bias, mix_hidden_b_init_val)\n self.__setattr__(\"mixfc{}\".format(i), fc)\n self.mixfcs.append(fc)\n in_size = next_size\n\n self.last_mixfc = nn.Linear(in_size, self._multipolicy.n_heads)\n nn.init.xavier_normal_(self.last_mixfc.weight.data,\n gain=nn.init.calculate_gain('linear'))\n\n # if init_mixt_coeff is None:\n # init_mixt_coeff = np.array([1. / len(self.pol_idxs)\n # for _ in pol_idxs])\n # mixture_coeff = FloatTensor([1.0, 1.0])\n # self._mixture_coeff = nn.Parameter(mixture_coeff, requires_grad=True)\n\n # Label to detach gradients from multipolicy\n self._optimize_multipolicy = optimize_multipolicy\n\n def get_action(self, obs_np, **kwargs):\n action, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n return action[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n action, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return action, info_dict\n\n def forward(self,\n obs,\n deterministic=False,\n return_log_prob=False,\n optimize_policies=False,\n **nn_kwargs\n ):\n\n # Get Values from multipolicy\n pol_idxs = self.pol_idxs\n actions, policy_infos = \\\n self._multipolicy(obs,\n deterministic=deterministic,\n return_log_prob=return_log_prob,\n **nn_kwargs)\n\n log_actions = policy_infos['log_prob']\n\n actions_concat = torch.cat([action.unsqueeze(dim=-1)\n for action in actions], dim=-1) # NxAxK\n # print('actions_concat', actions_concat.shape)\n\n if not self._optimize_multipolicy:\n actions_concat = actions_concat.detach()\n\n # Compute mixture coefficients\n h = obs\n for i, fc in enumerate(self.mixfcs):\n h = self.mix_hidden_activation(fc(h))\n log_mixture_coeff = self.last_mixfc(h)\n\n # TODO: CHECK IF ITS BETTER TO CLAMP\n # log_mixture_coeff = torch.clamp(log_mixture_coeff,\n # min=LOG_MIX_COEFF_MIN,\n # max=LOG_MIX_COEFF_MAX) # NxK\n\n mixture_coeff = torch.exp(log_mixture_coeff) \\\n / torch.sum(torch.exp(log_mixture_coeff), dim=-1,\n keepdim=True)\n\n if torch.isnan(mixture_coeff).any():\n raise ValueError('Any mixture coeff is NAN:',\n mixture_coeff)\n\n # print(log_mixture_coeff)\n # print(mixture_coeff)\n # print('--')\n\n # TODO: CHECK IF NOT PROPAGATING GRADIENTS HERE IS A PROBLEM\n # Sample latent variables\n z = Multinomial(logits=log_mixture_coeff).sample() # NxK\n\n # Choose mixture component corresponding\n weighted_action = torch.sum(actions_concat*z.unsqueeze(-2), dim=-1)\n\n # weighted_action = \\\n # torch.sum(actions_concat * log_mixture_coeff.unsqueeze(-2), dim=-1) \\\n # / torch.sum(log_mixture_coeff, dim=-1, keepdim=True)\n\n if return_log_prob is True:\n log_actions_concat = \\\n torch.cat([log_action.unsqueeze(dim=-1)\n for log_action in log_actions], dim=-1)\n\n if not self._optimize_multipolicy:\n log_actions_concat = log_actions_concat.detach()\n\n log_actions_concat = torch.sum(log_actions_concat*z.unsqueeze(-1),\n 
dim=-2)\n weighted_log_action = \\\n torch.logsumexp(log_actions_concat + log_mixture_coeff, dim=-1,\n keepdim=True) \\\n - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)\n\n # weighted_log_action = \\\n # torch.sum(log_actions_concat * log_mixture_coeff.unsqueeze(-2),\n # dim=-1) \\\n # / torch.sum(log_mixture_coeff, dim=-1, keepdim=True)\n else:\n weighted_log_action = None\n\n \"\"\"\n mixture_coeff = torch.exp(log_mixture_coeff) \\\n / torch.sum(torch.exp(log_mixture_coeff), dim=-1,\n keepdim=True)\n\n weighted_action = \\\n torch.sum(actions_concat*mixture_coeff.unsqueeze(-2),\n dim=-1)\n\n if return_log_prob is True:\n log_actions_concat = \\\n torch.cat([log_action.unsqueeze(dim=-1)\n for log_action in log_actions], dim=-1)\n\n if not self._optimize_multipolicy:\n log_actions_concat = log_actions_concat.detach()\n\n log_actions_concat = \\\n torch.sum(log_actions_concat*mixture_coeff.unsqueeze(-1),\n dim=-2)\n\n weighted_log_action = \\\n torch.logsumexp(log_actions_concat + log_mixture_coeff, dim=-1,\n keepdim=True) \\\n - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)\n else:\n weighted_log_action = None\n \"\"\"\n\n info_dict = dict(\n log_action=weighted_log_action,\n mixing_coeff=mixture_coeff,\n )\n\n return weighted_action, info_dict\n\n @property\n def n_heads(self):\n return self._multipolicy.n_heads\n\n" }, { "alpha_fraction": 0.7018255591392517, "alphanum_fraction": 0.7200811505317688, "avg_line_length": 17.961538314819336, "blob_id": "8ef65ba6611221f2e99a00d08339613b4a1aedec", "content_id": "242b4cd7ef9d78c1c14c52217c458a8aeef27ac6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "permissive", "max_line_length": 67, "num_lines": 26, "path": "/examples/miscellaneous/test_dynamics.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.models import TVLGDynamics\n\nhorizon = 200\ndO = 6\ndA = 3\nbatch = 100\n\ndynamics = TVLGDynamics(horizon=horizon, obs_dim=dO, action_dim=dA)\n\nobs = torch.rand(batch, horizon, dO)\nact = torch.rand(batch, horizon, dA)\n\nprint('Dynamics parameters:')\nfor name, parameter in dynamics.named_parameters():\n print(name, parameter.shape)\n\n\nprint('Forward dynamics:')\nt = 0\n# next_obs = dynamics(obs, act, time=t)\n\nprint('Fitting')\ndynamics.fit(obs, act)\n\ninput('wuuu')\n" }, { "alpha_fraction": 0.5961002707481384, "alphanum_fraction": 0.6072423458099365, "avg_line_length": 28.91666603088379, "blob_id": "3093b1ddbdda89a12644d2a26063c9096e43ba23", "content_id": "3880dfa65f4f002ead60d01aa6674265255fca95", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "permissive", "max_line_length": 73, "num_lines": 12, "path": "/robolearn/torch/utils/nn/modules/huber_loss.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch.nn as nn\n\n\nclass HuberLoss(nn.Module):\n def __init__(self, delta=1):\n super(HuberLoss, self).__init__()\n self.huber_loss_delta1 = nn.SmoothL1Loss()\n self.delta = delta\n\n def forward(self, x, x_hat):\n loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)\n return loss * self.delta * self.delta\n" }, { "alpha_fraction": 0.5463646054267883, "alphanum_fraction": 0.5542676448822021, "avg_line_length": 25.73239517211914, "blob_id": "c23815e949d1c3adba531293b5d0f3c41406f7ee", "content_id": 
"dd9b0a208a0d3797f5438cdc3a520d005910d39e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1898, "license_type": "permissive", "max_line_length": 77, "num_lines": 71, "path": "/robolearn/torch/models/transitions/gmm.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.utils.serializable import Serializable\nimport robolearn.torch.utils.pytorch_util as ptu\n\n\nclass GMM(PyTorchModule):\n def __init__(self, init_sequential=False, eigreg=False, warmstart=True):\n self._init_sequential = init_sequential\n self._eigreg = eigreg\n self._warmstart = warmstart\n self._sigma = None\n\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n super(GMM, self).__init__()\n\n def inference(self, pts):\n \"\"\"\n Evaluate dynamics prior.\n Args:\n pts: N x D array of points.\n\n Returns:\n mu0: mean\n Phi: covar\n m: number of\n n0: number of\n\n \"\"\"\n # Compute posterior cluster weights\n logwts = self.clusterwts(pts)\n\n # Compute posterior mean and covariance\n mu0, Phi = self.moments(logwts)\n\n # Set hyperparameters\n m = self.N\n n0 = m - 2 - mu0.shape[0]\n\n # Normalize\n m = float(m) / self.N\n n0 = float(n0) / self.N\n\n return mu0, Phi, m, n0\n\n def estep(self, data):\n \"\"\"\n Compute log observation probabilities under GMM.\n Args:\n data: N x D tensor of points\n\n Returns:\n logobs: N x K tensor of log probabilities (for each point on each\n cluster)\n\n \"\"\"\n # Constants\n N, D = data.shape\n K = self._sigma.shape[0]\n\n logobs = -0.5 * ptu.ones((N, K)) * D * np.log(2*np.pi) # Constant\n for i in range(K):\n mu, sigma = self._mu[i], self._sigma[i]\n L = torch.potri(sigma, upper=False) # Cholesky decomposition\n logobs[:, i] -= torch.sum(torch.log(torch.))\n\n @parameter\n def N(self):\n return self._N\n" }, { "alpha_fraction": 0.5804886221885681, "alphanum_fraction": 0.5845332741737366, "avg_line_length": 34.119319915771484, "blob_id": "2068a23c3c53ac727f3e040b67c1a073c93435c8", "content_id": "8166a56dcc51c5d807a6beafe82227e6e22736b9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6181, "license_type": "permissive", "max_line_length": 118, "num_lines": 176, "path": "/robolearn/envs/normalized_box_env.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom gym.spaces import Box\n\nfrom robolearn.utils.serializable import Serializable\nfrom robolearn.envs.proxy_env import ProxyEnv\n\n\nclass NormalizedBoxEnv(ProxyEnv, Serializable):\n \"\"\"\n Normalize action to in [-1, 1].\n\n Optionally normalize observations and scale reward.\n \"\"\"\n def __init__(\n self,\n env,\n reward_scale=1.,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n ):\n # self._wrapped_env needs to be called first because\n # Serializable.quick_init calls getattr, on this class. And the\n # implementation of getattr (see below) calls self._wrapped_env.\n # Without setting this first, the call to self._wrapped_env would call\n # getattr again (since it's not set yet) and therefore loop forever.\n self._wrapped_env = env\n # Or else serialization gets delegated to the wrapped_env. 
Serialize\n # this env separately from the wrapped_env.\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n ProxyEnv.__init__(self, env)\n\n # Observation Space\n if normalize_obs is True and online_normalization is True:\n raise AttributeError\n\n self._normalize_obs = normalize_obs\n self._online_normalize_obs = online_normalization\n\n if self._normalize_obs or self._online_normalize_obs:\n if obs_mean is None:\n obs_mean = np.zeros_like(env.observation_space.low)\n else:\n obs_mean = np.array(obs_mean)\n if obs_var is None:\n obs_var = np.ones_like(env.observation_space.low)\n else:\n obs_var = np.array(obs_var)\n\n self._obs_mean = obs_mean\n self._obs_var = obs_var\n self._obs_alpha = obs_alpha\n\n self._obs_mean_diff = np.zeros_like(env.observation_space.low)\n self._obs_n = 0\n\n # Action Space\n if isinstance(self._wrapped_env.action_space, Box):\n ub = np.ones(self._wrapped_env.action_space.shape)\n self.action_space = Box(-1 * ub, ub, dtype=np.float32)\n\n # Reward Scale\n self._reward_scale = reward_scale\n\n def estimate_obs_stats(self, obs_batch, override_values=False):\n if self._obs_mean is not None and not override_values:\n raise Exception(\"Observation mean and variance already set. To \"\n \"override, set override_values to True.\")\n self._obs_mean = np.mean(obs_batch, axis=0)\n self._obs_var = np.var(obs_batch, axis=0)\n\n def _update_obs_estimate(self, obs):\n flat_obs = obs.flatten()\n self._obs_mean = (1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs\n self._obs_var = (1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(flat_obs - self._obs_mean)\n\n def _apply_normalize_obs(self, obs):\n # return (obs - self._obs_mean) / (self._obs_std + 1e-8)\n return (obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)\n\n def _apply_online_normalize_obs(self, obs):\n self._obs_n += 1.\n last_mean = self._obs_mean\n self._obs_mean += (obs-self._obs_mean)/self._obs_n\n self._obs_mean_diff += (obs-last_mean)*(obs-self._obs_mean)\n self._obs_var = np.sqrt(np.clip(self._obs_mean_diff / self._obs_n,\n 1.e-2, None))\n return self._apply_normalize_obs(obs)\n\n def __getstate__(self):\n d = Serializable.__getstate__(self)\n # Add these explicitly in case they were modified\n d[\"_obs_mean\"] = self._obs_mean\n d[\"_obs_var\"] = self._obs_var\n d[\"_reward_scale\"] = self._reward_scale\n return d\n\n def __setstate__(self, d):\n Serializable.__setstate__(self, d)\n self._obs_mean = d[\"_obs_mean\"]\n self._obs_var = d[\"_obs_var\"]\n self._reward_scale = d[\"_reward_scale\"]\n\n @property\n def obs_mean(self):\n return self._obs_mean\n\n @property\n def obs_var(self):\n return self._obs_var\n\n @property\n def reward_scale(self):\n return self._reward_scale\n\n def reset(self, *args, **kwargs):\n obs = self._wrapped_env.reset(*args, **kwargs)\n if self._normalize_obs:\n self._update_obs_estimate(obs)\n return self._apply_normalize_obs(obs)\n elif self._online_normalize_obs:\n return self._apply_online_normalize_obs(obs)\n else:\n return obs\n\n def step(self, action):\n if isinstance(self._wrapped_env.action_space, Box):\n # Scale Action\n lb = self._wrapped_env.action_space.low\n ub = self._wrapped_env.action_space.high\n scaled_action = lb + (action + 1.) 
* 0.5 * (ub - lb)\n scaled_action = np.clip(scaled_action, lb, ub)\n else:\n scaled_action = action\n\n # Interact with Environment\n wrapped_step = self._wrapped_env.step(scaled_action)\n next_obs, reward, done, info = wrapped_step\n\n # Normalize Observation\n if self._normalize_obs:\n self._update_obs_estimate(next_obs)\n next_obs = self._apply_normalize_obs(next_obs)\n elif self._online_normalize_obs:\n next_obs = self._apply_online_normalize_obs(next_obs)\n\n # Scale Reward\n reward = reward * self._reward_scale\n\n return next_obs, reward, done, info\n\n def seed(self, *args, **kwargs):\n self._wrapped_env.seed(*args, **kwargs)\n\n @property\n def online_normalization(self):\n return self._online_normalize_obs\n\n @property\n def normalize_obs(self):\n return self._normalize_obs\n\n def __str__(self):\n return \"Normalized: %s\" % self._wrapped_env\n\n def log_diagnostics(self, paths, **kwargs):\n if hasattr(self._wrapped_env, \"log_diagnostics\"):\n return self._wrapped_env.log_diagnostics(paths, **kwargs)\n else:\n return None\n\n def __getattr__(self, attrname):\n return getattr(self._wrapped_env, attrname)\n" }, { "alpha_fraction": 0.5091533064842224, "alphanum_fraction": 0.5812357068061829, "avg_line_length": 28.100000381469727, "blob_id": "cb21c6e4c56bebe941e6a0a2c3ef7b41177054a8", "content_id": "57596bdf3312e9c3c366235317c9a8dd55b04301", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "permissive", "max_line_length": 82, "num_lines": 30, "path": "/scenarios/tests/interpolator-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation\nfrom robolearn.old_utils.trajectory_interpolators import spline_interpolation\n\nN = 100\nxf = np.array([2, 3, 4, 1])\nx0 = np.array([2, 2, 2, 2])\ndxf = np.array([0, 0, 0, 0])*N\ndx0 = np.array([0, 0, 0, 0])*N\nddxf = np.array([2, 0, 0, 0])*N**2\nddx0 = np.array([0, 0, 0, 0])*N**2\n#x, dx, ddx = polynomial5_interpolation(N, xf, x0, dxf, dx0, ddxf, ddx0)\n#\n#for ii in range(xf.size):\n# plt.plot(ddx[:, ii])\n#plt.show()\n\nN = 100\ntime_points = np.array([0, 5, 7, 10])\nvia_points = np.array([[2, 7, 8, 10],\n [7, 1, 3, 2],\n [1, 2, 4, 9],\n [4, 1, 4, 4]])\n\nx = spline_interpolation(N, time_points, via_points)\n\nfor ii in range(via_points.shape[1]):\n plt.plot(x[:, ii])\nplt.show()\n\n" }, { "alpha_fraction": 0.5304538607597351, "alphanum_fraction": 0.5379209518432617, "avg_line_length": 36.32240295410156, "blob_id": "6b4f93beab3066fc4ca96a07b75d473214f1a9a1", "content_id": "b30a160a569ec7d295dde5d3f1c41b2f62080a38", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6830, "license_type": "permissive", "max_line_length": 106, "num_lines": 183, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/traj_opt/traj_opt_utils.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file defines utilities for trajectory optimization\nAuthor: C. Finn et al. 
Code in: https://github.com/cbfinn/gps\n\"\"\"\nimport abc\n\nimport numpy as np\nimport scipy as sp\n\n\n# Constants used in TrajOptLQR.\nDGD_MAX_ITER = 3 # 50\nDGD_MAX_LS_ITER = 20\nDGD_MAX_GD_ITER = 200\n\nALPHA, BETA1, BETA2, EPS = 0.005, 0.9, 0.999, 1e-8 # Adam parameters\n\n\ndef traj_distr_kl(new_mu, new_sigma, new_traj_distr, prev_traj_distr, tot=True):\n \"\"\"\n Compute KL divergence between new and previous trajectory\n distributions.\n Args:\n new_mu: T x dX, mean of new trajectory distribution.\n new_sigma: T x dX x dX, variance of new trajectory distribution.\n new_traj_distr: A linear Gaussian policy object, new distribution.\n prev_traj_distr: A linear Gaussian policy object, previous distribution.\n tot: Whether or not to sum KL across all time steps.\n Returns:\n kl_div: The KL divergence between the new and previous\n trajectories.\n \"\"\"\n # Constants.\n T = new_mu.shape[0]\n dU = new_traj_distr.dU\n\n # Initialize vector of divergences for each time step.\n kl_div = np.zeros(T)\n\n # Step through trajectory.\n for t in range(T):\n # Fetch matrices and vectors from trajectory distributions.\n mu_t = new_mu[t, :]\n sigma_t = new_sigma[t, :, :]\n K_prev = prev_traj_distr.K[t, :, :]\n K_new = new_traj_distr.K[t, :, :]\n k_prev = prev_traj_distr.k[t, :]\n k_new = new_traj_distr.k[t, :]\n chol_prev = prev_traj_distr.chol_pol_covar[t, :, :]\n chol_new = new_traj_distr.chol_pol_covar[t, :, :]\n\n # Compute log determinants and precision matrices.\n logdet_prev = 2 * sum(np.log(np.diag(chol_prev)))\n logdet_new = 2 * sum(np.log(np.diag(chol_new)))\n prc_prev = sp.linalg.solve_triangular(\n chol_prev, sp.linalg.solve_triangular(chol_prev.T, np.eye(dU),\n lower=True)\n )\n prc_new = sp.linalg.solve_triangular(\n chol_new, sp.linalg.solve_triangular(chol_new.T, np.eye(dU),\n lower=True)\n )\n\n # Construct matrix, vector, and constants.\n M_prev = np.r_[\n np.c_[K_prev.T.dot(prc_prev).dot(K_prev), -K_prev.T.dot(prc_prev)],\n np.c_[-prc_prev.dot(K_prev), prc_prev]\n ]\n M_new = np.r_[\n np.c_[K_new.T.dot(prc_new).dot(K_new), -K_new.T.dot(prc_new)],\n np.c_[-prc_new.dot(K_new), prc_new]\n ]\n v_prev = np.r_[K_prev.T.dot(prc_prev).dot(k_prev),\n -prc_prev.dot(k_prev)]\n v_new = np.r_[K_new.T.dot(prc_new).dot(k_new), -prc_new.dot(k_new)]\n c_prev = 0.5 * k_prev.T.dot(prc_prev).dot(k_prev)\n c_new = 0.5 * k_new.T.dot(prc_new).dot(k_new)\n\n # Compute KL divergence at timestep t.\n kl_div[t] = max(0,\n -0.5 * mu_t.T.dot(M_new - M_prev).dot(mu_t) -\n mu_t.T.dot(v_new - v_prev) - c_new + c_prev -\n 0.5 * np.sum(sigma_t * (M_new-M_prev)) - 0.5 * logdet_new +\n 0.5 * logdet_prev)\n\n # Add up divergences across time to get total divergence.\n return np.sum(kl_div) if tot else kl_div\n\n\ndef traj_distr_kl_alt(new_mu, new_sigma, new_traj_distr, prev_traj_distr, tot=True):\n \"\"\"\n This function computes the same quantity as the function above.\n However, it is easier to modify and understand this function, i.e.,\n passing in a different mu and sigma to this function will behave properly.\n \"\"\"\n T, dX, dU = new_mu.shape[0], new_traj_distr.dX, new_traj_distr.dU\n kl_div = np.zeros(T)\n\n for t in range(T):\n K_prev = prev_traj_distr.K[t, :, :]\n K_new = new_traj_distr.K[t, :, :]\n\n k_prev = prev_traj_distr.k[t, :]\n k_new = new_traj_distr.k[t, :]\n\n sig_prev = prev_traj_distr.pol_covar[t, :, :]\n sig_new = new_traj_distr.pol_covar[t, :, :]\n\n chol_prev = prev_traj_distr.chol_pol_covar[t, :, :]\n chol_new = new_traj_distr.chol_pol_covar[t, :, :]\n\n inv_prev = 
prev_traj_distr.inv_pol_covar[t, :, :]\n inv_new = new_traj_distr.inv_pol_covar[t, :, :]\n\n logdet_prev = 2 * sum(np.log(np.diag(chol_prev)))\n logdet_new = 2 * sum(np.log(np.diag(chol_new)))\n\n K_diff, k_diff = K_prev - K_new, k_prev - k_new\n mu, sigma = new_mu[t, :dX], new_sigma[t, :dX, :dX]\n\n kl_div[t] = max(0,\n 0.5*(logdet_prev - logdet_new - new_traj_distr.dU +\n np.sum(np.diag(inv_prev.dot(sig_new))) +\n k_diff.T.dot(inv_prev).dot(k_diff) +\n mu.T.dot(K_diff.T).dot(inv_prev).dot(K_diff).dot(mu) +\n np.sum(np.diag(K_diff.T.dot(inv_prev).dot(K_diff).dot(sigma))) +\n 2*k_diff.T.dot(inv_prev).dot(K_diff).dot(mu)\n )\n )\n\n return np.sum(kl_div) if tot else kl_div\n\n\ndef approximated_cost(sample_list, traj_distr, traj_info):\n \"\"\"\n This function gives the LQR estimate of the cost function given the noise\n experienced along each sample in sample_list.\n Args:\n sample_list: List of samples to extract noise from.\n traj_distr: LQR controller to roll forward.\n traj_info: Used to obtain dynamics estimate to simulate trajectories.\n Returns:\n mu_all: Trajectory means corresponding to each sample in sample_list.\n predicted_cost: LQR estimates of cost of each sample in sample_list.\n \"\"\"\n T = traj_distr.T\n N = len(sample_list)\n dU = traj_distr.dU\n dX = traj_distr.dX\n noise = sample_list.get_noise()\n\n # Constants.\n idx_x = slice(dX)\n mu_all = np.zeros((N, T, dX+dU))\n\n # Pull out dynamics.\n Fm = traj_info.dynamics.Fm\n fv = traj_info.dynamics.fv\n dyn_covar = traj_info.dynamics.dyn_covar\n\n for i in range(N):\n mu = np.zeros((T, dX+dU))\n mu[0, idx_x] = traj_info.x0mu\n for t in range(T):\n mu[t, :] = np.hstack([\n mu[t, idx_x],\n (traj_distr.K[t, :, :].dot(mu[t, idx_x]) + traj_distr.k[t, :]\n + traj_distr.chol_pol_covar[t].T.dot(noise[i, t]))\n ])\n if t < T - 1:\n mu[t+1, idx_x] = Fm[t, :, :].dot(mu[t, :]) + fv[t, :]\n mu_all[i, :, :] = mu\n\n # Compute cost.\n predicted_cost = np.zeros((N, T))\n\n for i in range(N):\n for t in range(T):\n predicted_cost[i, t] = traj_info.cc[t] + \\\n 0.5 * mu_all[i,t,:].T.dot(traj_info.Cm[t, :, :]).dot(mu_all[i,t,:]) + \\\n mu_all[i,t,:].T.dot(traj_info.cv[t, :])\n\n return mu_all, predicted_cost\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 21.66666603088379, "blob_id": "ad231ca769abcbd4438c3c718062f8d3bddb7a88", "content_id": "515406d1bea375a7f32c711c34278aa3c7f1e607", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "permissive", "max_line_length": 23, "num_lines": 3, "path": "/robolearn/torch/utils/nn/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .utils import *\nfrom .modules import *\nfrom .networks import *\n" }, { "alpha_fraction": 0.5273613333702087, "alphanum_fraction": 0.5367316603660583, "avg_line_length": 38.82089614868164, "blob_id": "50fdf7fa7dbf79de3c19de333e15d9ddf9e02b4b", "content_id": "c538702c49d32b4e17ea3e6e0bd2b3979d74a23f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2668, "license_type": "permissive", "max_line_length": 94, "num_lines": 67, "path": "/scenarios/iros2018/plots/plot_distance_to_target_or_obst.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom robolearn.old_utils.plots.policy_final_distance_new import plot_policy_final_distance_new\n\nmethod = 'gps' # 'gps' or 
'trajopt'\n\n\noption = 0 # 0: plot mdgps-bmdgps-dmdgps | 1: plot remove_bad experiment\nitr_to_load = None # list(range(8))\nblock = False\nper_state = False\nlatex_plot = True\nplot_tgt = True # False will plot safe distance\n\nif option == 0:\n # Paper logs: Methods comparison, Distance to Tgt plot\n gps_directory_names = ['mdgps_log', 'bmdgps_log', 'dmdgps_log']\n gps_models_labels = ['MDGPS', 'B-MDGPS', 'D-MDGPS']\nelif option == 1:\n # Papers logs:\n gps_directory_names = ['mdgps_log', 'mdgps_no1_log', 'mdgps_no2_log']\n gps_models_labels = ['MDGPS', 'MDGPS no 1/6 worst', 'MDGPS no 2/6 worst']\nelse:\n raise ValueError(\"Wrong script option '%s'\" % str(option))\n\nif plot_tgt:\n states_idxs = [6, 7]\n tolerance = 0.1\nelse:\n states_idxs = [9, 10]\n tolerance = 0.15\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\n# GPS\nconds_to_combine = list(range(4))\nplot_policy_final_distance_new(dir_names, states_idxs,\n itr_to_load=itr_to_load, method=method,\n per_element=per_state,\n conds_to_combine=conds_to_combine,\n latex_plot=True,\n gps_models_labels=gps_models_labels,\n block=block, tolerance=tolerance,\n plot_title='Training conditions')\n\nconds_to_combine = list([4])\nplot_policy_final_distance_new(dir_names, states_idxs,\n itr_to_load=itr_to_load, method=method,\n per_element=per_state,\n conds_to_combine=conds_to_combine,\n latex_plot=True,\n gps_models_labels=gps_models_labels,\n block=block, tolerance=tolerance,\n plot_title='Test condition')\n\n# # TRAJOPT\n# conds_to_combine = list(range(5))\n# conds_to_combine = None\n# plot_policy_final_distance_new(dir_names, states_idxs,\n# itr_to_load=itr_to_load, method=method,\n# per_element=per_state,\n# conds_to_combine=conds_to_combine,\n# latex_plot=True,\n# gps_models_labels=gps_models_labels,\n# block=block, tolerance=tolerance)\n\ninput('Showing plots. 
Press a key to close...')\n" }, { "alpha_fraction": 0.5654468536376953, "alphanum_fraction": 0.5687309503555298, "avg_line_length": 31.052631378173828, "blob_id": "f6c50f26e1507a409bb85f2cbaf84ca8bc6b11f6", "content_id": "ec445a5fe5190ddc1945d65471cbe6a754d94c68", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8526, "license_type": "permissive", "max_line_length": 90, "num_lines": 266, "path": "/robolearn/torch/algorithms/rl_algos/reinforce/reinforce.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Haarnoja's TensorFlow SQL implementation\n\nhttps://github.com/haarnoja/softqlearning\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n# from torch.autograd import Variable\n\nfrom collections import OrderedDict\n\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.logging import logger\nfrom robolearn.models.policies import MakeDeterministic\n\nEPS = 1e-6\n\n\ndef assert_shape(tensor, expected_shape):\n tensor_shape = list(tensor.shape)\n assert len(tensor_shape) == len(expected_shape)\n assert all([a == b for a, b in zip(tensor_shape, expected_shape)])\n\n\nclass Reinforce(RLAlgorithm, TorchAlgorithm):\n \"\"\"Reinforce Algorithm\n\n \"\"\"\n def __init__(self,\n env,\n policy,\n\n policy_lr=1e-3,\n optimizer_class=optim.Adam,\n\n causality=True,\n discounted=False,\n\n plotter=None,\n eval_deterministic=True,\n **kwargs):\n \"\"\"\n\n Args:\n env:\n qf (`robolearn.PyTorchModule`): Q-function approximator.\n policy (`robolearn.PyTorchModule`):\n policy_lr (`float`): Learning rate used for the Policy approximator.\n plotter (`MultiQFPolicyPlotter`): Plotter instance to be used for\n visualizing Q-function during training.\n eval_deterministic: Evaluate with deterministic version of current\n _i_policy.\n **kwargs:\n \"\"\"\n if eval_deterministic:\n eval_policy = MakeDeterministic(policy)\n else:\n eval_policy = policy\n super(Reinforce, self).__init__(\n env=env,\n exploration_policy=policy,\n eval_policy=eval_policy,\n **kwargs\n )\n self.policy = policy\n\n self.plotter = plotter\n\n # Env data\n self._action_dim = self.explo_env.action_space.low.size\n self._obs_dim = self.explo_env.observation_space.low.size\n\n # Optimize Policy\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n\n # Return computation\n self._causality = causality\n self.discounted = discounted\n\n def pretrain(self):\n # Match target Qfcn with current one\n self._update_target_q_fcn()\n\n def _do_training(self):\n # batch = self.get_batch()\n paths = self.get_exploration_paths()\n\n # Update Networks\n\n # print('n_step', self._n_total_train_steps)\n # bellman_residual = self._update_softq_fcn(paths)\n surrogate_cost = self._update_policy(paths)\n # self._update_target_softq_fcn()\n\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n # self.eval_statistics['Bellman Residual (QFcn)'] = \\\n # np.mean(ptu.get_numpy(bellman_residual))\n self.eval_statistics['Surrogate Reward (Policy)'] = \\\n np.mean(ptu.get_numpy(surrogate_cost))\n\n def _update_q_fcn(self, batch):\n \"\"\"\n Q-fcn update\n Args:\n batch:\n\n 
Returns:\n\n \"\"\"\n # TODO: Implement for AC VERSION\n pass\n # obs = batch['observations']\n # actions = batch['actions']\n # next_obs = batch['next_observations']\n # rewards = batch['rewards']\n # terminals = batch['terminals']\n # n_batch = obs.shape[0]\n #\n # # \\hat Q in Equation 11\n # # ys = (self.reward_scale * rewards.squeeze() + # Current reward\n # ys = (rewards.squeeze() + # IT IS NOT NECESSARY TO SCALE REWARDS (ALREADY DONE)\n # (1 - terminals.squeeze()) * self.discount * next_value # Future return\n # ).detach() # TODO: CHECK IF I AM DETACHING GRADIENT!!!\n # assert_shape(ys, [n_batch])\n #\n # # Equation 11:\n # bellman_residual = 0.5 * torch.mean((ys - q_values) ** 2)\n #\n # # Gradient descent on _i_policy parameters\n # self._i_qf_optimizer.zero_grad() # Zero all model var grads\n # bellman_residual.backward() # Compute gradient of surrogate_loss\n # self._i_qf_optimizer.step() # Update model vars\n #\n # return bellman_residual\n\n def _update_policy(self, paths):\n \"\"\"\n Policy update:\n Returns:\n\n \"\"\"\n\n rewards = []\n obs = []\n log_probs = []\n qs = []\n\n for ii, path in enumerate(paths):\n rewards.append(path['rewards'])\n obs.append(path['observations'])\n log_probs.append(self.policy.log_action(path['actions'],\n path['observations']))\n qs.append(self._accum_rewards(path['rewards']))\n\n log_probs = torch.cat([log_prob for log_prob in log_probs])\n qs = torch.cat([q for q in qs])\n\n weighted_log_probs = torch.mul(log_probs, qs)\n\n loss = -torch.mean(weighted_log_probs)\n\n # Gradient descent on _i_policy parameters\n self.policy_optimizer.zero_grad() # Zero all model var grads\n loss.backward() # Compute gradient of surrogate_loss\n self.policy_optimizer.step() # Update model vars\n\n return loss\n\n def _update_target_q_fcn(self):\n # Implement for AC version\n pass\n # if self.use_hard_updates:\n # # print(self._n_total_train_steps, self.hard_update_period)\n # if self._n_total_train_steps % self.hard_update_period == 0:\n # ptu.copy_model_params_from_to(self._i_qf, self.target_qf)\n # else:\n # ptu.soft_update_from_to(self._i_qf, self.target_qf,\n # self.soft_target_tau)\n\n def _accum_rewards(self, rewards, normalize=False):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n\n if self._causality:\n discounted_r = ptu.zeros_like(rewards)\n T = rewards.shape[0]\n running_add = 0\n for t in reversed(range(0, T)):\n if self.discounted:\n gamma = self.discount\n else:\n gamma = 1\n running_add = rewards[t, :] + running_add*gamma\n discounted_r[t, :] = running_add\n\n if normalize:\n discounted_r = self._normalize(discounted_r)\n\n else:\n discounted_r = torch.sum(rewards, dim=0, keepdim=True)\n\n return discounted_r\n\n @staticmethod\n def _normalize(data, mean=0.0, std=1.0):\n n_data = (data - torch.mean(data)) / (torch.std(data) + 1e-8)\n return n_data * (std + 1e-8) + mean\n\n @property\n def torch_models(self):\n return [\n self.policy,\n ]\n\n def get_epoch_snapshot(self, epoch):\n if self.plotter is not None:\n self.plotter.draw()\n\n snapshot = super(Reinforce, self).get_epoch_snapshot(epoch)\n snapshot.update(\n policy=self.eval_policy,\n trained_policy=self.policy,\n )\n return snapshot\n\n def evaluate(self, epoch):\n # TODO: AT THIS MOMENT THIS CODE IS THE SAME THAN SUPER\n statistics = OrderedDict()\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n\n 
statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Test\"))\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n if hasattr(self.explo_env, \"log_diagnostics\"):\n print('TODO: WE NEED LOG_DIAGNOSTICS IN ENV')\n self.explo_env.log_diagnostics(test_paths)\n\n average_returns = eval_util.get_average_returns(test_paths)\n statistics['Average Test Return'] = average_returns\n\n # Record the data\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n if self.plotter is not None:\n self.plotter.draw()\n" }, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 37, "blob_id": "8e190751d2f94ba67a138387757693369e5f93f6", "content_id": "feec96333263bc4d07c85f84cd076a220fcedc15", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "permissive", "max_line_length": 37, "num_lines": 1, "path": "/robolearn/algorithms/rl_algos/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .rl_algorithm import RLAlgorithm\n" }, { "alpha_fraction": 0.5611692070960999, "alphanum_fraction": 0.608181357383728, "avg_line_length": 43.72013473510742, "blob_id": "9d5461dc4e224eace2af97f97f988ffe5a868bd5", "content_id": "e1446a711d9b61ed561256a107fc6587d052f700", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13103, "license_type": "permissive", "max_line_length": 143, "num_lines": 293, "path": "/scenarios/tests/trajrep-test-new.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport math\nimport os\nimport rospy\nimport matplotlib.pyplot as plt\nimport tf\nfrom XCM.msg import CommandAdvr\nfrom XCM.msg import JointStateAdvr\nfrom robolearn.old_utils.trajectory_reproducer import TrajectoryReproducer\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.transformations_utils import *\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_torque_position\nfrom robolearn.old_utils.plot_utils import plot_joint_info\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_data\nfrom gazebo_msgs.srv import SpawnModel\nfrom gazebo_msgs.srv import DeleteModel\nfrom geometry_msgs.msg import Pose\nimport rbdl\n\nfrom robolearn.old_utils.robot_model import RobotModel\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Always turn off logger\nos.system(\"gz log -d 0\")\n\n#current_path = os.path.abspath(__file__)\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(path)\n\nload_torques = False\ntorques_saved_filename = 'torques_init_traj.npy'\njoints_to_move = bigman_params['joint_ids']['BA'][:14]\n#joints_to_move = [bigman_params['joint_ids']['BA'][6]]\ncontrol_mode = 'torque' # 'position'\n\nT_init = 3\nT_sleep = 2.\nremove_spawn_new_box = False\nfreq = 100\nbox_position = np.array([0.75,\n 0.00,\n 0.0184])\nbox_size = [0.4, 0.5, 0.3]\nbox_yaw = 0 # Degrees\nbox_orient = tf.transformations.rotation_matrix(np.deg2rad(box_yaw), [0, 0, 1])\nbox_matrix = homogeneous_matrix(rot=box_orient, pos=box_position)\n\nreach_method = 0\nlift_method = 2\n\n#traj_files = 
['trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(reach_method)+'_reach.npy',\n# 'trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(lift_method)+'_lift.npy']\ntraj_files = ['trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(reach_method)+'_reach.npy']\ntraj_rep = TrajectoryReproducer(traj_files)\n\ndefault_joint_stiffness = np.array([8000., 5000., 8000., 5000., 5000., 2000.,\n 8000., 5000., 5000., 5000., 5000., 2000.,\n 5000., 8000., 5000.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.,\n 300., 300.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.])\ndefault_joint_damping = np.array([30., 50., 30., 30., 30., 5.,\n 30., 50., 30., 30., 30., 5.,\n 30., 50., 30.,\n 30., 50., 30., 30., 1., 5., 1.,\n 1., 1.,\n 30., 50., 30., 30., 1., 5., 1.])\n#pd_tau_weights = np.array([0.80, 0.50, 0.80, 0.50, 0.50, 0.20,\n# 0.80, 0.50, 0.50, 0.50, 0.50, 0.20,\n# 0.50, 0.80, 0.50,\n# 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03,\n# 0.03, 0.03,\n# 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03])\npd_tau_weights = np.array([0.80, 0.50, 0.80, 0.50, 0.50, 0.20,\n 0.80, 0.50, 0.50, 0.50, 0.50, 0.20,\n 0.50, 0.80, 0.50,\n 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03,\n 0.03, 0.03,\n 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03])\nKp_tau = 100*pd_tau_weights\n#Kd_tau = 2 * np.sqrt(Kp_tau)\nKd_tau = 2 * pd_tau_weights\n\n#Kp_tau = np.array([80, 50, 80, 50, 50, 20,\n# 80, 50, 50, 50, 50, 20,\n# 50, 80, 50,\n# 50, 80, 50, 50, 10, 20, 300,\n# 3, 3,\n# 50, 80, 50, 50, 10, 20, 300])\n#Kd_tau = np.array([1.60, 1.00, 1.60, 1.00, 1.00, 0.40,\n# 1.60, 1.00, 1.00, 1.00, 1.00, 0.40,\n# 1.00, 1.60, 1.00,\n# 1.00, 1.60, 1.00, 1.00, 0.20, 0.40, 0.00,\n# 0.06, 0.06,\n# 1.00, 1.60, 1.00, 1.00, 0.20, 0.40, 0.00])\n#Kd_tau = default_joint_damping\n\n#Kp_tau = 100 * default_joint_stiffness/1000\n#Kd_tau = 2 * default_joint_damping/100\n\n# ROBOT MODEL for trying ID\nrobot_urdf_file = '/home/domingo/robotology-superbuild/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_urdf_file = '/home/domingo/robotology-superbuild/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf_file)\n#LH_name = 'LWrMot3'\n#RH_name = 'RWrMot3'\n\njoint_pos_state = np.zeros(robot_model.q_size)\njoint_vel_state = np.zeros(robot_model.qdot_size)\njoint_effort_state = np.zeros(robot_model.qdot_size)\njoint_stiffness_state = np.zeros(robot_model.qdot_size)\njoint_damping_state = np.zeros(robot_model.qdot_size)\njoint_state_id = []\n\n\ndef callback(data, params):\n joint_ids = params[0]\n joint_pos_state = params[1]\n joint_effort_state = params[2]\n #if not joint_ids:\n # joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_pos_state[joint_ids] = data.link_position\n joint_effort_state[joint_ids] = data.effort\n joint_stiffness_state[joint_ids] = data.stiffness\n joint_damping_state[joint_ids] = data.damping\n joint_vel_state[joint_ids] = data.link_velocity\n\npublisher = rospy.Publisher(\"/xbotcore/bigman/command\", CommandAdvr, queue_size=10)\nsubscriber = rospy.Subscriber(\"/xbotcore/bigman/joint_states\", JointStateAdvr, callback, (joint_state_id, joint_pos_state, joint_effort_state))\nrospy.init_node('traj_example')\npub_rate = rospy.Rate(freq)\ndes_cmd = CommandAdvr()\ndes_cmd.name = bigman_params['joints_names']\n\nNinit = int(np.ceil(T_init*freq))\njoint_init_traj = 
polynomial5_interpolation(Ninit, traj_rep.get_data(0), joint_pos_state)[0]\n\nprint(\"Moving to initial configuration with Position control.\")\nfor ii in range(Ninit):\n des_cmd.position = joint_init_traj[ii, :]\n des_cmd.stiffness = default_joint_stiffness\n des_cmd.damping = default_joint_damping\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n\n\nif remove_spawn_new_box:\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/cardboard_cube_box/model.sdf', 'r')\n sdf_box = f.read()\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/big_support/model.sdf', 'r')\n sdf_box_support = f.read()\n box_pose = Pose()\n box_pose.position.x = box_position[0]\n box_pose.position.y = box_position[1]\n box_pose.position.z = 1.014\n box_quat = tf.transformations.quaternion_from_matrix(box_matrix)\n box_pose.orientation.x = box_quat[0]\n box_pose.orientation.y = box_quat[1]\n box_pose.orientation.z = box_quat[2]\n box_pose.orientation.w = box_quat[3]\n box_support_pose = Pose()\n box_support_pose.position.x = box_position[0]\n box_support_pose.position.y = box_position[1]\n box_support_pose.position.z = 0\n box_support_pose.orientation = box_pose.orientation\n rospy.wait_for_service('gazebo/delete_model')\n delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n print(\"Deleting previous box...\")\n #raw_input(\"Press for delete box_support\")\n try:\n delete_model_prox(\"box_support\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n try:\n delete_model_prox(\"box\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n rospy.wait_for_service('gazebo/spawn_sdf_model')\n spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)\n print(\"Spawning new box...\")\n try:\n spawn_model_prox(\"box_support\", sdf_box_support, \"box_support\", box_support_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n try:\n spawn_model_prox(\"box\", sdf_box, \"box\", box_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n\n\nN = traj_rep.data_points\n\ntau = np.zeros(robot_model.qdot_size)\na = np.zeros(robot_model.qdot_size)\nM = np.zeros((robot_model.qdot_size, robot_model.qdot_size))\ndes_cmd.name = [bigman_params['joints_names'][idx] for idx in joints_to_move]\n\nqs = traj_rep.traj\nqdots = np.vstack((np.diff(qs, axis=0), np.zeros((1, robot_model.qdot_size))))*freq\nqddots = np.vstack((np.diff(qdots, axis=0), np.zeros((1, robot_model.qdot_size))))*freq*freq\ntaus = np.zeros((N, robot_model.qdot_size))\nsensed_taus = np.zeros((N, robot_model.qdot_size))\nsensed_qs = np.zeros((N, robot_model.q_size))\nsensed_qdots = np.zeros((N, robot_model.qdot_size))\n\ndes_cmd.position = []\ndes_cmd.effort = []\ndes_cmd.stiffness = []\ndes_cmd.damping = []\n#raw_input(\"Press a key for sending %s commands\" % control_mode.lower())\nfor ii in range(N):\n print(\"Sending LIFTING %s cmd %d/%d...\" % (control_mode.lower(), ii+1, N))\n if control_mode.lower() == 'position':\n des_cmd.position = qs[ii, joints_to_move]\n des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n des_cmd.damping = default_joint_damping[joints_to_move]\n\n elif control_mode.lower() == 'torque':\n #rbdl.InverseDynamics(robot_model.model, qs[ii, :], qdots[ii, :], 
qddots[ii, :], tau)\n        #taus_traj[ii, :] = joint_effort_state\n        #print(joint_traj[ii, joints_to_move] - joint_pos_state[joints_to_move])\n        #rbdl.NonlinearEffects(robot_model, joint_pos_state, joint_vel_state, g)\n        #rbdl.NonlinearEffects(robot_model, joint_pos_state, joint_vel_state*0, g)\n        #a = joint_traj_ddots[ii, :] + \\\n        #    default_joint_damping*0 * (joint_traj_dots[ii, :] - joint_vel_state) + \\\n        #    default_joint_stiffness*0.0 * (joint_traj[ii, :] - joint_pos_state)\n\n        # Computed Torque Control\n        a = qddots[ii, :] + \\\n            Kd_tau * (qdots[ii, :] - joint_vel_state) + \\\n            Kp_tau * (qs[ii, :] - joint_pos_state)\n        robot_model.update_torque(tau, joint_pos_state, joint_vel_state, a)\n\n        ## FeedForward + PD compensation\n        #robot_model.update_torque(tau, qs[ii, :], qdots[ii, :], qddots[ii, :])\n        #pd_tau = Kp_tau * (qs[ii, :] - joint_pos_state) + \\\n        #         Kd_tau * (qdots[ii, :] - joint_vel_state)\n        #tau += pd_tau\n\n\n        #pd_tau = default_joint_stiffness * (qs[ii, :] - joint_pos_state) + \\\n        #         default_joint_damping * (qdots[ii, :] - joint_vel_state)\n        #rbdl.InverseDynamics(robot_model, joint_pos_state, joint_vel_state, a, tau)\n        #rbdl.NonlinearEffects(robot_model, qs[ii, :], joint_vel_state*0, tau)\n        #tau = np.ones(robot_model.qdot_size)*-0.5\n        #a = default_joint_damping * (joint_traj_dots[ii, :] - joint_vel_state)\n        #rbdl.CompositeRigidBodyAlgorithm(robot_model, joint_pos_state, M, update_kinematics=True)\n        #rbdl.InverseDynamics(robot_model, joint_pos_state, joint_vel_state/freq, qddots[ii, :]/(freq*freq), tau)\n        #rbdl.InverseDynamics(robot_model, qs[ii, :], qdots[ii, :], qddots[ii, :], tau)\n        #tau += M.dot(a)\n\n        des_cmd.position = []\n        des_cmd.effort = tau[joints_to_move]\n        if ii <= 100:\n            des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n            des_cmd.damping = np.zeros_like(tau[joints_to_move])\n        else:\n            des_cmd.stiffness = []\n            des_cmd.damping = []\n    else:\n        raise ValueError(\"Wrong control mode option: %s\" % control_mode)\n\n    publisher.publish(des_cmd)\n    sensed_taus[ii, :] = joint_effort_state\n    taus[ii, :] = tau\n    sensed_qs[ii, :] = joint_pos_state\n    sensed_qdots[ii, :] = joint_vel_state\n    pub_rate.sleep()\n\n# Return to position control\nprint(\"Changing to position control!\")\nfor ii in range(50):\n    des_cmd.position = joint_pos_state[joints_to_move]\n    des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n    des_cmd.damping = default_joint_damping[joints_to_move]\n    publisher.publish(des_cmd)\n    pub_rate.sleep()\n\n\njoints_to_plot = bigman_params['joint_ids']['LA']\ncols = 3\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nprint(\"Plotting...\")\n#plot_desired_sensed_data(joints_to_plot, qs, sensed_qs, joint_names, data_type='position', block=False)\n#plot_desired_sensed_data(joints_to_plot, qdots, sensed_qdots, joint_names, data_type='velocity', block=False)\nplot_desired_sensed_torque_position(joints_to_plot, taus, sensed_taus,\n                                    qs, sensed_qs, joint_names, block=True, cols=cols)\nraw_input(\"Press a key to finish the script..\")\n" }, { "alpha_fraction": 0.5409709811210632, "alphanum_fraction": 0.5692268013954163, "avg_line_length": 25.664382934570312, "blob_id": "17c594a5716dae1e3c0f857c0fb2238cd75021ff", "content_id": "c5c409c70c52f11f8adb0df4d311aa7e2c65e683", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3893, "license_type": "permissive", "max_line_length": 97, "num_lines": 146, "path": "/examples/mujoco_envs/mujoco_all_sac.py", "repo_name": "domingoesteban/robolearn", 
"src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Soft Actor Critic on some Gym Envs.\n\nNOTE: You need PyTorch 0.3 or more (to have torch.distributions)\n\"\"\"\nimport gym\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.torch.algorithms.rl_algos import SAC\nfrom robolearn.torch.utils.nn import FlattenMlp\n\nimport argparse\n\n\ndef experiment(variant):\n env = NormalizedBoxEnv(gym.make(variant['env_name']))\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n net_size = variant['net_size']\n qf = FlattenMlp(\n hidden_sizes=[net_size, net_size],\n input_size=obs_dim + action_dim,\n output_size=1,\n )\n vf = FlattenMlp(\n hidden_sizes=[net_size, net_size],\n input_size=obs_dim,\n output_size=1,\n )\n policy = TanhGaussianPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n algorithm = SAC(\n explo_env=env,\n policy=policy,\n qf=qf,\n vf=vf,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n\nENV_PARAMS = {\n 'half-cheetah': dict(\n env_name='HalfCheetah-v2',\n algo_params=dict(\n num_epochs=1000,\n num_steps_per_epoch=1000,\n num_steps_per_eval=1000,\n batch_size=128,\n max_path_length=999,\n discount=0.99,\n\n soft_target_tau=0.001,\n policy_lr=3E-4,\n qf_lr=3E-4,\n vf_lr=3E-4,\n ),\n net_size=300\n ),\n 'reacher': dict(\n env_name='Reacher-v2',\n algo_params=dict(\n num_epochs=1000,\n num_steps_per_epoch=2000, # The time in reacher is 50 for done=True\n num_steps_per_eval=50, # Not sure how it works | For now, it adds to max_path_length\n batch_size=128,\n max_path_length=50, # The time in reacher is 50 for done=True\n discount=0.99,\n\n soft_target_tau=0.001,\n policy_lr=3E-4,\n qf_lr=3E-4,\n vf_lr=3E-4,\n ),\n net_size=100\n ),\n 'ant': dict(\n env_name='Ant-v2',\n algo_params=dict(\n num_epochs=1000,\n num_steps_per_epoch=1000,\n num_steps_per_eval=1000,\n batch_size=128,\n max_path_length=999,\n discount=0.99,\n\n soft_target_tau=0.001,\n policy_lr=3E-4,\n qf_lr=3E-4,\n vf_lr=3E-4,\n ),\n net_size=500\n ),\n}\n\nAVAILABLE_ENVS = list(ENV_PARAMS.keys())\nDEFAULT_ENV = 'half-cheetah'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env',\n type=str,\n choices=AVAILABLE_ENVS,\n default=DEFAULT_ENV)\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--exp_name', type=str, default=None)\n # parser.add_argument('--exp_name', type=str, default=timestamp())\n # parser.add_argument('--mode', type=str, default='local')\n # parser.add_argument('--log_dir', type=str, default=None)\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.env is None:\n variant = ENV_PARAMS[DEFAULT_ENV]\n else:\n variant = ENV_PARAMS[args.env]\n\n # Net size\n if args.net_size is not None:\n variant['net_size'] = args.net_size\n\n # Experiment name\n if args.exp_name is None:\n exp_name = variant['env_name']\n else:\n exp_name = args.exp_name\n\n setup_logger(exp_name, variant=variant)\n experiment(variant)\n" }, { "alpha_fraction": 0.5420928597450256, "alphanum_fraction": 0.5444532036781311, "avg_line_length": 26.042552947998047, "blob_id": "6786dbefb95142e31c45b0916919ec452a2a0c97", "content_id": 
"13a02f6faa3b1ff1a9da560c1b3a0a1a4864409c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1271, "license_type": "permissive", "max_line_length": 77, "num_lines": 47, "path": "/robolearn/utils/samplers/finite_path_sampler.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers.rollout import rollout\n\n\nclass FinitePathSampler(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, env, policy, total_paths, max_path_length=1e20,\n deterministic=None, obs_normalizer=None):\n \"\"\"\n\n Args:\n env:\n policy:\n total_samples:\n max_path_length: Maximum interaction samples per path.\n deterministic:\n \"\"\"\n self.env = env\n self.policy = policy\n self._max_path_length = max_path_length\n self._total_paths =total_paths\n self.deterministic = deterministic\n self._obs_normalizer = obs_normalizer\n\n def start_worker(self):\n pass\n\n def shutdown_worker(self):\n pass\n\n def obtain_samples(self):\n \"\"\"\n\n Returns:\n List of paths (list): A list of all the paths obtained until\n max_samples is reached.\n \"\"\"\n paths = []\n for nn in range(self._total_paths):\n path = rollout(\n self.env, self.policy, max_path_length=self._max_path_length,\n deterministic=self.deterministic,\n obs_normalizer=self._obs_normalizer,\n )\n paths.append(path)\n return paths\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 21.55555534362793, "blob_id": "b05c5fb4cd38f3a8f4ef8436d5a03df1de2db4e2", "content_id": "95f6c622cad6f562cc084b3ce33bedcbd2e7f14a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 60, "num_lines": 27, "path": "/robolearn/models/values/q_function.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\n\n\nclass QFunction(with_metaclass(abc.ABCMeta, object)):\n \"\"\"\n Base state-action value function (Q-function) interface.\n :math:`Q(s_t,a_t)`\n \"\"\"\n def __init__(self, obs_dim, action_dim):\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n\n @abc.abstractmethod\n def get_value(self, observation, action):\n pass\n\n def get_values(self, observations, actions):\n pass\n\n @property\n def obs_dim(self):\n return self._obs_dim\n\n @property\n def action_dim(self):\n return self._action_dim\n" }, { "alpha_fraction": 0.6255594491958618, "alphanum_fraction": 0.6539035439491272, "avg_line_length": 27.323944091796875, "blob_id": "06fdd3fb8922c3a9c7d307b5db1abbb8817f1d8f", "content_id": "aae8031aeb43c91194509cd00a19259676c5bddc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2011, "license_type": "permissive", "max_line_length": 79, "num_lines": 71, "path": "/examples/rl_algos/sql/multigoal_sql_haarnoja.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\" Example script to perform soft Q-learning in the multigoal environment. 
\"\"\"\nimport numpy as np\n\nfrom rllab.envs.normalized_env import normalize\n\nfrom softqlearning.algorithms import SQL\nfrom softqlearning.misc.kernel import adaptive_isotropic_gaussian_kernel\nfrom softqlearning.environments import MultiGoalEnv\n# from robolearn.envs.multigoal import Navigation2dGoalCompoEnv\nfrom softqlearning.replay_buffers import SimpleReplayBuffer\nfrom softqlearning.value_functions import NNQFunction\nfrom softqlearning.misc.plotter import QFPolicyPlotter\nfrom softqlearning.policies import StochasticNNPolicy\nfrom softqlearning.misc.sampler import SimpleSampler\n\n\ndef test():\n\n env = normalize(MultiGoalEnv())\n\n pool = SimpleReplayBuffer(env_spec=env.spec, max_replay_buffer_size=1e6)\n\n sampler = SimpleSampler(\n max_path_length=30, min_pool_size=100, batch_size=64)\n\n base_kwargs = {\n 'sampler': sampler,\n 'epoch_length': 100,\n 'n_epochs': 1000,\n 'n_train_repeat': 1,\n 'eval_render': True,\n 'eval_n_episodes': 10\n }\n\n M = 128\n policy = StochasticNNPolicy(\n env.spec, hidden_layer_sizes=(M, M), squash=True)\n\n qf = NNQFunction(env_spec=env.spec, hidden_layer_sizes=[M, M])\n\n plotter = QFPolicyPlotter(\n qf=qf,\n policy=policy,\n obs_lst=np.array([[-2.5, 0.0], [0.0, 0.0], [2.5, 2.5]]),\n default_action=[np.nan, np.nan],\n n_samples=100)\n\n algorithm = SQL(\n base_kwargs=base_kwargs,\n env=env,\n pool=pool,\n qf=qf,\n policy=policy,\n plotter=plotter,\n policy_lr=3e-4,\n qf_lr=3e-4,\n value_n_particles=16,\n td_target_update_interval=1000,\n kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=32,\n kernel_update_ratio=0.5,\n discount=0.99,\n reward_scale=0.1,\n save_full_state=False)\n\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n test()\n input('Press a key to close...')\n" }, { "alpha_fraction": 0.8636363744735718, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 43, "blob_id": "8ca96897e68bce109b3b04a07651887482b24857", "content_id": "07154ba1665093620a5bfd6b7603e66845c067f3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "permissive", "max_line_length": 43, "num_lines": 1, "path": "/robolearn/torch/models/transitions/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .linear_regression import TVLGDynamics\n" }, { "alpha_fraction": 0.5631856322288513, "alphanum_fraction": 0.5661775469779968, "avg_line_length": 29.920705795288086, "blob_id": "ea523fe8e3581b30946ee36e1650845051dca620", "content_id": "e014732168f1dd94210d638cd790df1d121a3174", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7019, "license_type": "permissive", "max_line_length": 84, "num_lines": 227, "path": "/robolearn/torch/algorithms/rl_algos/gps/mdgps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on Fins GPS implementation\nTODO: ALSO MONTGOMORY\n\nhttps://github.com/cbfinn/gps\n\"\"\"\n\nimport torch\n# from torch.autograd import Variable\nimport gtimer as gt\n\nfrom collections import OrderedDict\n\nfrom robolearn.algorithms.rl_algos import RLAlgorithm\nfrom robolearn.torch.algorithms.torch_algorithm import TorchAlgorithm\n\nfrom robolearn.utils import eval_util\nfrom robolearn.utils.logging import logger\nfrom robolearn.utils.samplers.exploration_rollout import exploration_rollout\nfrom robolearn.utils.data_management import PathBuilder\nfrom robolearn.models.policies import 
ExplorationPolicy\nfrom robolearn.utils.exploration_strategies import SmoothNoiseStrategy\n\nEPS = 1e-6\n\n\ndef assert_shape(tensor, expected_shape):\n tensor_shape = list(tensor.shape)\n assert len(tensor_shape) == len(expected_shape)\n assert all([a == b for a, b in zip(tensor_shape, expected_shape)])\n\n\nclass MDGPS(RLAlgorithm, TorchAlgorithm):\n \"\"\"MDGPS Algorithm\n\n \"\"\"\n def __init__(self,\n local_policies,\n global_policy,\n *args,\n **kwargs):\n \"\"\"\n MDGPS\n \"\"\"\n env = kwargs['env']\n self.local_policies_wrapper = LocalPolWrapper(local_policies, env)\n self.global_policy = global_policy\n\n # MDGPS hyperparameters\n self._traj_opt_inner_iters = kwargs.pop('traj_opt_inner_iters', 1)\n self._train_cond_idxs = kwargs.pop('train_cond_idxs', [0])\n self._test_cond_idxs = kwargs.pop('test_cond_idxs', [0])\n\n super(MDGPS, self).__init__(\n exploration_policy=self.local_policies_wrapper,\n eval_policy=self.global_policy,\n *args,\n **kwargs\n )\n\n def train(self, start_epoch=0):\n # Get snapshot of initial stuff\n if start_epoch == 0:\n self.training_mode(False)\n params = self.get_epoch_snapshot(-1)\n logger.save_itr_params(-1, params)\n\n self._n_env_steps_total = start_epoch * self.num_train_steps_per_epoch\n\n gt.reset()\n gt.set_def_unique(False)\n\n for epoch in gt.timed_for(\n range(start_epoch, self.num_epochs),\n save_itrs=True,\n ):\n self._start_epoch(epoch)\n\n n_policies = self.explo_policy.n_policies\n for cond in self._train_cond_idxs:\n for _ in range(int(self.rollouts_per_epoch/n_policies)):\n self._current_path_builder = PathBuilder()\n\n path = exploration_rollout(self.explo_env,\n self.explo_policy,\n max_path_length=self.max_path_length,\n animated=self._render,\n deterministic=None,\n condition=cond)\n self._handle_path(path)\n self._n_env_steps_total += len(path['observations'])\n\n # Iterative learning step\n gt.stamp('sample')\n self._try_to_train()\n gt.stamp('train')\n\n # Evaluate if requirements are met\n self._try_to_eval(epoch)\n gt.stamp('eval')\n self._end_epoch()\n\n def _do_training(self):\n\n print(\"Getting exploration paths...\")\n exploration_paths = self.get_exploration_paths()\n\n self._update_dynamics_models()\n\n self._compute_samples_cost()\n\n if self._n_total_train_steps == 0:\n print(\"Updating the policy for the first time\")\n self._update_policy()\n\n self._update_policy_linearization()\n\n if self._n_total_train_steps > 0:\n self._update_kl_step_size()\n\n # C-step\n for ii in range(self._traj_opt_inner_iters):\n print(\"TrajOpt inner_iter %02d\" % ii)\n self._update_trajectories()\n\n # S-step\n self._update_policy()\n\n def _update_dynamics_models(self):\n print(\"Update dynamics model\")\n pass\n\n def _compute_samples_cost(self):\n print(\"Evaluate samples costs\")\n pass\n\n def _update_policy(self):\n print(\"Updating the policy\")\n pass\n\n def _update_policy_linearization(self):\n print(\"Update policy linearizations\")\n pass\n\n def _update_kl_step_size(self):\n print(\"Update KL step size\")\n pass\n\n def _update_trajectories(self):\n print(\"Update trajectories\")\n pass\n\n @property\n def torch_models(self):\n return [\n self.global_policy,\n ]\n\n def evaluate(self, epoch):\n # Create a new eval_statistics\n statistics = OrderedDict()\n\n # Update from previous eval_statisics\n if self.eval_statistics is not None:\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n\n 
statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Test\",\n ))\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n if hasattr(self.explo_env, \"log_diagnostics\"):\n self.explo_env.log_diagnostics(test_paths)\n\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n\nclass LocalPolWrapper(ExplorationPolicy):\n def __init__(self, local_policies, env, noisy=True, sigma=5.0,\n sigma_scale=1.0):\n self._local_policies = local_policies\n self._current_pol_idx = None\n\n action_dim = self._local_policies[-1].action_dim\n ExplorationPolicy.__init__(self,\n action_dim=action_dim)\n\n self._T = self._local_policies[-1].H\n\n self._noisy = noisy\n self.es = SmoothNoiseStrategy(env.action_space,\n horizon=self._T,\n smooth=True,\n renormalize=True,\n sigma=sigma,\n sigma_scale=[sigma_scale]*action_dim)\n\n self._noise = torch.zeros((self.n_policies, self._T, self.action_dim))\n\n def reset(self, condition=None):\n self._current_pol_idx = condition\n self._current_time = 0\n\n self.es.reset()\n\n def get_action(self, *args, **kwargs):\n local_policy = self._local_policies[self._current_pol_idx]\n kwargs['t'] = self._current_time\n self._current_time += 1\n if self._noisy:\n return self.es.get_action(local_policy, *args, **kwargs)\n else:\n return local_policy.get_action(*args, **kwargs)\n\n @property\n def n_policies(self):\n return len(self._local_policies)\n\n @property\n def horizon(self):\n return self._T\n" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.7551020383834839, "avg_line_length": 23.5, "blob_id": "8046ad0be19b26c59f3dd9b8a84719741b220921", "content_id": "52d9c700192e55956396f7a8dcb597d04fd9fc78", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "permissive", "max_line_length": 26, "num_lines": 2, "path": "/robolearn/torch/models/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .values import *\nfrom .transitions import *\n" }, { "alpha_fraction": 0.6397557854652405, "alphanum_fraction": 0.6397557854652405, "avg_line_length": 28.479999542236328, "blob_id": "5f43c252f22524f9ebfb785dea0bf29766865d4a", "content_id": "c2f47a26a73e765a08f693eb0a143368cde6746e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1474, "license_type": "permissive", "max_line_length": 69, "num_lines": 50, "path": "/robolearn/envs/proxy_env.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom gym import Env\nfrom robolearn.utils.serializable import Serializable\n\n\nclass ProxyEnv(Serializable, Env):\n def __init__(self, wrapped_env):\n Serializable.quick_init(self, locals())\n self._wrapped_env = wrapped_env\n self.action_space = self._wrapped_env.action_space\n self.observation_space = self._wrapped_env.observation_space\n\n @property\n def wrapped_env(self):\n return self._wrapped_env\n\n def reset(self, *args, **kwargs):\n return self._wrapped_env.reset(*args, **kwargs)\n\n def step(self, action):\n return self._wrapped_env.step(action)\n\n def render(self, *args, **kwargs):\n return self._wrapped_env.render(*args, **kwargs)\n\n def close(self, *args, **kwargs):\n return self._wrapped_env.close(*args, **kwargs)\n\n def seed(self, *args, **kwargs):\n return self._wrapped_env.seed(*args, **kwargs)\n\n def 
log_diagnostics(self, paths, *args, **kwargs):\n if hasattr(self._wrapped_env, 'log_diagnostics'):\n self._wrapped_env.log_diagnostics(paths, *args, **kwargs)\n\n @property\n def horizon(self):\n return self._wrapped_env.horizon\n\n def terminate(self):\n if hasattr(self.wrapped_env, \"terminate\"):\n self.wrapped_env.terminate()\n\n @property\n def action_dim(self):\n return np.prod(self.action_space.shape)\n\n @property\n def obs_dim(self):\n return np.prod(self.observation_space.shape)\n" }, { "alpha_fraction": 0.5933353900909424, "alphanum_fraction": 0.5979635715484619, "avg_line_length": 29.575471878051758, "blob_id": "b54de2825ad8e22dbd2d5f038822bdbe56be9283", "content_id": "3d4b7d11ca582f247832929acc4b79b03c8e9ae2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3241, "license_type": "permissive", "max_line_length": 80, "num_lines": 106, "path": "/examples/rl_algos/gps/plot_centauro_mdgps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom robolearn.utils.plots.core import subplots\nfrom robolearn.utils.plots import get_csv_data\n\n\ndef main(args):\n # plot_process_general_data(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n\n # plot_process_iu_values_errors(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n #\n # plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n #\n # plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,\n # block=False)\n\n plot_eta(csv_file=args.file, n_local_pols=args.n_locals)\n\n plot_global_pol_return(csv_file=args.file, n_local_pols=args.n_locals)\n\n\ndef plot_global_pol_return(csv_file, n_local_pols=1, block=False):\n labels_to_plot = ['Global Mean Return']\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_local_pols):\n new_string = ('[Cond-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n # new_string = '' + label\n # new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_local_pols)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Avg Return and Avg Reward',\n fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n if n_subplots > 0:\n axs[-1].set_xlabel('Episodes')\n plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n\n\ndef plot_eta(csv_file, n_local_pols=1, block=False):\n labels_to_plot = ['Eta']\n\n # Add Intentional-Unintentional Label\n new_labels = list()\n for label in labels_to_plot:\n for uu in range(n_local_pols):\n new_string = ('[Cond-%02d] ' % uu) + label\n new_labels.append(new_string)\n\n # new_string = '' + label\n # new_labels.append(new_string)\n\n n_subplots = len(labels_to_plot) * (n_local_pols)\n\n data = get_csv_data(csv_file, new_labels)\n\n fig, axs = subplots(n_subplots)\n if not isinstance(axs, np.ndarray):\n axs = np.array([axs])\n fig.subplots_adjust(hspace=0)\n fig.suptitle('Duals', fontweight='bold')\n\n for aa, ax in enumerate(axs):\n ax.plot(data[aa])\n ax.set_ylabel(new_labels[aa])\n plt.setp(ax.get_xticklabels(), visible=False)\n\n if n_subplots > 0:\n axs[-1].set_xlabel('Episodes')\n 
plt.setp(axs[-1].get_xticklabels(), visible=True)\n\n plt.show(block=block)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./progress.csv',\n help='path to the progress.csv file')\n parser.add_argument('--n_locals', type=int, default=1,\n help='N locals')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n" }, { "alpha_fraction": 0.49670782685279846, "alphanum_fraction": 0.5689986348152161, "avg_line_length": 36.38461685180664, "blob_id": "cee1f098c276b52b300eee40acf95eca2a48f421", "content_id": "04e1ef605d4d06fa84b7c54d81737d6dd8ccf002", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7290, "license_type": "permissive", "max_line_length": 96, "num_lines": 195, "path": "/scripts/sim_centauro_tray.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers import rollout\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import set_gpu_mode\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn.torch.policies import MultiPolicySelector\nfrom robolearn.torch.policies import WeightedMultiPolicySelector\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.models.policies import MakeDeterministic\nfrom robolearn.models.policies import ExplorationPolicy\nimport os\nfrom robolearn.utils.plots import plot_reward_composition\nfrom robolearn.utils.plots import plot_reward_iu\nfrom robolearn.utils.plots import plot_weigths_unintentionals\nfrom robolearn.utils.plots import plot_q_vals\n\nimport argparse\nimport joblib\nimport uuid\nfrom robolearn.utils.logging import logger\nimport json\nimport numpy as np\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfilename = str(uuid.uuid4())\nSEED = 110\n\n\ndef simulate_policy(args):\n\n np.random.seed(SEED)\n ptu.seed(SEED)\n\n data = joblib.load(args.file)\n if args.deterministic:\n if args.un > -1:\n print('Using the deterministic version of the UNintentional policy '\n '%02d.' 
% args.un)\n if 'u_policy' in data:\n policy = MakeDeterministic(\n MultiPolicySelector(data['u_policy'], args.un))\n # WeightedMultiPolicySelector(data['u_policy'], args.un))\n else:\n # policy = MakeDeterministic(data['u_policies'][args.un])\n if isinstance(data['policy'], TanhGaussianPolicy):\n policy = MakeDeterministic(data['policy'])\n else:\n policy = MakeDeterministic(\n WeightedMultiPolicySelector(data['policy'], args.un)\n )\n else:\n print('Using the deterministic version of the Intentional policy.')\n if isinstance(data['policy'], ExplorationPolicy):\n policy = MakeDeterministic(data['policy'])\n else:\n policy = data['policy']\n else:\n if args.un > -1:\n print('Using the UNintentional stochastic policy %02d' % args.un)\n if 'u_policy' in data:\n # policy = MultiPolicySelector(data['u_policy'], args.un)\n policy = WeightedMultiPolicySelector(data['u_policy'], args.un)\n else:\n policy = WeightedMultiPolicySelector(data['policy'], args.un)\n # policy = data['policy'][args.un]\n else:\n print('Using the Intentional stochastic policy.')\n # policy = data['exploration_policy']\n policy = data['policy']\n\n print(\"Policy loaded!!\")\n\n # Load environment\n dirname = os.path.dirname(args.file)\n with open(os.path.join(dirname, 'variant.json')) as json_data:\n log_data = json.load(json_data)\n env_params = log_data['env_params']\n H = int(log_data['path_length'])\n env_params['is_render'] = True\n\n if 'obs_mean' in data.keys():\n obs_mean = data['obs_mean']\n print('OBS_MEAN')\n print(repr(obs_mean))\n else:\n obs_mean = None\n # obs_mean = np.array([ 0.07010766, 0.37585765, 0.21402615, 0.24426296, 0.5789634 ,\n # 0.88510203, 1.6878743 , 0.02656335, 0.03794186, -1.0241051 ,\n # -0.5226027 , 0.6198239 , 0.49062446, 0.01197532, 0.7888951 ,\n # -0.4857273 , 0.69160587, -0.00617676, 0.08966777, -0.14694819,\n # 0.9559917 , 1.0450271 , -0.40958315, 0.86435956, 0.00609685,\n # -0.01115279, -0.21607827, 0.9762933 , 0.80748135, -0.48661205,\n # 0.7473679 , 0.01649722, 0.15451911, -0.17285274, 0.89978695])\n\n if 'obs_var' in data.keys():\n obs_var = data['obs_var']\n print('OBS_VAR')\n print(repr(obs_var))\n else:\n obs_var = None\n # obs_var = np.array([0.10795759, 0.12807205, 0.9586606 , 0.46407 , 0.8994803 ,\n # 0.35167143, 0.30286264, 0.34667444, 0.35105848, 1.9919134 ,\n # 0.9462659 , 2.245269 , 0.84190637, 1.5407104 , 0.1 ,\n # 0.10330457, 0.1 , 0.1 , 0.1 , 0.1528581 ,\n # 0.1 , 0.1 , 0.1 , 0.1 , 0.1 ,\n # 0.1 , 0.1 , 0.1 , 0.1 , 0.12320185,\n # 0.1 , 0.18369523, 0.200373 , 0.11895574, 0.15118493])\n print(env_params)\n\n if args.subtask and args.un != -1:\n env_params['subtask'] = args.un\n # else:\n # env_params['subtask'] = None\n\n env = NormalizedBoxEnv(\n CentauroTrayEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n print(\"Environment loaded!!\")\n\n if args.gpu:\n set_gpu_mode(True)\n policy.cuda()\n if isinstance(policy, MakeDeterministic):\n if isinstance(policy.stochastic_policy, PyTorchModule):\n policy.stochastic_policy.train(False)\n else:\n if isinstance(policy, PyTorchModule):\n policy.train(False)\n\n while True:\n if args.record:\n rollout_start_fcn = lambda: \\\n env.start_recording_video('centauro_video.mp4')\n rollout_end_fcn = lambda: \\\n env.stop_recording_video()\n else:\n rollout_start_fcn = None\n rollout_end_fcn = None\n\n obs_normalizer = data.get('obs_normalizer')\n\n if args.H != -1:\n H = args.H\n\n path = rollout(\n env,\n policy,\n 
max_path_length=H,\n animated=True,\n obs_normalizer=obs_normalizer,\n rollout_start_fcn=rollout_start_fcn,\n rollout_end_fcn=rollout_end_fcn,\n )\n plot_rollout_reward(path)\n\n if hasattr(env, \"log_diagnostics\"):\n env.log_diagnostics([path])\n\n logger.dump_tabular()\n\n if args.record:\n break\n\n\ndef plot_rollout_reward(path):\n import matplotlib.pyplot as plt\n rewards = np.squeeze(path['rewards'])\n\n plt.plot(rewards)\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, default='./params.pkl',\n help='path to the snapshot file')\n parser.add_argument('--H', type=int, default=-1,\n help='Max length of rollout')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--deterministic', action=\"store_true\")\n parser.add_argument('--record', action=\"store_true\")\n parser.add_argument('--env', type=str, default='manipulator')\n parser.add_argument('--un', type=int, default=-1,\n help='Unintentional id')\n parser.add_argument('--subtask', action='store_true')\n args = parser.parse_args()\n\n simulate_policy(args)\n input('Press a key to finish the script')\n" }, { "alpha_fraction": 0.5772008299827576, "alphanum_fraction": 0.5998438596725464, "avg_line_length": 26.105819702148438, "blob_id": "5280d90cb88aba10e56ee867121064b34c273693", "content_id": "7ea3180bcff7f15496302b47e5233d0446cc9687", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5123, "license_type": "permissive", "max_line_length": 75, "num_lines": 189, "path": "/examples/robolearn_gym/robolearn_gym_all_sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Soft Actor Critic on some Gym Envs.\n\nNOTE: You need PyTorch 0.3 or more (to have torch.distributions)\n\"\"\"\nimport gym\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\n\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom rlkit.torch.sac.sac import SoftActorCritic\nfrom robolearn.torch.utils.nn import FlattenMlp\n\nimport argparse\n\n\ndef experiment(variant):\n ptu._use_gpu = variant['gpu']\n env = NormalizedBoxEnv(gym.make(variant['env_name']))\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n net_size = variant['net_size']\n\n qf = FlattenMlp(\n hidden_sizes=[net_size, net_size],\n input_size=obs_dim + action_dim,\n output_size=1,\n )\n vf = FlattenMlp(\n hidden_sizes=[net_size, net_size],\n input_size=obs_dim,\n output_size=1,\n )\n policy = TanhGaussianPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n\n algorithm = SoftActorCritic(\n env=env,\n training_env=env,\n save_environment=False,\n policy=policy,\n qf=qf,\n vf=vf,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n\n algorithm.train()\n\n return algorithm\n\n\nSHARED_PARAMS = dict(\n # Common RLAlgo params\n num_steps_per_epoch=1000, # Epoch length\n num_updates_per_env_step=1, # Like n_train_repeat??\n num_steps_per_eval=1000, # like eval_n_episodes??\n # EnvSampler params\n max_path_length=1000,\n render=False,\n # ReplayBuffer params\n batch_size=128,\n min_buffer_size=1000, # Minimum buffer size to start training\n replay_buffer_size=1e6,\n # SoftAC params\n soft_target_tau=0.001,\n 
policy_lr=3e-4,\n qf_lr=3e-4,\n vf_lr=3e-4,\n\n # Reward params\n discount=0.99,\n\n)\n\nENV_PARAMS = {\n 'cogimon': dict(\n env_name='CogimonLocomotionBulletEnv-v0',\n algo_params=dict(\n num_epochs=10000,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=10,\n ),\n net_size=500,\n ),\n 'cogimon-render': dict(\n env_name='CogimonLocomotionBulletEnvRender-v0',\n algo_params=dict(\n num_epochs=10000,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=10,\n ),\n net_size=500,\n ),\n 'manipulator': dict(\n env_name='Reacher2D3DofObstacleEnv-v0',\n algo_params=dict(\n num_epochs=1000,\n max_path_length=500,\n min_buffer_size=500,\n reward_scale=10,\n ),\n net_size=128,\n ),\n 'manipulator-render': dict(\n env_name='Pusher2D3DofObstacleBulletEnvRender-v0',\n algo_params=dict(\n num_epochs=1000,\n max_path_length=500,\n min_buffer_size=500,\n reward_scale=10,\n ),\n net_size=128,\n ),\n}\n\nAVAILABLE_ENVS = list(ENV_PARAMS.keys())\nDEFAULT_ENV = 'cogimon'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env',\n type=str,\n choices=AVAILABLE_ENVS,\n default=DEFAULT_ENV)\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='last')\n parser.add_argument('--snap_gap', type=int, default=100)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.env is None:\n expt_variant = ENV_PARAMS[DEFAULT_ENV]\n else:\n if args.render:\n args.env = args.env + '-render'\n expt_variant = ENV_PARAMS[args.env]\n\n default_algo_params = SHARED_PARAMS\n for param in default_algo_params:\n if param not in expt_variant['algo_params'].keys():\n expt_variant['algo_params'][param] = default_algo_params[param]\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = expt_variant['env_name']\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algorithm = experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.5590000152587891, "alphanum_fraction": 0.559499979019165, "avg_line_length": 31.25806427001953, "blob_id": "4578f4a663319d3970600056abad34a74bdc798e", "content_id": "cff8169bcac3bf5f44fe34c559b6ed858e0e0e9c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "permissive", "max_line_length": 78, "num_lines": 62, "path": "/robolearn/utils/samplers/in_place_path_sampler.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.samplers.rollout import rollout\n\n\nclass InPlacePathSampler(object):\n \"\"\"\n A sampler that does not serialize for sampling. Instead, it just uses\n the current policy and environment as-is.\n\n WARNING: This will affect the environment! 
So\n    ```\n    sampler = InPlacePathSampler(env, ...)\n    sampler.obtain_samples # this has side-effects: env will change!\n    ```\n    \"\"\"\n    def __init__(self, env, policy, total_samples, max_path_length,\n                 deterministic=None, obs_normalizer=None):\n        \"\"\"\n\n        Args:\n            env:\n            policy:\n            total_samples:\n            max_path_length: Maximum interaction samples per path.\n            deterministic:\n        \"\"\"\n        self.env = env\n        self.policy = policy\n        self._max_path_length = max_path_length\n        self._total_samples = total_samples\n        if not total_samples >= max_path_length:\n            raise ValueError(\"Need total_samples >= max_path_length (%d >= %d)\"\n                             % (total_samples, max_path_length))\n        self.deterministic = deterministic\n        self._obs_normalizer = obs_normalizer\n\n    def start_worker(self):\n        pass\n\n    def shutdown_worker(self):\n        pass\n\n    def obtain_samples(self):\n        \"\"\"\n\n        Returns:\n            List of paths (list): A list of all the paths obtained until\n                total_samples is reached.\n        \"\"\"\n        paths = []\n        n_steps_total = 0\n        while n_steps_total < self._total_samples:\n            # Execute a single rollout\n            max_length = min(self._total_samples - n_steps_total,\n                             self._max_path_length)\n            path = rollout(\n                self.env, self.policy, max_path_length=max_length,\n                deterministic=self.deterministic,\n                obs_normalizer=self._obs_normalizer,\n            )\n            paths.append(path)\n            n_steps_total += len(path['observations'])\n        return paths\n" }, { "alpha_fraction": 0.6386831402778625, "alphanum_fraction": 0.6551440358161926, "avg_line_length": 27.904762268066406, "blob_id": "17cc9f162926f8a696dfb99e97420ec7e093c07c", "content_id": "0a84f8c89cea8a05029f6efc5e38db0536ee5de2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1215, "license_type": "permissive", "max_line_length": 76, "num_lines": 42, "path": "/scenarios/dualist_gps/borrar.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom robolearn.old_utils.experience_buffer import ExperienceBuffer\n\nN_trajs = 8\nT = 2\nbuffer_size = 3\ngood_or_bad = 'good'\ntemp_or_cost = 'cost'\n\nall_trajs = list()\nall_costs = list()\nfor ii in range(10):\n    all_trajs.append(np.ones(T)*ii)\n    all_costs.append(all_trajs[-1]*10)\n\nindex_order = np.arange(N_trajs)\nnp.random.shuffle(index_order)\n\nexperience_buffer = ExperienceBuffer(buffer_size, good_or_bad, temp_or_cost)\n\nfor ii in index_order:\n    print(all_trajs[ii])\n\nprint('##' * 20)\n\nfor nn in range(len(index_order) // 2):\n    trajs_to_add = [all_trajs[index_order[2 * nn]],\n                    all_trajs[index_order[2 * nn + 1]]]\n    costs_to_add = [all_costs[index_order[2 * nn]],\n                    all_costs[index_order[2 * nn + 1]]]\n    print('trajs', trajs_to_add)\n    print('costs', costs_to_add)\n    experience_buffer.add(trajs_to_add, costs_to_add)\n    print('len', len(experience_buffer))\n    print('btrajs', experience_buffer._trajs)\n    print('bcosts', experience_buffer._costs)\n    print('--')\n\n\nprint('TRAJS WITH OPS', experience_buffer.get_trajs(2))\nprint('Costs WITH OPS', experience_buffer.get_costs(2))\nprint('BOTH WITH OPS', experience_buffer.get_trajs_and_costs(2))\n\n" }, { "alpha_fraction": 0.8113207817077637, "alphanum_fraction": 0.8113207817077637, "avg_line_length": 34.66666793823242, "blob_id": "15262db5c08ba3542b749062218bf0c902c90f68", "content_id": "137cf0d87d9ec712a8ef0125b6b100efacc571d9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "permissive", "max_line_length": 36, "num_lines": 3, "path": 
"/robolearn/torch/utils/ops/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .logsumexp import logsumexp\nfrom .log_sum_exp import log_sum_exp\nfrom .size_splits import size_splits" }, { "alpha_fraction": 0.5192674994468689, "alphanum_fraction": 0.5369540452957153, "avg_line_length": 45.719154357910156, "blob_id": "c539c1448535475cd5ca495f5da13ef31bd8dfed", "content_id": "d3f0bb838a9c99500e372dd2bbfcb360ce5f5894", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28779, "license_type": "permissive", "max_line_length": 191, "num_lines": 616, "path": "/scenarios/tests/reacher_trajopt/scenario.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\nimport yaml\nfrom builtins import input\n\nimport numpy as np\nfrom robolearn.old_envs.pusher3dof import Pusher3DofBulletEnv\nfrom robolearn.old_utils.sample.sampler import Sampler\n\nfrom robolearn.old_agents import NoPolAgent\nfrom robolearn.old_algos.trajopt.dual_trajopt import DualTrajOpt\n# Costs\nfrom robolearn.old_costs.cost_action import CostAction\n# from robolearn.costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_safe_distance import CostSafeDistance\nfrom robolearn.old_costs.cost_state_difference import CostStateDifference\nfrom robolearn.old_costs.cost_safe_state_difference import CostSafeStateDifference\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\n# from robolearn.envs import BigmanEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_dual_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior # For MDGPS\nfrom robolearn.old_policies.lin_gauss_policy import LinearGaussianPolicy\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.transformations_utils import create_quat_pose\nfrom robolearn.old_utils.traj_opt.dualist_traj_opt import DualistTrajOpt\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n print(\"\\n\\033[1;31mThe script has been killed by the user!!\")\n os._exit(1)\n\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\nclass Scenario(object):\n \"\"\"Defines a RL scenario (environment, agent and learning algorithm)\n\n \"\"\"\n def __init__(self, hyperparams):\n\n self.hyperparams = hyperparams\n\n # Task Parameters\n yaml_path = os.path.dirname(__file__) + '/task_parameters.yaml'\n assert(os.path.exists(yaml_path))\n with open(yaml_path, 'r') as f:\n self.task_params = yaml.load(f)\n\n Tend = self.task_params['Tend']\n Ts = self.task_params['Ts']\n self.task_params['T'] = int(Tend/Ts)\n\n if self.hyperparams['render']:\n self.task_params['render'] = self.hyperparams['render']\n\n self.task_params['seed'] = self.hyperparams['seed']\n\n # Numpy max\n os.environ['OMP_NUM_THREADS'] = str(self.task_params['np_threads'])\n\n # Environment\n self.env = self.create_environment()\n\n 
self.action_dim = self.env.action_dim\n self.state_dim = self.env.state_dim\n self.obs_dim = self.env.obs_dim\n\n # Agent\n self.agent = self.create_agent()\n\n # Costs\n self.cost = self.create_cost()\n\n # Initial Conditions\n self.init_cond = self.create_init_conditions()\n\n # Learning Algorithm\n self.learn_algo = self.create_learning_algo()\n\n def create_environment(self):\n \"\"\"Instantiate an specific RL environment to interact with.\n\n Returns:\n RL environment\n\n \"\"\"\n change_print_color.change('BLUE')\n print(\"\\nCreating Environment...\")\n\n # Environment parameters\n env_with_img = False\n rdn_tgt_pos = False\n render = self.task_params['render']\n obs_like_mjc = self.task_params['obs_like_mjc']\n ntargets = self.task_params['ntargets']\n tgt_weights = self.task_params['tgt_weights']\n tgt_positions = self.task_params['tgt_positions']\n tgt_types = self.task_params['tgt_types']\n sim_timestep = 0.001\n frame_skip = int(self.task_params['Ts']/sim_timestep)\n\n env = Pusher3DofBulletEnv(render=render, obs_with_img=env_with_img,\n obs_mjc_gym=obs_like_mjc, ntargets=ntargets,\n rdn_tgt_pos=rdn_tgt_pos, tgt_types=tgt_types,\n sim_timestep=sim_timestep,\n frame_skip=frame_skip)\n\n env.set_tgt_cost_weights(tgt_weights)\n env.set_tgt_pos(tgt_positions)\n\n print(\"Environment:%s OK!.\" % type(env).__name__)\n\n return env\n\n def create_agent(self):\n \"\"\"Instantiate the RL agent who interacts with the environment.\n\n Returns:\n RL agent\n\n \"\"\"\n change_print_color.change('CYAN')\n print(\"\\nCreating Agent...\")\n\n agent = NoPolAgent(act_dim=self.action_dim, obs_dim=self.obs_dim,\n state_dim=self.state_dim,\n agent_name='agent'+str('%02d' %\n self.hyperparams['run_num']))\n print(\"Agent:%s OK\\n\" % type(agent).__name__)\n\n return agent\n\n def create_cost(self):\n \"\"\"Instantiate the cost that evaluates the RL agent performance.\n\n Returns:\n Cost Function\n\n \"\"\"\n change_print_color.change('GRAY')\n print(\"\\nCreating Costs...\")\n\n # Action Cost\n weight = 1e0 # 1e-4\n target = None\n act_cost = {\n 'type': CostAction,\n 'wu': np.ones(self.action_dim) * weight,\n 'target': target, # Target action value\n }\n\n # # FK Cost\n # fk_l1_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # 'wp': np.array([3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n #\n # fk_l2_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n # #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n\n\n # State costs\n target_distance_object = np.zeros(2)\n # input(self.env.get_state_info())\n # input(self.env.get_state_info(name='tgt0')['idx'])\n state_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n state_final_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n cost_safe_distance = {\n 'type': CostSafeDistance,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt1': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'safe_distance': np.array([0.15, 0.15]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([1.0, 1.0]),\n 'data_idx': self.env.get_state_info(name='tgt1')['idx']\n },\n },\n }\n\n state_diff_weights = self.task_params['state_diff_weights']\n l1_l2_weights = np.array(self.task_params['l1_l2'])\n inside_cost = self.task_params['inside_cost']\n\n cost_state_difference = {\n 'type': CostStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': l1_l2_weights[0], # Weight for l1 norm\n 'l2': l1_l2_weights[1], # Weight for l2 norm\n 'alpha': 1e-10, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'],\n 'idx_to_use': [0, 1, 2], # All: X, Y, theta\n 'wp': np.array(state_diff_weights), # State weights - must be set.\n 'average': None,\n 'target_state': 'tgt0', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt0')['idx'],\n },\n },\n }\n\n cost_final_state_difference = {\n 'type': CostStateDifference,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': l1_l2_weights[0], # Weight for l1 norm\n 'l2': l1_l2_weights[1], # Weight for l2 norm\n 'alpha': 1e-10, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'],\n 'idx_to_use': [0, 1, 2], # All: X, Y, theta\n 'wp': np.array(state_diff_weights), # State weights - must be set.\n 'average': None,\n 'target_state': 'tgt0', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt0')['idx'],\n },\n },\n }\n\n safe_radius = 0.15\n cost_safe_state_difference = {\n 'type': CostSafeStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'][:2],\n 'idx_to_use': [0, 1], # Only X and Y\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': 'tgt1', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt1')['idx'][:2],\n 'safe_distance': np.sqrt([safe_radius**2/2, safe_radius**2/2]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([inside_cost, inside_cost]),\n },\n },\n }\n\n cost_final_safe_state_difference = {\n 'type': CostSafeStateDifference,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'][:2],\n 'idx_to_use': [0, 1], # Only X and Y\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': 'tgt1', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt1')['idx'][:2],\n 'safe_distance': np.sqrt([safe_radius**2/2, safe_radius**2/2]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([inside_cost, inside_cost]),\n },\n },\n }\n\n\n # Sum costs\n # costs_and_weights = [(act_cost, 1.0e-1),\n des_weights = self.task_params['cost_weights']\n print('Costs weights:', des_weights)\n costs_and_weights = [(act_cost, des_weights[0]),\n # # (fk_cost, 1.0e-0),\n # (fk_l1_cost, 1.5e-1),\n # (fk_l2_cost, 1.0e-0),\n # # (fk_final_cost, 1.0e-0),\n # (fk_l1_final_cost, 1.5e-1),\n # (fk_l2_final_cost, 1.0e-0),\n (cost_state_difference, des_weights[1]),\n (cost_final_state_difference, des_weights[2]),\n (cost_safe_state_difference, des_weights[3]),\n (cost_final_safe_state_difference, des_weights[4]),\n # WORKING:\n # (cost_safe_distance, 1.0e+1),\n # (state_cost_distance, 5.0e-0),\n 
# (state_final_cost_distance, 1.0e+3),\n ]\n\n cost_sum = {\n 'type': CostSum,\n 'costs': [cw[0] for cw in costs_and_weights],\n 'weights': [cw[1] for cw in costs_and_weights],\n }\n\n return cost_sum\n\n def create_init_conditions(self):\n \"\"\"Defines the initial conditions for the environment.\n\n Returns:\n Environment' initial conditions.\n\n \"\"\"\n change_print_color.change('MAGENTA')\n print(\"\\nCreating Initial Conditions...\")\n initial_cond = self.task_params['init_cond']\n\n ddof = 3 # Data dof (file): x, y, theta\n pdof = 3 # Pose dof (env): x, y, theta\n ntgt = self.task_params['ntargets']\n\n for cc, cond in enumerate(initial_cond):\n env_condition = np.zeros(self.env.obs_dim)\n env_condition[:self.env.action_dim] = np.deg2rad(cond[:3])\n cond_idx = 2*self.env.action_dim + pdof # EE pose will be obtained from sim\n data_idx = self.env.action_dim\n for tt in range(self.task_params['ntargets']):\n tgt_data = cond[data_idx:data_idx+ddof]\n # tgt_pose = create_quat_pose(pos_x=tgt_data[0],\n # pos_y=tgt_data[1],\n # pos_z=z_fix,\n # rot_yaw=np.deg2rad(tgt_data[2]))\n # env_condition[cond_idx:cond_idx+pdof] = tgt_pose\n tgt_data[2] = np.deg2rad(tgt_data[2])\n env_condition[cond_idx:cond_idx+pdof] = tgt_data\n cond_idx += pdof\n data_idx += ddof\n\n self.env.add_init_cond(env_condition)\n\n return self.env.get_conditions()\n\n def create_learning_algo(self):\n \"\"\"Instantiates the RL algorithm\n\n Returns:\n Learning algorithm\n\n \"\"\"\n change_print_color.change('YELLOW')\n print(\"\\nConfiguring learning algorithm...\\n\")\n\n # Dynamics\n learned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\n init_traj_distr = {'type': init_pd,\n 'init_var': np.array([1.0, 1.0, 1.0])*5.0e-01,\n 'pos_gains': 0.001, # float or array\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': self.env.get_total_joints(), # Total joints in state\n 'state_to_pd': 'joints', # Joints\n 'dDistance': 6,\n }\n\n # Trajectory Optimization Method\n traj_opt_method = {\n 'type': DualistTrajOpt,\n 'bad_const': self.task_params['consider_bad'], # Use bad constraints\n 'good_const': self.task_params['consider_good'], # Use good constraints\n 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'min_nu': 1e-8, # At min_nu, kl_div > kl_step\n 'max_nu': self.task_params['max_nu'], # At max_nu, kl_div < kl_step,\n 'min_omega': 1e-8, # At min_omega, kl_div > kl_step\n 'max_omega': self.task_params['max_omega'], #1e16, # At max_omega, kl_div < kl_step\n 'step_tol': 0.1,\n 'bad_tol': 0.1,\n 'good_tol': 0.1,\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step. 
#TODO: IF TRUE, MAYBE IT DOES WORK WITH MDGPS because it doesn't consider dual vars\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n 'adam_alpha': 0.5,\n 'adam_max_iter': 500,\n 'weight_bad': self.task_params['weight_bad'],\n 'weight_good': self.task_params['weight_good'],\n }\n\n good_trajs = None\n bad_trajs = None\n dualtrajopt_hyperparams = {\n 'inner_iterations': self.task_params['inner_iterations'], # Times the trajectories are updated\n # G/B samples selection\n 'good_samples': good_trajs, # Good samples demos\n 'bad_samples': bad_trajs, # Bad samples demos\n 'n_good_samples': self.task_params['n_good_samples'], # Number of good samples per each trajectory\n 'n_bad_samples': self.task_params['n_bad_samples'], # Number of bad samples per each trajectory\n 'n_good_buffer': self.task_params['n_good_buffer'], # Number of good samples in the buffer\n 'n_bad_buffer': self.task_params['n_bad_buffer'], # Number of bad samples in the buffer\n 'good_traj_selection_type': self.task_params['good_traj_selection_type'], # 'always', 'only_traj'\n 'bad_traj_selection_type': self.task_params['bad_traj_selection_type'], # 'always', 'only_traj'\n 'bad_costs': self.task_params['bad_costs'],\n # G/B samples fitting\n 'duality_dynamics_type': 'duality', # Samples to use to update the dynamics 'duality', 'iteration'\n # Initial dual variables\n 'init_eta': 0.1,#4.62,\n 'init_nu': 1e-8,\n 'init_omega': 1e-8,\n # KL step (epsilon)\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'kl_step': self.task_params['kl_step'], # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step)\n 'max_step_mult': 10.0, # Max possible value of step multiplier (multiplies kl_step)\n # KL bad (xi)\n 'kl_bad': self.task_params['kl_bad'], #4.2 # Xi KL base value | kl_div_b >= kl_bad\n 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad)\n 'max_bad_mult': 10.0, # Max possible value of step multiplier (multiplies base_kl_bad)\n # KL good (chi)\n 'kl_good': self.task_params['kl_good'], #2.0, # Chi KL base value | kl_div_g <= kl_good\n 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good)\n 'max_good_mult': 10.0, # Max possible value of step multiplier (multiplies base_kl_good)\n # LinearPolicy 'projection'\n 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add', # Mode to update dynamics prior (Not used in ConstantPolicyPrior)\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n 'min_bad_var': np.array([3.0, 3.0, 3.0])*1.0e-02,\n 'min_good_var': np.array([3.0, 3.0, 3.0])*1.0e-02,\n # TEMP Hyperparams\n 'min_bad_rel_diff': self.task_params['min_bad_rel_diff'],\n 'max_bad_rel_diff': self.task_params['max_bad_rel_diff'],\n 'mult_bad_rel_diff': self.task_params['mult_bad_rel_diff'],\n 'good_fix_rel_multi': self.task_params['good_fix_rel_multi'],\n }\n\n gps_hyperparams = {\n 'T': self.task_params['T'], # Total points\n 'dt': self.task_params['Ts'],\n 'iterations': self.task_params['iterations'], # GPS episodes --> K iterations\n 'sample_real_time': False,\n # Samples\n 'num_samples': self.task_params['num_samples'], # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'seed': self.task_params['seed'],\n 
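The kl_step / min_step_mult / max_step_mult entries above suggest that the effective KL bound per iteration is the base step scaled by a clamped multiplier. A hedged sketch of that clamping only — the actual adjustment rule lives inside the DualTrajOpt implementation, which is not shown here:

import numpy as np

def effective_kl_step(base_kl_step, step_mult, min_mult=0.01, max_mult=10.0):
    # Keep the per-iteration multiplier inside [min_mult, max_mult] before it
    # scales the base KL step (mirrors 'min_step_mult' / 'max_step_mult').
    return base_kl_step * float(np.clip(step_mult, min_mult, max_mult))

print(effective_kl_step(0.5, 25.0))  # multiplier capped at 10.0 -> 5.0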
'smooth_noise': True, # Apply Gaussian filter to noise generated\n 'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': 1.e-1*np.ones(self.action_dim), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n # Cost\n 'cost': self.cost,\n # Conditions\n 'conditions': len(self.init_cond), # Total number of initial conditions\n 'train_conditions': self.task_params['train_cond'], # Indexes of conditions used for training\n # TrajDist\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-2, # Max value for x0sigma in trajectories\n # TrajOpt\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization #TODO: CHECK THIS VALUE\n # Others\n 'algo_hyperparams': dualtrajopt_hyperparams,\n 'data_files_dir': self.hyperparams['log_dir'],\n }\n\n return DualTrajOpt(self.agent, self.env, **gps_hyperparams)\n\n def train(self, itr_load=None):\n \"\"\"Train the RL agent with the learning algorithm.\n\n Args:\n itr_load: Iteration number with which to start\n\n Returns:\n bool: True for success, False otherwise.\n\n \"\"\"\n change_print_color.change('WHITE')\n return self.learn_algo.run(itr_load)\n\n def test_policy(self, pol_type=None, condition=0, iteration=-1):\n \"\"\"Test the RL agent using the policy learned in the specificied\n iteration in the specific condition.\n\n Args:\n pol_type: 'global' or 'local'\n condition: Condition number to test the agent\n iteration: Iteration to test the agent\n\n Returns:\n bool: True for success, False otherwise.\n\n \"\"\"\n noise = np.zeros((self.task_params['T'], self.agent.act_dim))\n\n if iteration == -1:\n for rr in range(600):\n temp_path = self.hyperparams['log_dir'] + ('/itr_%02d' % rr)\n if os.path.exists(temp_path):\n iteration += 1\n\n if iteration == -1:\n print(\"There is not itr_XX data in '%s'\"\n % self.hyperparams['log_dir'])\n return False\n\n dir_path = 'itr_%02d/' % iteration\n\n # itr_data_file = dir_path + 'iteration_data_itr_%02d.pkl' % iteration\n itr_data_file = dir_path + 'traj_distr_params_itr_%02d.pkl' % iteration\n\n change_print_color.change('BLUE')\n print(\"\\nLoading iteration data '%s'...\" % itr_data_file)\n\n itr_data = self.learn_algo.data_logger.unpickle(itr_data_file)\n # policy = itr_data[condition].traj_distr\n policy = LinearGaussianPolicy(itr_data[condition]['K'],\n itr_data[condition]['k'],\n itr_data[condition]['pol_covar'],\n itr_data[condition]['chol_pol_covar'],\n itr_data[condition]['inv_pol_covar'])\n\n stop = False\n while stop is False:\n self.env.reset(condition=condition)\n input('Press a key to start sampling...')\n sample = self.agent.sample(self.env, condition, self.task_params['T'],\n self.task_params['Ts'], noise, policy=policy,\n save=False)\n answer = input('Execute again. 
Write (n/N) to stop:')\n if answer.lower() in ['n']:\n stop = True\n\n return True\n" }, { "alpha_fraction": 0.6284129023551941, "alphanum_fraction": 0.6395116448402405, "avg_line_length": 25.815475463867188, "blob_id": "a70fcd8c0037ec4a54ac3ebb7a05229ef1f420cc", "content_id": "89ae757e122d5d90fc26d7bc08374a901bc5a95d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4505, "license_type": "permissive", "max_line_length": 80, "num_lines": 168, "path": "/examples/rl_algos/pg/reacher_reinforce.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Reinforce on Pusher2D3DofGoalCompoEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn.utils.data_management import SimpleReplayBuffer\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofBulletEnv\n\nfrom robolearn.torch.algorithms.rl_algos.reinforce import Reinforce\n\nfrom robolearn.torch.policies import TanhGaussianPolicy\n\nimport argparse\n\n\ndef experiment(variant):\n ptu.set_gpu_mode(variant['gpu'])\n\n env = NormalizedBoxEnv(\n Reacher2D3DofBulletEnv(**variant['env_params'])\n )\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n net_size = variant['net_size']\n\n # policy = GaussianPolicy(\n policy = TanhGaussianPolicy(\n hidden_sizes=[net_size, net_size],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n if ptu.gpu_enabled():\n policy.cuda()\n\n replay_buffer = SimpleReplayBuffer(\n variant['algo_params']['replay_buffer_size'],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n variant['algo_params']['replay_buffer'] = replay_buffer\n\n # QF Plot\n # variant['algo_params']['epoch_plotter'] = None\n\n algorithm = Reinforce(\n env=env,\n training_env=env,\n save_environment=False,\n policy=policy,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n return algorithm\n\n\nPATH_LENGTH = 500\nPATHS_PER_EPOCH = 5\nPATHS_PER_EVAL = 1\n\nexpt_params = dict(\n algo_name=Reinforce.__name__,\n algo_params=dict(\n # Common RLAlgo params\n num_epochs=1000, # n_epochs\n num_steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n num_updates_per_train_call=1, # How to many run algorithm train fcn\n num_steps_per_eval=PATHS_PER_EVAL * PATH_LENGTH,\n # EnvSampler params\n max_path_length=PATH_LENGTH, # max_path_length\n render=False,\n # ReplayBuffer params\n batch_size=64, # batch_size\n replay_buffer_size=1e4,\n # Reinforce params\n # TODO: _epoch_plotter\n policy_lr=3e-4,\n discount=0.99,\n reward_scale=1,\n causality=True,\n discounted=True,\n ),\n net_size=64\n)\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nenv_params = dict(\n is_render=False,\n obs_with_img=False,\n rdn_tgt_pos=True,\n tgt_pose=None,\n rdn_robot_config=True,\n robot_config=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n obs_distances=False, # If True obs contain 'distance' vectors instead poses\n tgt_cost_weight=1.0,\n ctrl_cost_weight=1.0e-2,\n use_log_distances=False,\n # use_log_distances=False,\n log_alpha=1e-6,\n tgt_tolerance=0.05,\n max_time=10,\n # max_time=PATH_LENGTH*DT,\n half_env=False,\n)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--net_size', type=int, 
default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=50)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n expt_variant = expt_params\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'reacher'\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n expt_variant['env_params'] = env_params\n expt_variant['env_params']['is_render'] = args.render\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algo = experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.47450754046440125, "alphanum_fraction": 0.47914251685142517, "avg_line_length": 29.821428298950195, "blob_id": "ae62c54bead9b3106a4390278b10d43f40eb154d", "content_id": "58828abfe23621e86a72924a70df8c6fe6d79dcd", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1726, "license_type": "permissive", "max_line_length": 79, "num_lines": 56, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/costs/cost_sum.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass CostSum(object):\n def __init__(self, costs, weights=None):\n self._costs = costs\n\n if weights is None:\n weights = np.ones(len(self._costs))\n\n self._weights = np.array(weights)\n\n if len(self._costs) != len(self._weights):\n raise AttributeError(\"The number of cost types and weights\"\n \"do not match %d != %d\"\n % (len(self._costs), len(self._weights)))\n\n def eval(self, path):\n\n # Compute weighted sum of each cost value and derivatives for fist cost\n l, lx, lu, lxx, luu, lux = self._costs[0].eval(path)\n weight = self._weights[0]\n l = l * weight\n lx = lx * weight\n lu = lu * weight\n lxx = lxx * weight\n luu = luu * weight\n lux = lux * weight\n\n # Cost composition list\n cost_composition = list()\n cost_composition.append(l.copy())\n\n for i in range(1, len(self._costs)):\n pl, plx, plu, plxx, pluu, plux = self._costs[i].eval(path)\n # print(\"Cost %d: %f\" % (i, sum(pl)))\n weight = self._weights[i]\n\n cost_composition.append(pl*weight)\n\n l = l + pl * weight\n lx = lx + plx * weight\n lu = lu + plu * weight\n lxx = lxx + plxx * weight\n luu = luu + pluu * weight\n lux = lux + plux * weight\n\n # print('lx', lx[-1, :])\n # print('lu', lu[-1, :])\n # print('---')\n # print('lxx', lxx[-1, :, :])\n # print('luu', luu[-1, :, :])\n # print('lxx', np.diag(lxx[-1, :, :]))\n # input('wuuuu')\n\n return l, lx, lu, lxx, luu, lux, cost_composition\n" }, { "alpha_fraction": 0.5304532647132874, "alphanum_fraction": 0.5382436513900757, "avg_line_length": 31.576923370361328, "blob_id": "ca6a36dd7d6e6710f592f966799bc241a638e5e3", "content_id": "210277de0b500e5db13076a52717a3b6e9e5fc90", "detected_licenses": [ "BSD-3-Clause" ], 
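The CostSum class in the cost_sum.py record above combines heterogeneous cost objects by scaling every returned term (l, lx, lu, lxx, luu, lux) with its weight and accumulating. A self-contained sketch of that combination with a toy cost object — ToyQuadraticCost and its dimensions are illustrative stand-ins, not part of the library:

import numpy as np

T, dX, dU = 5, 3, 2

class ToyQuadraticCost(object):
    # Stands in for a CostAction/CostState-style object: eval() returns the
    # cost and its derivatives with the shapes CostSum.eval() expects.
    def __init__(self, scale):
        self.scale = scale
    def eval(self, path):
        l = self.scale * np.ones(T)                       # (T,)
        lx = np.zeros((T, dX))                            # dl/dx
        lu = np.zeros((T, dU))                            # dl/du
        lxx = np.zeros((T, dX, dX))
        luu = self.scale * np.tile(np.eye(dU), (T, 1, 1))
        lux = np.zeros((T, dU, dX))
        return l, lx, lu, lxx, luu, lux

costs = [ToyQuadraticCost(1.0), ToyQuadraticCost(3.0)]
weights = [0.5, 2.0]

# Inline what CostSum.eval does for the scalar term: a weighted sum.
terms = [c.eval(None) for c in costs]
l = sum(w * t[0] for w, t in zip(weights, terms))
print(l[0])  # 0.5*1.0 + 2.0*3.0 = 6.5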
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4236, "license_type": "permissive", "max_line_length": 101, "num_lines": 130, "path": "/scenarios/tests/algo_with_multi.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport time\nimport threading\nimport numpy as np\nfrom robolearn.old_utils.algo_interface import AlgoInterface\nfrom robolearn.old_envs.manipulator2d.manipulator2d_env import Manipulator2dEnv\nfrom robolearn.old_utils.ros_utils import get_available_port\n\ninit_roscore_port = 11312\ninit_gzserver_port = 11347\n\n\nclass SimpleRLAlgo(object):\n def __init__(self, N, T, ts):\n interface_fcns = [(self.stop, 'stop'), (self.start, 'start'), (self.restart, 'restart'),\n (self.is_running_fcn, 'is_running'), (self.kill_me, 'kill')]\n self.algo_interface = AlgoInterface(interface_fcns)\n self.n_iterations = N\n self.T = T\n self.Ts = ts\n self.is_running = False\n self.is_training = False\n self.is_finished = False\n\n self.total_gz_ros = 2\n\n self.rosgazebos = [None for _ in range(self.total_gz_ros)]\n self.roscore_ports = [None for _ in range(self.total_gz_ros)]\n self.gzserver_ports = [None for _ in range(self.total_gz_ros)]\n\n for ii in range(self.total_gz_ros):\n if ii == 0:\n last_roscore_port = init_roscore_port\n last_gzserver_port = init_gzserver_port\n else:\n last_roscore_port = self.roscore_ports[ii-1] + 1\n last_gzserver_port = self.gzserver_ports[ii-1] + 1\n self.roscore_ports[ii] = get_available_port('localhost', last_roscore_port)\n self.gzserver_ports[ii] = get_available_port('localhost', last_gzserver_port)\n self.rosgazebos[ii] = Manipulator2dEnv('localhost', roscore_port=self.roscore_ports[ii],\n gzserver_port=self.gzserver_ports[ii])\n self.rosgazebos[ii].start()\n\n self.running_thread = threading.Thread(target=self.running, args=[])\n self.running_thread.setDaemon(True)\n self.running_thread.start()\n\n def start(self):\n print(\"This is starting\")\n self.is_running = True\n return True\n\n def running(self):\n while not self.is_finished:\n if self.is_running:\n self.is_training = True\n for nn in range(self.n_iterations):\n if self.is_running is False:\n break\n # Interaction\n for ii in range(self.total_gz_ros):\n self.rosgazebos[ii].reset(time=None, freq=None, cond=0)\n for t in range(self.T):\n if self.is_running is False:\n break\n\n for ii in range(self.total_gz_ros):\n # get obs/state\n print(\"State env[%d]: %s\" % (ii, self.rosgazebos[ii].get_observation()))\n # act\n self.rosgazebos[ii].send_action(np.random.randn(3))\n\n print(\"Iteration %d/%d, time=%d/%d\" % (nn+1, self.n_iterations, t+1, self.T))\n time.sleep(self.Ts)\n # Evaluation\n\n # Update\n\n self.is_training = False\n\n def restart(self):\n print(\"This is restarting\")\n self.stop()\n while self.is_training:\n pass\n\n self.start()\n return True\n\n def stop(self):\n print(\"This is stopping\")\n self.is_running = False\n return True\n\n def is_running_fcn(self):\n print(\"Is this running?: %s \" % self.is_running)\n return self.is_running\n\n def kill_me(self):\n print(\"This is killing itself!!\")\n self.finish()\n for ii in range(self.total_gz_ros):\n self.rosgazebos[ii].stop()\n self.stop()\n del self\n return True\n\n def finish(self):\n self.is_finished = True\n\n\nif __name__ == \"__main__\":\n\n try:\n simple_algo = SimpleRLAlgo(20, 100, 0.2)\n\n simple_algo.start()\n\n while not simple_algo.is_finished:\n pass\n\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except 
SystemExit:\n os._exit(0)\n\n print(\"This algorithm has been finished!!\")\n\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 38, "blob_id": "be3a60d83ab70e97e4f311b1d2a37530aa474a59", "content_id": "e8bb647d9b3c8e134c7ffdf6c0b1ccd6abe60d0f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "permissive", "max_line_length": 38, "num_lines": 1, "path": "/robolearn/envs/simple_envs/frozen_lake/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .frozen_lake import FrozenLakeEnv\n" }, { "alpha_fraction": 0.49894553422927856, "alphanum_fraction": 0.5008063316345215, "avg_line_length": 33.9718017578125, "blob_id": "aa57b702cfc4d0eb3a2c93d869353e754db3d63d", "content_id": "e58e121876ec92a9462c140e07683c2f541d280f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16122, "license_type": "permissive", "max_line_length": 84, "num_lines": 461, "path": "/robolearn/torch/policies/tanh_weighted_multi_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn as nn\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import Policy\nfrom collections import OrderedDict\nfrom itertools import chain\n\n\nEPS = 1e-12\n\n\nclass TanhWeightedMultiPolicy(PyTorchModule, Policy):\n \"\"\"\n Usage:\n\n ```\n policy = WeightedTanhMlpMultiPolicy(...)\n action, policy_dict = policy(obs)\n ```\n\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=1e-2,\n output_w_init='xavier_normal',\n output_b_init_val=1e-2,\n pol_output_activation='linear',\n mix_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n epsilon=1e-6,\n softmax_weights=False,\n mixing_temperature=1.,\n **kwargs\n ):\n self.save_init_params(locals())\n PyTorchModule.__init__(self)\n Policy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. 
Mixing Layers\n # self.mfc_last = None # Below is instantiated\n\n self._mixing_temperature = mixing_temperature # Hyperparameter for exp.\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n mixture_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # ############# #\n # Mixing Layers #\n # ############# #\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n 
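The forward() method further below composes per-sub-policy pre-tanh outputs with per-dimension mixing weights and then squashes the result with tanh. A standalone torch sketch of that composition step, with made-up shapes:

import torch

batch, n_pol, act_dim = 4, 2, 3

# Shaped the way forward() builds them: (batch, n_policies, action_dim).
pre_tanh_values = torch.randn(batch, n_pol, act_dim)
mixture_coeff = torch.softmax(torch.randn(batch, n_pol, act_dim), dim=1)

# Weighted composition over the sub-policy axis, then squash.
pre_tanh_value = torch.sum(pre_tanh_values * mixture_coeff, dim=1)
action = torch.tanh(pre_tanh_value)
print(action.shape)  # torch.Size([4, 3])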
self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n if softmax_weights:\n self.mfc_softmax = nn.Softmax(dim=1)\n else:\n self.mfc_softmax = None\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n\n def get_action(self, obs_np, **kwargs):\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._mixture_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n # Last Layers\n pre_tanh_values_list = \\\n [(\n self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ]\n\n pre_tanh_values = torch.cat(pre_tanh_values_list, dim=1)\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = h.clone()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mixture_coeff = \\\n self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)\n\n if self.mfc_softmax is not None:\n mixture_coeff = self.mfc_softmax(mixture_coeff)\n\n if torch.isnan(pre_tanh_values).any():\n raise ValueError('Some pre_tanh_values are NAN: %s' %\n pre_tanh_values)\n\n if torch.isnan(mixture_coeff).any():\n raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %\n mixture_coeff)\n\n if pol_idx is None:\n # Calculate weighted output\n\n if optimize_policies:\n pre_tanh_value = torch.sum(\n 
pre_tanh_values*mixture_coeff,\n dim=1,\n keepdim=False\n )\n else:\n pre_tanh_value = torch.sum(\n pre_tanh_values.detach()*mixture_coeff,\n dim=1,\n keepdim=False\n )\n\n else:\n index = self._pols_idxs[pol_idx]\n pre_tanh_value = \\\n torch.index_select(pre_tanh_values, dim=1, index=index).squeeze(1)\n\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n info_dict = dict(\n pre_tanh_value=pre_tanh_value,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n" }, { "alpha_fraction": 0.5607997179031372, "alphanum_fraction": 0.5706325769424438, "avg_line_length": 30.12244987487793, "blob_id": "7ace7fecf3a1104ce8ccb8add877a593b886e0ef", "content_id": "a5631ef5ff1d41b7f54128248028aa45c34c2bc2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", 
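The separate shared/policies/mixing parameter iterators defined above presumably exist so each part of the mixture policy can receive its own optimizer settings. With a plain module, the same grouping can be achieved by filtering named_parameters() by prefix — a hypothetical sketch, with illustrative module names:

import torch
import torch.nn as nn

net = nn.ModuleDict({
    'shared': nn.Linear(3, 4),
    'mixing': nn.Linear(4, 6),
})
shared_params = [p for n, p in net.named_parameters() if n.startswith('shared')]
mixing_params = [p for n, p in net.named_parameters() if n.startswith('mixing')]

# One optimizer, two parameter groups with different learning rates.
opt = torch.optim.Adam([
    {'params': shared_params, 'lr': 1e-3},
    {'params': mixing_params, 'lr': 1e-4},
])
print(len(opt.param_groups))  # 2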
"length_bytes": 3051, "license_type": "permissive", "max_line_length": 71, "num_lines": 98, "path": "/examples/miscellaneous/test_pol.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nimport robolearn.torch.utils.pytorch_util as ptu\n\nfrom robolearn.torch.policies import TanhGaussianPolicy\nfrom robolearn.torch.policies import TanhMlpPolicy\nfrom robolearn.models.policies import ExplorationPolicy\n\n\nobs_dim = 3\nact_dim = 2\n\n# nn_pol = TanhGaussianPolicy(hidden_sizes=[4],\n# obs_dim=obs_dim,\n# action_dim=act_dim,\n# std=None,\n# hidden_w_init=ptu.xavier_initOLD,\n# hidden_b_init_val=0,\n# output_w_init=ptu.xavier_initOLD,\n# output_b_init_val=0)\n\nnn_pol = TanhMlpPolicy(hidden_sizes=[4],\n obs_dim=obs_dim,\n action_dim=act_dim,\n hidden_w_init=ptu.xavier_initOLD,\n hidden_b_init_val=0,\n output_w_init=ptu.xavier_initOLD,\n output_b_init_val=0)\n# nn_pol = MlpPolicy(hidden_sizes=[4],\n# obs_dim=obs_dim,\n# action_dim=act_dim,\n# hidden_w_init=ptu.xavier_initOLD,\n# hidden_b_init_val=0,\n# output_w_init=ptu.xavier_initOLD,\n# output_b_init_val=0)\n\nprint(\"Policy: '\", TanhGaussianPolicy.__name__, \"' parameters:\")\nfor name, param in nn_pol.named_parameters():\n print('name: ', name, '| shape: ', param.data.shape)\n\n\nobs = torch.rand(obs_dim)\nprint('\\n')\nprint('Evaluate with one obs:')\noutput = nn_pol(obs)\nprint('action: ', output[0])\n\nprint('\\n')\nprint('Evaluate with five obs:')\nobs = torch.rand((5, obs_dim))\noutput = nn_pol(obs)\nprint('actions: ', output[0])\n\nprint('\\n')\nprint('Evaluate with one np_obs:')\nobs = torch.rand(obs_dim).data.numpy()\noutput = nn_pol.get_action(obs)\nprint('action: ', output[0], '| shape: ', output[0].shape)\n\nprint('\\n')\nprint('Evaluate with five np_obs:')\nobs = torch.rand((5, obs_dim)).data.numpy()\noutput = nn_pol.get_actions(obs)\nprint('action: ', output[0], '| shape: ', output[0].shape)\n\n\nprint('\\n')\nprint('Dummy optimization:')\nloss_fn = torch.nn.MSELoss(size_average=False)\nlearning_rate = 1e-2\noptimizer_pol = torch.optim.Adam(nn_pol.parameters(), lr=learning_rate)\ndes_action = torch.ones(act_dim) * 5\n# des_action = torch.ones(act_dim) * 0.5\n\nobs = torch.rand(obs_dim)\n\nfor i in range(500):\n if isinstance(nn_pol, ExplorationPolicy):\n a_pred = nn_pol(obs, deterministic=False)\n else:\n a_pred = nn_pol(obs)\n\n loss = loss_fn(a_pred[0], des_action)\n print('iter: ', i, 'loss=', loss.item())\n\n optimizer_pol.zero_grad()\n loss.backward()\n optimizer_pol.step()\n\n # for name, param in nn_pol.named_parameters():\n # print('name: ', name, '| grad: ', param.grad)\n # input('PIPI')\n\nif isinstance(nn_pol, ExplorationPolicy):\n print('desired:', des_action,\n ' | expected: ', nn_pol(obs, deterministic=True)[0])\nelse:\n print('desired:', des_action,\n ' | expected: ', nn_pol(obs)[0])\n\n" }, { "alpha_fraction": 0.5253456234931946, "alphanum_fraction": 0.5537634491920471, "avg_line_length": 28.590909957885742, "blob_id": "32fbf95c6317e1b236996e2d52ff9b355a8d2319", "content_id": "74c9ac2171f51da4fe70e1d62352f07458e60ab6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "permissive", "max_line_length": 95, "num_lines": 44, "path": "/robolearn/torch/utils/ops/gauss_fit_joint_prior.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nTHIS FILE IS ADAPTED FROM FINN'S GPS\n\"\"\"\n\nimport torch\nimport numpy as np\n\n\ndef gauss_fit_joint_prior(pts, 
mu0, Phi, m, n0, dwts, d1, d2, sig_reg):\n \"\"\"\n Perform Gaussian fit to data with a prior.\n :param pts: (N x dA x dA)\n :param mu0:\n :param Phi:\n :param m:\n :param n0:\n :param dwts:\n :param d1: dimension of first set? E.g. dX\n :param d2: simension of second set? E.g. dU\n :param sig_reg:\n :return:\n \"\"\"\n # Build weights matrix.\n D = torch.diag(dwts)\n # Compute empirical mean and covariance.\n mun = torch.sum((pts.t() * dwts).t(), dim=0)\n diff = pts - mun\n empsig = diff.t().matmul(D).matmul(diff)\n empsig = 0.5 * (empsig + empsig.t())\n # MAP estimate of joint distribution.\n N = dwts.shape[0]\n mu = mun\n sigma = (N * empsig + Phi + (N * m) / (N + m) * torch.ger(mun - mu0, mun - mu0)) / (N + n0)\n sigma = 0.5 * (sigma + sigma.T)\n # Add sigma regularization.\n sigma += sig_reg\n # Conditioning to get dynamics.\n fd = torch.gesv(sigma[:d1, :d1],\n sigma[:d1, d1:d1 + d2]).t()\n fc = mu[d1:d1 + d2] - fd.matmul(mu[:d1])\n dynsig = sigma[d1:d1 + d2, d1:d1 + d2] - \\\n fd.matmul(sigma[:d1, :d1]).matmul(fd.t())\n dynsig = 0.5 * (dynsig + dynsig.t())\n return fd, fc, dynsig\n" }, { "alpha_fraction": 0.8705036044120789, "alphanum_fraction": 0.8705036044120789, "avg_line_length": 38.71428680419922, "blob_id": "8d762c172cc15d54cf56a245aab74c421417597f", "content_id": "270efe184a8820be3709ade282e796e42b8aafcf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "permissive", "max_line_length": 54, "num_lines": 7, "path": "/robolearn/utils/exploration_strategies/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .base import PolicyWrappedWithExplorationStrategy\nfrom .base import RawExplorationStrategy\n\nfrom .epsilon_greedy import EpsilonGreedy\nfrom .ou_strategy import OUStrategy\nfrom .gaussian_strategy import GaussianStrategy\nfrom .smooth_noise_strategy import SmoothNoiseStrategy\n" }, { "alpha_fraction": 0.5938989520072937, "alphanum_fraction": 0.6081982851028442, "avg_line_length": 23.9761905670166, "blob_id": "8bbca50d24e55c6c3895cde40d1fb2d826d1871e", "content_id": "526344333c58c9bcb3d15ae9fa63c0d22d02af93", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "permissive", "max_line_length": 93, "num_lines": 42, "path": "/scripts/plot_multigoal_q_fcn.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\nimport joblib\nimport IPython\n\nfrom robolearn.envs.simple_envs.multigoal_deprecated.multigoal_q_plot_ import QFPolicyPlotter\n\n\ndef main(args):\n data = joblib.load(args.file)\n if args.deterministic:\n print('Using the deterministic version of the _i_policy.')\n policy = data['_i_policy']\n else:\n print('Using the stochastic _i_policy.')\n policy = data['exploration_policy']\n\n qf = data['_i_qf']\n\n # QF Plot\n plotter = QFPolicyPlotter(\n qf=qf,\n policy=policy,\n obs_lst=np.array([[-2.5, 0.0], [0.0, 0.0], [2.5, 2.5]]),\n default_action=[np.nan, np.nan],\n n_samples=100)\n\n plotter.draw()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str,\n help='path to the snapshot file')\n parser.add_argument('--deterministic', action=\"store_true\")\n\n args = parser.parse_args()\n main(args)\n\n input('Press a key to close the script...')\n\n IPython.embed()\n" }, { "alpha_fraction": 0.6578072905540466, "alphanum_fraction": 
0.6677740812301636, "avg_line_length": 34.35293960571289, "blob_id": "973f3b9ca0df376b370cc362be0a3bc83d602a59", "content_id": "34b91620499a6ef98bf3737bbeb90b7583f77080", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "permissive", "max_line_length": 76, "num_lines": 17, "path": "/scenarios/tests/plots/duals.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom builtins import input\nfrom robolearn.old_utils.plots.duals import plot_duals\n\nmethod = 'gps' # 'gps' or 'trajopt'\ngps_directory_names = ['reacher_log']#, 'reacher_log2', 'reacher_log3']\ngps_models_labels = ['gps1']#, 'gps2', 'gps3']\nitr_to_load = None # list(range(8))\nblock = False\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\nplot_duals(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block)\n\ninput('Showing plots. Press a key to close...')\n\n" }, { "alpha_fraction": 0.8888888955116272, "alphanum_fraction": 0.8888888955116272, "avg_line_length": 62, "blob_id": "852786cee9e5f44db43d317917ac7c3f14c7915e", "content_id": "0022f4ea413509543323e78a97320d5e7a03e44d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "permissive", "max_line_length": 62, "num_lines": 1, "path": "/robolearn/models/transitions/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.models.transitions.transition import Transition\n" }, { "alpha_fraction": 0.5114121437072754, "alphanum_fraction": 0.5357208847999573, "avg_line_length": 30.270793914794922, "blob_id": "ef814ecb482c5a0f7b646b730c45b9e9193a51cf", "content_id": "5b9c2e165f679dc2645f2547809170973e4dde96", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16167, "license_type": "permissive", "max_line_length": 119, "num_lines": 517, "path": "/examples/rl_algos/gps/centauro_gps.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom robolearn_gym_envs.pybullet import CentauroObstacleEnv\n\nfrom robolearn.v010.costs.cost_action import CostAction\nfrom robolearn.v010.costs.cost_state import CostState\nfrom robolearn.v010.costs.cost_sum import CostSum\n\nfrom robolearn.v010.utils.sample.sample import Sample\nfrom robolearn.v010.utils.sample.sample_list import SampleList\n\nfrom robolearn.v010.policies.lin_gauss_policy import LinearGaussianPolicy\nfrom robolearn.v010.policies.lin_gauss_init import init_pd\nfrom robolearn.v010.policies.lin_gauss_init import init_lqr\n\nfrom robolearn.v010.costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\n\nfrom robolearn.v010.utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.v010.utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\n\nfrom robolearn.v010.agents.agent_utils import generate_noise\n\nnp.set_printoptions(suppress=True)\n\nseed = 10\nnp.random.seed(seed)\n\nNiter = 30\nTend = 1.0\n# Tend = 0.1\nTs = 0.01\nT = int(Tend/Ts)\nNrollouts = 4\n\n# noisy = False\nnoisy = True\neta = 0.5\n\nmax_initial_state_var = 1e-2\n\nrender = False\n# render = True\ngoal_tolerance = 0.02\nSIM_TIMESTEP = 0.01\nframe_skip = int(Ts/SIM_TIMESTEP)\n\n# Environment\nenv = 
CentauroObstacleEnv(\n is_render=render,\n obs_with_img=False,\n active_joints='RA',\n control_mode='tasktorque',\n # _control_mode='velocity',\n sim_timestep=SIM_TIMESTEP,\n frame_skip=frame_skip,\n obs_distances=True,\n goal_tolerance=goal_tolerance,\n max_time=None,\n)\n\n# Initial condition\nenv_condition = np.array([0.02, 0.25, 00.0, 0.08, 0.6, 0.0])\nenv_condition[2] = np.deg2rad(env_condition[2])\nenv_condition[5] = np.deg2rad(env_condition[5])\nenv.add_tgt_obst_init_cond(tgt_state=env_condition[:3],\n obst_state=env_condition[3:])\nenv.update_init_conds()\ndX = env.obs_dim\ndU = env.action_dim\n\n# Noise\nnoise_hyperparams = dict(\n smooth_noise=True,\n smooth_noise_var=2.0e+0,\n smooth_noise_renormalize=True,\n # noise_var_scale=1.e-4*np.array([1., 1., 1., 1., 1., 1., 1.]),\n noise_var_scale=1.,\n)\n\n# Policy\nTORQUE_GAINS = np.array([1.0, 1.0, 1.0, 0.5, 0.1, 0.2, 0.001])\ninit_policy_hyperparams = {\n 'init_gains': 1.0 / TORQUE_GAINS,\n 'init_acc': np.zeros(7),\n 'init_var': 1.0,\n 'stiffness': 1.0,\n 'stiffness_vel': 0.5,\n 'final_weight': 100.0,\n 'dt': Ts,\n 'T': T,\n 'x0': np.array([0., 0.3, 0.8, 0.8, 0., 0.8, 0.,\n 0., 0., 0., 0., 0., 0., 0.,\n 0.2095, 0.3503, -0.0584, -0., 0., -0.]),\n 'dX': dX,\n 'dU': dU,\n}\npolicy = init_lqr(init_policy_hyperparams)\n\n# Dynamics\ndynamics_hyperparams = dict(\n regularization=1e-6,\n prior={\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. number of trajectories to use for fitting the GMM at any given time.\n # 'strength': 1.0, # Adjusts the strength of the prior.\n },\n)\ndynamics = DynamicsLRPrior(dynamics_hyperparams)\n\n\n# Reward Fcn\naction_cost = {\n 'type': CostAction,\n 'wu': 1e-3 / TORQUE_GAINS,\n 'target': None, # Target action value\n}\n\nl2_l1_weights = np.array([1.0, 0.1])\n# target_state = np.array([0., 0.3, 0.8, 0.8, 0., 0.8, 0.])\n# target_state += np.array([0.2, -0.2, 0.3, -0.3, 0.1, 0., 0.])\n# # target_state = np.array([0., 0., 0., -0.3, 0., 0., 0.])\n# state_cost_distance = {\n# 'type': CostState,\n# 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n# 'l2': l2_l1_weights[0], # Weight for l2 norm\n# 'l1': l2_l1_weights[1], # Weight for l1 norm\n# 'alpha': 1.e-2, # Constant added in square root in l1 norm\n# 'wp_final_multiplier': 1.0e0, # Weight multiplier on final time step.\n# 'data_types': {\n# 'position': {\n# 'wp': np.array([1., 1., 1., 1., 1., 1., 1.]), # State weights - must be set.\n# 'target_state': target_state, # Target state - must be set.\n# 'average': None,\n# 'data_idx': env.get_state_info(name='position')['idx']\n# },\n# },\n# }\ntarget_state = np.array([0., 0., 0., 0., 0., 0.])\n# target_state = np.array([0., 0., 0., -0.3, 0., 0., 0.])\nstate_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'l2': l2_l1_weights[0], # Weight for l2 norm\n 'l1': l2_l1_weights[1], # Weight for l1 norm\n 'alpha': 1.e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0e0, # Weight multiplier on final time step.\n 'data_types': {\n 'target': {\n 'wp': np.array([1., 1., 1., 0., 0., 0.]), # State weights - must be set.\n 'target_state': target_state, # Target state - must be set.\n 'average': None,\n 'data_idx': env.get_state_info(name='target')['idx']\n },\n },\n}\nfinal_state_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'l2': l2_l1_weights[0], # Weight for l2 norm\n 'l1': l2_l1_weights[1], # Weight for l1 norm\n 'alpha': 1.e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0e0, # Weight multiplier on final time step.\n 'data_types': {\n 'target': {\n 'wp': np.array([1., 1., 1., 0.1, 0.1, 0.1]), # State weights - must be set.\n 'target_state': target_state, # Target state - must be set.\n 'average': None,\n 'data_idx': env.get_state_info(name='target')['idx']\n },\n },\n}\n\nall_costs = [\n action_cost,\n state_cost_distance,\n final_state_cost_distance,\n]\nall_weights = [\n1.e+3,\n# 1.e+0,\n3.e+1,\n3.e+3,\n]\n\ncost_sum_hyperparams = dict(\n costs=all_costs,\n weights=all_weights,\n\n)\ncost_fcn = CostSum(cost_sum_hyperparams)\n\niter_costs = np.zeros((Niter+1))\n\n# ############### #\n# Evaluate Policy #\n# ############### #\n# Sample and show cost\nnoise = np.zeros((T, dU))\nsample = Sample(env, T)\nall_actions = np.zeros((T, dU))\nall_states = np.zeros((T, dX))\nall_obs = np.zeros((T, dX))\nobs0 = env.reset()\nfor t in range(T):\n state = env.get_state()# - obs0\n obs = env.get_observation()# - obs0\n action = policy.eval(state.copy(), obs.copy(),\n t, noise[t].copy())\n env.step(action)\n all_states[t, :] = state\n all_obs[t, :] = obs\n all_actions[t, :] = action\nsample.set_acts(all_actions)\nsample.set_obs(all_obs)\nsample.set_states(all_states)\nsample.set_noise(noise)\ncost_output = cost_fcn.eval(sample)\niter_costs[0] = np.sum(cost_output[0])\n\nfor ii in range(Niter):\n\n # All samples in iteration\n interaction_samples = list()\n\n # Sample from environment\n for rr in range(Nrollouts):\n print('Iter %02d' % ii, ' | ', 'Rollout:%02d' % rr)\n sample = Sample(env, T)\n all_actions = np.zeros((T, dU))\n all_states = np.zeros((T, dX))\n all_obs = np.zeros((T, dX))\n\n if noisy:\n noise = generate_noise(T, dU, noise_hyperparams)\n else:\n noise = np.zeros((T, dU))\n\n # Reset\n obs0 = env.reset()\n for t in range(T):\n state = env.get_state()# - obs0\n obs = env.get_observation()# - obs0\n action = policy.eval(state, obs, t, noise[t])\n env.step(action)\n # if t == 0:\n # print('****')\n # print(noise[t])\n # print('****')\n\n all_states[t, :] = state\n all_obs[t, :] = obs\n all_actions[t, :] = action\n\n sample.set_acts(all_actions)\n sample.set_obs(all_obs)\n sample.set_states(all_states)\n sample.set_noise(noise)\n interaction_samples.append(sample)\n\n # Samp\n sample_list = SampleList(interaction_samples)\n\n # Fit Dynamics\n print('****'*2)\n print('FITTING DYNAMICS...')\n print('****'*2)\n cur_data = sample_list\n X = cur_data.get_states()\n U = cur_data.get_actions()\n\n # Update prior and fit dynamics.\n dynamics.update_prior(cur_data)\n dynamics.fit(X, U)\n\n # Fit x0mu/x0sigma.\n x0 = X[:, 0, :]\n x0mu = np.mean(x0, axis=0)\n x0mu = x0mu\n x0sigma = \\\n 
np.diag(np.maximum(np.var(x0, axis=0), max_initial_state_var))\n\n prior = dynamics.get_prior()\n if prior:\n mu0, Phi, priorm, n0 = prior.initial_state()\n N = len(cur_data)\n x0sigma += \\\n Phi + (N*priorm) / (N+priorm) * \\\n np.outer(x0mu-mu0, x0mu-mu0) / (N+n0)\n\n # Eval Samples Reward\n print('****'*2)\n print('EVALUATING COST...')\n print('****'*2)\n cs = np.zeros((Nrollouts, T))\n cc = np.zeros((Nrollouts, T))\n cv = np.zeros((Nrollouts, T, dX+dU))\n Cm = np.zeros((Nrollouts, T, dX+dU, dX+dU))\n cost_composition = [None for _ in range(Nrollouts)]\n\n for n in range(Nrollouts):\n sample = sample_list[n]\n l, lx, lu, lxx, luu, lux, cost_composition[n] = cost_fcn.eval(sample)\n\n # True value of cost\n cs[n, :] = l\n\n # Constant term\n cc[n, :] = l\n\n # Assemble matrix and vector.\n cv[n, :, :] = np.c_[lx, lu]\n Cm[n, :, :, :] = np.concatenate(\n (np.c_[lxx, np.transpose(lux, [0, 2, 1])], np.c_[lux, luu]),\n axis=1\n )\n\n # Adjust for expanding cost around a sample.\n X = sample.get_states()\n U = sample.get_acts()\n yhat = np.c_[X, U]\n rdiff = -yhat\n rdiff_expand = np.expand_dims(rdiff, axis=2)\n cv_update = np.sum(Cm[n, :, :, :] * rdiff_expand, axis=1)\n cc[n, :] += np.sum(rdiff * cv[n, :, :], axis=1) \\\n + 0.5 * np.sum(rdiff * cv_update, axis=1)\n cv[n, :, :] += cv_update\n\n # Reward estimate.\n cc = np.mean(cc, axis=0) # Constant term (scalar).\n cv = np.mean(cv, axis=0) # Linear term (vector).\n Cm = np.mean(Cm, axis=0) # Quadratic term (matrix).\n\n # Consider KL divergence\n multiplier = 0\n PKLm = np.zeros((T, dX+dU, dX+dU))\n PKLv = np.zeros((T, dX+dU))\n fCm = np.zeros(Cm.shape)\n fcv = np.zeros(cv.shape)\n for t in range(T):\n # Policy KL-divergence terms.\n inv_pol_S = np.linalg.solve(\n policy.chol_pol_covar[t, :, :],\n np.linalg.solve(policy.chol_pol_covar[t, :, :].T, np.eye(dU))\n )\n KB = policy.K[t, :, :]\n kB = policy.k[t, :]\n PKLm[t, :, :] = np.vstack([\n np.hstack([KB.T.dot(inv_pol_S).dot(KB), -KB.T.dot(inv_pol_S)]),\n np.hstack([-inv_pol_S.dot(KB), inv_pol_S])\n ])\n PKLv[t, :] = np.concatenate([\n KB.T.dot(inv_pol_S).dot(kB), -inv_pol_S.dot(kB)\n ])\n fCm[t, :, :] = (Cm[t, :, :] + PKLm[t, :, :] * eta) / (eta + multiplier)\n fcv[t, :] = (cv[t, :] + PKLv[t, :] * eta) / (eta + multiplier)\n\n # Backward Pass\n print('****'*2)\n print('UPDATING POLICY...')\n print('****'*2)\n\n # print(cv[:, :7])\n # input('fadsfds')\n\n idx_x = slice(dX)\n idx_u = slice(dX, dX+dU)\n\n Fm = dynamics.Fm\n fv = dynamics.fv\n\n # Allocate.\n Vxx = np.zeros((T, dX, dX))\n Vx = np.zeros((T, dX))\n Qtt = np.zeros((T, dX+dU, dX+dU))\n Qt = np.zeros((T, dX+dU))\n\n new_K = np.zeros((T, dU, dX))\n new_k = np.zeros((T, dU))\n new_pS = np.zeros((T, dU, dU))\n new_ipS = np.zeros((T, dU, dU))\n new_cpS = np.zeros((T, dU, dU))\n\n for t in range(T - 1, -1, -1):\n # Add in the cost.\n Qtt[t] = fCm[t, :, :] # (X+U) x (X+U)\n Qt[t] = fcv[t, :] # (X+U) x 1\n # print('Qt', Qt[t])\n # input(\"fjdsalfsdf\")\n\n # Add in the value function from the next time step.\n if t < T - 1:\n multiplier = 1.0\n\n Qtt[t] += Fm[t, :, :].T.dot(Vxx[t+1, :, :]).dot(Fm[t, :, :])\n Qt[t] += Fm[t, :, :].T.dot(Vx[t+1, :] +\n Vxx[t+1, :, :].dot(fv[t, :]))\n\n # Symmetrize quadratic component.\n Qtt[t] = 0.5 * (Qtt[t] + Qtt[t].T)\n\n inv_term = Qtt[t, idx_u, idx_u] # Quu\n k_term = Qt[t, idx_u] # Qu\n K_term = Qtt[t, idx_u, idx_x] # Qxu\n # # For cos_per_step\n # inv_term = Qtt[t, idx_u, idx_u] + policy.inv_pol_covar[t]\n # k_term = Qt[t, idx_u] - policy.inv_pol_covar[t].dot(policy.k[t])\n # K_term = Qtt[t, idx_u, 
idx_x] - policy.inv_pol_covar[t].dot(policy.K[t])\n\n # Compute Cholesky decomposition of Q function action component.\n U = sp.linalg.cholesky(inv_term)\n L = U.T\n\n # Update the Trajectory Distribution Parameters\n # Store conditional covariance, inverse, and Cholesky.\n new_ipS[t, :, :] = inv_term # Quu\n new_pS[t, :, :] = sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, np.eye(dU), lower=True)\n ) # Quu^-1\n new_cpS[t, :, :] = sp.linalg.cholesky(\n new_pS[t, :, :]\n )\n\n # Compute mean terms.\n # print(policy.k[t, :])\n new_k[t, :] = -sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, k_term, lower=True)\n )\n new_K[t, :, :] = -sp.linalg.solve_triangular(\n U, sp.linalg.solve_triangular(L, K_term, lower=True)\n )\n # if t == 0:\n # print(np.round(new_K[t, :7, :7], 2))\n # print('--')\n # print(np.round(new_k[t, :7], 2))\n # input('dfasf')\n # else:\n # print(np.round(new_k[t, :7], 2))\n\n # Compute value function.\n Vxx[t, :, :] = Qtt[t, idx_x, idx_x] + \\\n Qtt[t, idx_x, idx_u].dot(new_K[t, :, :])\n Vx[t, :] = Qt[t, idx_x] + \\\n Qtt[t, idx_x, idx_u].dot(new_k[t, :])\n\n # # Option: cons_per_step or not upd_bw\n # Vxx[t, :, :] = Qtt[t, idx_x, idx_x] + \\\n # policy.K[t].T.dot(Qtt[t, idx_u, idx_u]).dot(policy.K[t]) + \\\n # (2 * Qtt[t, idx_x, idx_u]).dot(policy.K[t, :, :])\n # Vx[t, :] = Qt[t, idx_x].T + \\\n # Qt[t, idx_u].T.dot(policy.K[t]) + \\\n # policy.k[t].T.dot(Qtt[t, idx_u, idx_u]).dot(policy.K[t]) + \\\n # Qtt[t, idx_x, idx_u].dot(policy.k[t, :])\n\n # Symmetrize quadratic component.\n Vxx[t, :, :] = 0.5 * (Vxx[t, :, :] + Vxx[t, :, :].T)\n\n # Now update\n policy.K = new_K\n policy.k = new_k\n policy.pol_covar = new_pS\n policy.inv_pol_covar = new_ipS\n policy.chol_pol_covar = new_cpS\n\n # ############### #\n # Evaluate Policy #\n # ############### #\n # Sample and show cost\n noise = np.zeros((T, dU))\n sample = Sample(env, T)\n all_actions = np.zeros((T, dU))\n all_states = np.zeros((T, dX))\n all_obs = np.zeros((T, dX))\n\n obs0 = env.reset()\n for t in range(T):\n state = env.get_state()# - obs0\n obs = env.get_observation()# - obs0\n action = policy.eval(state.copy(), obs.copy(),\n t, noise[t].copy())\n env.step(action)\n\n all_states[t, :] = state\n all_obs[t, :] = obs\n all_actions[t, :] = action\n sample.set_acts(all_actions)\n sample.set_obs(all_obs)\n sample.set_states(all_states)\n sample.set_noise(noise)\n\n cost_output = cost_fcn.eval(sample)\n print('***\\n' * 5)\n iter_costs[ii+1] = np.sum(cost_output[0])\n print('SAMPLE COST', np.sum(cost_output[0]))\n print('***\\n' * 5)\n\nplt.plot(iter_costs[1:])\nplt.show(block=False)\n\n# input(\"Press a key to start sampling\")\n# resample = True\n# while resample:\n# env.set_rendering(True)\n# obs0 = env.reset()\n# for t in range(T):\n# state = env.get_state()# - obs0\n# obs = env.get_observation()# - obs0\n# action = policy.eval(state.copy(), obs.copy(),\n# t, noise[t].copy())\n# env.step(action)\n# answer = input('Do you want to finish the script? (y/Y)')\n# if answer.lower == 'y':\n# resample = False\n#\ninput('Press a key to close the script')\n" }, { "alpha_fraction": 0.5604395866394043, "alphanum_fraction": 0.5644881725311279, "avg_line_length": 39.01388931274414, "blob_id": "c2e0bb698e4a6ecaf32be5f04e8f70bc4e46495e", "content_id": "78c211d0a1130c894918b9dcaf3446f2fa214982", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8645, "license_type": "permissive", "max_line_length": 119, "num_lines": 216, "path": "/robolearn/torch/algorithms/rl_algos/gps/gps/utils.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.ndimage as sp_ndimage\nfrom robolearn.algorithms.rl_algos import LinearGaussianPolicy\n\n\ndef gauss_fit_joint_prior(pts, mu0, Phi, m, n0, dwts, dX, dU, sig_reg,\n max_var=None):\n \"\"\"\n Perform Gaussian fit to data with a prior.\n :param pts:\n :param mu0:\n :param Phi:\n :param m:\n :param n0:\n :param dwts:\n :param dX:\n :param dU:\n :param sig_reg: Sigma regularization\n :return:\n \"\"\"\n # Build weights matrix.\n D = np.diag(dwts)\n\n # Compute empirical mean and covariance.\n mun = np.sum((pts.T * dwts).T, axis=0)\n diff = pts - mun\n empsig = diff.T.dot(D).dot(diff)\n empsig = 0.5 * (empsig + empsig.T)\n\n # MAP estimate of joint distribution.\n N = dwts.shape[0]\n mu = mun\n sigma = (N * empsig + Phi + (N * m) / (N + m) * np.outer(mun - mu0, mun - mu0)) / (N + n0)\n sigma = 0.5 * (sigma + sigma.T)\n\n if max_var is not None:\n sigma = np.clip(sigma, -max_var, max_var)\n\n # Add sigma regularization.\n sigma += sig_reg\n\n # Conditioning to get dynamics.\n fd = np.linalg.solve(sigma[:dX, :dX], sigma[:dX, dX:dX+dU]).T\n fc = mu[dX:dX+dU] - fd.dot(mu[:dX])\n dynsig = sigma[dX:dX+dU, dX:dX+dU] - fd.dot(sigma[:dX, :dX]).dot(fd.T)\n dynsig = 0.5 * (dynsig + dynsig.T)\n\n return fd, fc, dynsig\n\n\ndef generate_noise(T, dU, hyperparams):\n \"\"\"\n Generate a T x dU gaussian-distributed noise vector. This will\n approximately have mean 0 and variance noise_var_scale, ignoring smoothing.\n\n Args:\n T: Number of time steps.\n dU: Dimensionality of actions.\n Hyperparams:\n smooth: Whether or not to perform smoothing of noise.\n var : If smooth=True, applies a Gaussian filter with this\n variance.\n renorm : If smooth=True, renormalizes data to have variance 1\n after smoothing.\n \"\"\"\n smooth, var = hyperparams['smooth_noise'], hyperparams['smooth_noise_var']\n renorm = hyperparams['smooth_noise_renormalize']\n\n if 'noise_var_scale' not in hyperparams:\n hyperparams['noise_var_scale'] = 1\n\n if not issubclass(type(hyperparams['noise_var_scale']), list) and \\\n not issubclass(type(hyperparams['noise_var_scale']), np.ndarray):\n scale = np.tile(hyperparams['noise_var_scale'], dU)\n elif len(hyperparams['noise_var_scale']) == dU:\n scale = hyperparams['noise_var_scale']\n else:\n raise TypeError(\"noise_var_scale size (%d) does not match dU (%d)\" % (len(hyperparams['noise_var_scale']), dU))\n\n # np.random.seed(5)\n\n # Generate noise and scale\n noise = np.random.randn(T, dU)\n\n temp_labels = list()\n temp_noise_list = list()\n temp_noise_list.append(noise.copy())\n temp_labels.append('Noise')\n\n if smooth:\n # Smooth noise. This violates the controller assumption, but\n # might produce smoother motions.\n for i in range(dU):\n noise[:, i] = sp_ndimage.filters.gaussian_filter(noise[:, i], var)\n temp_noise_list.append(noise.copy())\n temp_labels.append('Filtered')\n if renorm:\n variance = np.var(noise, axis=0)\n noise = noise * np.sqrt(scale) / np.sqrt(variance)\n\n temp_noise_list.append(noise.copy())\n temp_labels.append('Renorm')\n\n else:\n noise = noise*np.sqrt(scale)\n\n # plot_multi_info(temp_noise_list, block=True, cols=2, legend=True,\n # labels=temp_labels)\n\n return noise\n\n\nclass BundleType(object):\n \"\"\"\n This class bundles many fields, similar to a record or a mutable\n namedtuple.\n \"\"\"\n def __init__(self, variables):\n for var, val in variables.items():\n object.__setattr__(self, var, val)\n\n # Freeze fields so new ones cannot be set.\n def __setattr__(self, key, value):\n if not hasattr(self, key):\n raise AttributeError(\"%r has no attribute %s\" % (self, key))\n object.__setattr__(self, key, value)\n\n\nclass IterationData(BundleType):\n \"\"\" Collection of iteration variables. \"\"\"\n def __init__(self):\n variables = {\n 'sample_list': None, # List of samples for the current iteration.\n 'traj_info': None, # Current TrajectoryInfo object.\n 'pol_info': None, # Current PolicyInfo object.\n 'traj_distr': None, # Current trajectory distribution. \\bar{p}_i(u_t|x_t)\n 'new_traj_distr': None, # Updated trajectory distribution. p_i(u_t|x_t)\n 'cs': None, # Sample costs of the current iteration.\n 'cost_compo': None, # Sample cost compositions of the current iteration.\n 'eta': 1.0, # Dual variable used in LQR backward pass.\n 'omega': 0.0, # Dual variable used in LQR backward pass (Dualism).\n 'nu': 0.0, # Dual variable used in LQR backward pass (Dualism).\n 'step_mult': 1.0, # KL step multiplier for the current iteration.\n 'good_step_mult': 1.0, # KL good multiplier for the current iteration (Dualism).\n 'bad_step_mult': 1.0, # KL bad multiplier for the current iteration (Dualism).\n 'good_traj_distr': None, # Good traj_distr (Dualism).\n 'bad_traj_distr': None, # Bad traj_distr (Dualism).\n }\n BundleType.__init__(self, variables)\n\n\nclass TrajectoryInfo(BundleType):\n \"\"\" Collection of trajectory-related variables. \"\"\"\n def __init__(self):\n variables = {\n 'dynamics': None, # Dynamics object for the current iteration.\n 'x0mu': None, # Mean for the initial state, used by the dynamics.\n 'x0sigma': None, # Covariance for the initial state distribution.\n 'cc': None, # Cost estimate constant term.\n 'cv': None, # Cost estimate vector term.\n 'Cm': None, # Cost estimate matrix term.\n 'last_kl_step': float('inf'), # KL step of the previous iteration.\n }\n BundleType.__init__(self, variables)\n\n\nclass PolicyInfo(BundleType):\n \"\"\" Collection of policy-related variables. \"\"\"\n def __init__(self, **hyperparams):\n T, dU, dX = hyperparams['T'], hyperparams['dU'], hyperparams['dX']\n variables = {\n 'lambda_k': np.zeros((T, dU)), # Dual variable (Lagrange multiplier vectors) for k.\n 'lambda_K': np.zeros((T, dU, dX)), # Dual variables (Lagrange multiplier vectors) for K.\n 'pol_wt': hyperparams['init_pol_wt'] * np.ones(T), # Policy weight.\n 'pol_mu': None, # Mean of the current policy output.\n 'pol_sig': None, # Covariance of the current policy output.\n 'pol_K': np.zeros((T, dU, dX)), # Policy linearization K matrix.\n 'pol_k': np.zeros((T, dU)), # Policy linearization k vector.\n 'pol_S': np.zeros((T, dU, dU)), # Policy linearization covariance.\n 'chol_pol_S': np.zeros((T, dU, dU)), # Policy linearization Cholesky decomp of covar.\n 'prev_kl': None, # Previous KL divergence.\n 'init_kl': None, # The initial KL divergence, before the iteration.\n 'policy_samples': [], # List of current policy samples.\n 'policy_prior': None, # Current prior for policy linearization.\n }\n BundleType.__init__(self, variables)\n\n def traj_distr(self):\n \"\"\"\n Create a trajectory distribution object from policy info\n (Policy linearization)\n \"\"\"\n T, dU, dX = self.pol_K.shape\n # Compute inverse policy covariances.\n inv_pol_S = np.empty_like(self.chol_pol_S)\n for t in range(T):\n inv_pol_S[t, :, :] = \\\n np.linalg.solve(self.chol_pol_S[t, :, :],\n np.linalg.solve(self.chol_pol_S[t, :, :].T,\n np.eye(dU)))\n return LinearGaussianPolicy(self.pol_K, self.pol_k, self.pol_S,\n self.chol_pol_S, inv_pol_S)\n\n\nclass DualityInfo(BundleType):\n \"\"\" Collection of duality-trajectory-related variables. \"\"\"\n def __init__(self):\n variables = {\n 'sample_list': None,\n 'samples_cost': None,\n 'traj_cost': None,\n 'traj_dist': None,\n 'pol_info': None, # Policy-related PolicyInfo object.\n 'experience_buffer': None,\n }\n BundleType.__init__(self, variables)\n\n\n" }, { "alpha_fraction": 0.5981407761573792, "alphanum_fraction": 0.6037184596061707, "avg_line_length": 26.751842498779297, "blob_id": "2c0397652c61430973bfdb780729cb42642053b2", "content_id": "70edb8134e05790539dd7381f091c778a3fdc4f9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11295, "license_type": "permissive", "max_line_length": 90, "num_lines": 407, "path": "/robolearn/torch/utils/pytorch_util.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\n\nfrom torch.autograd import Variable as TorchVariable\nimport torch.nn as nn\n\n\ndef seed(seed):\n torch.cuda.manual_seed(seed)\n r_generator = torch.manual_seed(seed)\n # if torch.cuda.is_available():\n # torch.cuda.manual_seed_all(seed)\n return r_generator\n\n\ndef soft_update_from_to(source, target, tau):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + source_param.data * tau\n )\n\n\ndef copy_model_params_from_to(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)\n\n\ndef identity(x):\n return x\n\n\ndef fill(tensor, value):\n with torch.no_grad():\n return tensor.fill_(value)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\n\"\"\"\nGPU wrappers\n\"\"\"\n_use_gpu = False\ndevice = None\n\n\ndef set_gpu_mode(mode, gpu_id=0):\n global _use_gpu\n global device\n global _gpu_id\n 
_gpu_id = gpu_id\n _use_gpu = mode\n device = torch.device(\"cuda:\" + str(gpu_id) if _use_gpu else \"cpu\")\n\n\ndef gpu_enabled():\n return _use_gpu\n\n\ndef set_device(gpu_id):\n torch.cuda.set_device(gpu_id)\n\n\n\"\"\"\nTorch Tensors\n\"\"\"\n\n\n# noinspection PyPep8Naming\ndef FloatTensor(*args, **kwargs):\n return torch.FloatTensor(*args, **kwargs).to(device)\n\n\ndef zeros(*sizes, **kwargs):\n return torch.zeros(*sizes, **kwargs).to(device)\n\n\ndef ones(*sizes, **kwargs):\n return torch.ones(*sizes, **kwargs).to(device)\n\n\ndef zeros_like(*args, **kwargs):\n return torch.zeros_like(*args, **kwargs).to(device)\n\n\ndef ones_like(*args, **kwargs):\n return torch.ones_like(*args, **kwargs).to(device)\n\n\ndef eye(*sizes, **kwargs):\n return torch.eye(*sizes, **kwargs).to(device)\n\n\ndef rand(*args, **kwargs):\n return torch.rand(*args, **kwargs).to(device)\n\n\ndef randn(*args, **kwargs):\n return torch.randn(*args, **kwargs).to(device)\n\n\ndef arange(*args, **kwargs):\n return torch.arange(*args, **kwargs).to(device)\n\n\n\"\"\"\nTorch-Numpy functions\n\"\"\"\n\n\ndef from_numpy(ndarray, requires_grad=False):\n return torch.from_numpy(ndarray).float().to(device).requires_grad_(requires_grad)\n\n\ndef get_numpy(tensor):\n if isinstance(tensor, torch.Tensor):\n return tensor.to('cpu').detach().numpy()\n else:\n return np.array(tensor)\n\n\ndef torch_ify(np_array_or_other):\n if isinstance(np_array_or_other, np.ndarray):\n return from_numpy(np_array_or_other, requires_grad=True)\n else:\n return np_array_or_other\n\n\ndef np_ify(tensor_or_other):\n if isinstance(tensor_or_other, TorchVariable):\n return get_numpy(tensor_or_other)\n else:\n return tensor_or_other\n\n\n\"\"\"\nTensor initialization\n\"\"\"\n\n\ndef zeros_init(tensor):\n with torch.no_grad():\n return tensor.zero_()\n\n\ndef fanin_init(tensor):\n \"\"\"\n Fan-in initialization.\n Args:\n tensor:\n\n Returns:\n\n \"\"\"\n size = tensor.size()\n if len(size) == 2:\n fan_in = size[0]\n elif len(size) > 2:\n fan_in = np.prod(size[1:])\n else:\n raise Exception(\"Shape must be have dimension at least 2.\")\n bound = 1. / np.sqrt(fan_in)\n return tensor.data.uniform_(-bound, bound)\n\n\ndef fanin_init_weights_like(tensor):\n size = tensor.size()\n if len(size) == 2:\n fan_in = size[0]\n elif len(size) > 2:\n fan_in = np.prod(size[1:])\n else:\n raise Exception(\"Shape must be have dimension at least 2.\")\n bound = 1. / np.sqrt(fan_in)\n new_tensor = FloatTensor(tensor.size())\n new_tensor.data.uniform_(-bound, bound)\n return new_tensor\n\n\ndef xavier_initOLD(tensor, gain=1, uniform=True):\n if uniform:\n return xavier_uniform_init(tensor, gain)\n else:\n return xavier_norm_init(tensor, gain)\n\n\ndef xavier_norm_init(tensor, gain=1):\n return torch.nn.init.xavier_normal_(tensor, gain=gain)\n\n\ndef xavier_uniform_init(tensor, gain=1):\n return torch.nn.init.xavier_uniform_(tensor, gain=gain)\n\n\ndef layer_init(layer, option='xavier_normal', activation='relu', b=0.01):\n if option.lower().startswith('xavier'):\n init_weight_xavier(layer=layer, option=option, activation=activation)\n elif option.lower().startswith('uniform'):\n init_weight_uniform(layer=layer, activation=activation)\n elif option.lower().startswith('normal'):\n init_weight_normal(layer=layer, activation=activation)\n else:\n raise ValueError(\"Wrong init option\")\n\n if hasattr(layer, 'bias'):\n fill(layer.bias, b)\n\n\ndef init_weight_uniform(layer, activation='1e-3'):\n if isinstance(activation, float):\n a = -activation\n b = activation\n else:\n a = -1.e-3\n b = 1.e-3\n nn.init.uniform_(layer.weight, a=a, b=b)\n\n\ndef init_weight_normal(layer, activation='1e2'):\n if isinstance(activation, float):\n std = activation\n else:\n std = 1.e-3\n nn.init.normal_(layer.weight, mean=0., std=std)\n\n\ndef init_weight_xavier(layer, option='xavier_normal', activation='relu'):\n if option == 'xavier_normal':\n xavier_fcn = nn.init.xavier_normal_\n elif option == 'xavier_uniform':\n xavier_fcn = nn.init.xavier_uniform_\n else:\n raise ValueError(\"Wrong init option\")\n\n if activation.lower() in ['relu']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('relu')\n )\n elif activation in ['leaky_relu']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('leaky_relu')\n )\n elif activation.lower() in ['tanh']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('tanh')\n )\n elif activation.lower() in ['sigmoid']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('sigmoid')\n )\n elif activation.lower() in ['linear']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('linear')\n )\n elif activation.lower() in ['elu']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('relu')\n )\n elif activation.lower() in ['selu']:\n xavier_fcn(layer.weight,\n gain=nn.init.calculate_gain('relu')\n )\n elif activation.lower() in ['0.1']:\n xavier_fcn(layer.weight,\n gain=0.1,\n )\n elif activation.lower() in ['0.01']:\n xavier_fcn(layer.weight,\n gain=0.01,\n )\n elif activation.lower() in ['0.001']:\n xavier_fcn(layer.weight,\n gain=0.001,\n )\n elif activation.lower() in ['0.003']:\n xavier_fcn(layer.weight,\n gain=0.001,\n )\n else:\n raise AttributeError('Wrong option')\n\n\ndef get_activation(name):\n if name.lower() == 'relu':\n activation = torch.nn.functional.relu\n elif name.lower() == 'elu':\n activation = torch.nn.functional.elu\n elif name.lower() == 'leaky_relu':\n activation = torch.nn.functional.leaky_relu\n elif name.lower() == 'selu':\n activation = torch.nn.functional.selu\n elif name.lower() == 'sigmoid':\n activation = torch.nn.functional.sigmoid\n elif name.lower() == 'tanh':\n activation = torch.tanh\n elif name.lower() in ['linear', 'identity']:\n activation = identity\n else:\n raise AttributeError(\"Pytorch does not have activation '%s'\",\n name)\n return activation\n\n\n\"\"\"\nModule functions\n\"\"\"\n\n\ndef register_parameter(module, parameters_dict, name, param):\n r\"\"\"Adds a parameter to the module.\n\n The parameter can be accessed as an attribute using given name.\n\n Args:\n name (string): name of the parameter. The parameter can be accessed\n from this module using the given name\n parameter (Parameter): parameter to be added to the module.\n \"\"\"\n if hasattr(module, name) and name not in parameters_dict:\n raise KeyError(\"attribute '{}' already exists\".format(name))\n elif '.' in name:\n raise KeyError(\"parameter name can't contain \\\".\\\"\")\n elif name == '':\n raise KeyError(\"parameter name can't be empty string \\\"\\\"\")\n\n if param is None:\n parameters_dict[name] = None\n elif not isinstance(param, nn.Parameter):\n raise TypeError(\"cannot assign '{}' object to parameter '{}' \"\n \"(torch.nn.Parameter or None required)\"\n .format(torch.typename(param), name))\n elif param.grad_fn:\n raise ValueError(\n \"Cannot assign non-leaf Tensor to parameter '{0}'. Model \"\n \"parameters must be created explicitly. To express '{0}' \"\n \"as a function of another Tensor, compute the value in \"\n \"the forward() method.\".format(name))\n else:\n parameters_dict[name] = param\n\n\ndef named_children(modules_dict):\n r\"\"\"Returns an iterator over immediate children modules, yielding both\n the name of the module as well as the module itself.\n\n Yields:\n (string, Module): Tuple containing a name and child module\n\n Example::\n\n >>> for name, module in model.named_children():\n >>> if name in ['conv4', 'conv5']:\n >>> print(module)\n\n \"\"\"\n memo = set()\n for name, module in modules_dict.items():\n if module is not None and module not in memo:\n memo.add(module)\n yield name, module\n\n\ndef named_parameters(modules_dict, parameters_dict, memo=None, prefix=''):\n r\"\"\"Returns an iterator over module parameters, yielding both the\n name of the parameter as well as the parameter itself\n\n Yields:\n (string, Parameter): Tuple containing the name and parameter\n\n Example::\n\n >>> for name, param in self.named_parameters(my_modules_dict, my_parameters_dict):\n >>> if name in ['bias']:\n >>> print(param.size())\n\n \"\"\"\n if memo is None:\n memo = set()\n for name, p in parameters_dict.items():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n for mname, module in named_children(modules_dict):\n submodule_prefix = prefix + ('.' if prefix else '') + mname\n for name, p in module.named_parameters(memo, submodule_prefix):\n yield name, p\n\n\ndef add_module(modules_dict, name, module):\n r\"\"\"Adds a child module to the current module.\n\n The module can be accessed as an attribute using the given name.\n\n Args:\n name (string): name of the child module. The child module can be\n accessed from this module using the given name\n parameter (Module): child module to be added to the module.\n \"\"\"\n if not isinstance(module, nn.Module) and module is not None:\n raise TypeError(\"{} is not a Module subclass\".format(\n torch.typename(module)))\n elif '.' in name:\n raise KeyError(\"module name can't contain \\\".\\\"\")\n elif name == '':\n raise KeyError(\"module name can't be empty string \\\"\\\"\")\n modules_dict[name] = module\n" }, { "alpha_fraction": 0.5902140736579895, "alphanum_fraction": 0.6314984560012817, "avg_line_length": 20.799999237060547, "blob_id": "f7e8fa08c174be238da94cb702ee6a0999f4a7a9", "content_id": "b04e4b303ca544f84580ef7076e1574758f5dd48", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "permissive", "max_line_length": 63, "num_lines": 60, "path": "/examples/simple_envs/test_goalcompo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from builtins import input\nimport numpy as np\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.envs.simple_envs import Navigation2dGoalCompoEnv\n\nGOAL = (0.65, 0.65)\nTGT_POSE = (0.5, 0.25, 1.4660)\n\n\nPATH_LENGTH = 500\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nenv_params = dict(\n goal_reward=5,\n actuation_cost_coeff=0.5,\n distance_cost_coeff=1.5,\n log_distance_cost_coeff=0,#1.5,\n alpha=1e-6,\n # Initial Condition\n init_position=(-4., -4.),\n init_sigma=1.50,\n # Goal\n goal_position=(5., 5.),\n goal_threshold=0.25,\n # Others\n dynamics_sigma=0.1,\n # horizon=PATH_LENGTH,\n horizon=None,\n)\nenv = NormalizedBoxEnv(\n Navigation2dGoalCompoEnv(**env_params)\n)\nfor ii in range(5):\n env.reset()\n env.render()\n\nenv.reset()\nenv.render()\n\n# input('Press a key to start interacting...')\nfor ii in range(50):\n action = env.action_space.sample()\n obs, reward, done, env_info = env.step(action)\n print('')\n print('---'*3, ii, '---'*3)\n print('action -->', action)\n print('obs -->', obs)\n print('reward -->', reward)\n print('done -->', done)\n print('info -->', env_info)\n env.render()\n\ninput('Press a key to reset...')\n\nenv.reset()\nenv.render()\n\ninput('Press a key to close the script')\n" }, { "alpha_fraction": 0.6676470637321472, "alphanum_fraction": 0.6735293865203857, "avg_line_length": 27.33333396911621, "blob_id": "58b6364ef0293a8703523f252a8b7e088f5663d5", "content_id": "c34efe43eb5e15cd20b52c33c1bd555972847350", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "permissive", "max_line_length": 69, "num_lines": 12, "path": "/robolearn/torch/utils/nn/networks/flatten_mlp.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch\nfrom robolearn.torch.utils.nn.networks.mlp import Mlp\n\n\nclass FlattenMlp(Mlp):\n \"\"\"\n Flatten inputs along dimension -1 and then pass through MLP.\n \"\"\"\n\n def forward(self, *nn_inputs, **kwargs):\n flat_inputs = torch.cat(nn_inputs, dim=-1)\n return super(FlattenMlp, self).forward(flat_inputs, **kwargs)\n" }, { "alpha_fraction": 0.8659793734550476, "alphanum_fraction": 0.8659793734550476, "avg_line_length": 47.5, "blob_id": "b603d14c8b8b0acaae6c634124955e49f09040b5", "content_id": "5ffa3695080838d9d8b166f7f8dcdfead59f7796", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "permissive", "max_line_length": 60, "num_lines": 2, "path": "/robolearn/torch/utils/distributions/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .tanh_normal import TanhNormal\nfrom .tanh_multivariate_normal import TanhMultivariateNormal\n" }, { "alpha_fraction": 0.4885496199131012, "alphanum_fraction": 0.5190839767456055, "avg_line_length": 23.5625, "blob_id": "96c9c3f5c9cbcb35fc2ce8a3b1a9b12213750f79", "content_id": "dc451df22b99210d101b1758b50f888f608ecb33", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 393, "license_type": "permissive", "max_line_length": 55, "num_lines": 16, "path": "/scenarios/humanoids2018/scripts/reacher_gps_all.sh", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ngps_script_numbers=(1 2 3 4 5)\n\ndefault_seeds=(0 50 100)\nseeds=(\"${@:-${default_seeds[@]}}\")\n\nDIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\nfor index in ${!gps_script_numbers[@]}; do\n number=${gps_script_numbers[index]}\n echo \"************************************\"\n echo \"Running gps_'${number}'\"\n\n ${DIR}/reacher_gps${number}.sh \"${seeds[@]}\"\n\ndone\n" }, { "alpha_fraction": 0.6536661386489868, "alphanum_fraction": 0.6536661386489868, "avg_line_length": 24.639999389648438, "blob_id": "c9024e09cca8e1e3f09d982cbb94c4002117ad8b", "content_id": "6efcb926c143c7697be5730e12728fdd6ee26823", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "permissive", "max_line_length": 58, "num_lines": 25, "path": "/robolearn/torch/algorithms/torch_algorithm.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\nfrom typing import Iterable\n\nfrom robolearn.torch.core import PyTorchModule\n\n\nclass TorchAlgorithm(with_metaclass(abc.ABCMeta, object)):\n @property\n @abc.abstractmethod\n def torch_models(self) -> Iterable[PyTorchModule]:\n # # type: (None) -> Iterable[PyTorchModule]\n pass\n\n def training_mode(self, mode):\n for model in self.torch_models:\n model.train(mode)\n\n def cuda(self, device=None):\n for model in self.torch_models:\n model.cuda(device)\n\n def cpu(self):\n for model in self.torch_models:\n model.cpu()\n" }, { "alpha_fraction": 0.8529411554336548, "alphanum_fraction": 0.8529411554336548, "avg_line_length": 44.33333206176758, "blob_id": "a82676acb32bd347dc962c86322d7203fbca7126", "content_id": "b96f034ef8e3b2c1530a3ea05628c4d6c3837aeb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "permissive", "max_line_length": 53, "num_lines": 3, "path": "/robolearn/utils/samplers/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .rollout import rollout\nfrom .in_place_path_sampler import InPlacePathSampler\nfrom .exploration_rollout import exploration_rollout\n" }, { "alpha_fraction": 0.46011754870414734, "alphanum_fraction": 0.5011395215988159, "avg_line_length": 35.72246551513672, "blob_id": "e8eb6e845f5f902f90fc0b0db5f811a102f62d55", "content_id": "af38e312e906e7c9bf126ca916b4e636a62267de", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8337, "license_type": "permissive", "max_line_length": 88, "num_lines": 227, "path": "/scenarios/humanoids2018/plots/plot_states_trajs.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport pickle\nimport os, sys\n\n# NOTE: IT REQUIRES TO GENERATE ALL THE REQUIRED DATA IN DIR_NAME\n\ndir_name = 'state_data_mdgps_log/run_00/'\n\ncond = 3 # Training condition to plot\noption = -1 # Option:-1:2d | -2:2dObst | -3:2dTgt | Positive: specified state idx\nblock = True # Block plot or not (for visualization)\n\ntgt_positions = [(0.3693, 0.6511),\n (0.3913, 0.6548),\n (0.3296, 0.6863),\n (0.6426, 0.4486),\n (0.1991, 0.7172)]\nobst_positions = [(0.6545, -0.0576),\n (0.6964, -0.0617),\n (0.6926, -0.0929),\n (0.6778, -0.013),\n (0.6781, -0.0882)]\n\nsafe_distance = 0.15\n\nif option in [-1, -3]:\n idx_to_plot = [6, 7]\n plt.rcParams[\"figure.figsize\"] = (5, 5)\nelif option == -2:\n idx_to_plot = [9, 10]\n plt.rcParams[\"figure.figsize\"] = (5, 5)\nelse:\n idx_to_plot = [option]\n plt.rcParams[\"figure.figsize\"] = (30, 15)\n\nfile_path = os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n\nlines = list()\nlabels = list()\nfor itp in range(1, 50):\n\n file_to_load = file_path + 'all_x_itr_%02d_cond_%02d.pkl' % (itp, cond)\n\n all_x = pickle.load(open(file_to_load, 'rb'))\n\n sample_data = pickle.load(open(file_to_load, 'rb'))\n\n fig, ax = plt.subplots(1, 1)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Traj Iteration %02d '\n 'Condition %02d' % (itp, cond))\n fig.set_facecolor((1, 1, 1))\n\n mus_new = all_x['new']\n mus_prev = all_x['prev']\n mus_bad = all_x['bad']\n mus_good = all_x['good']\n\n T = mus_new.shape[0]\n mus_to_plot = np.zeros((T, len(idx_to_plot)))\n mus_prev_to_plot = np.zeros((T, len(idx_to_plot)))\n mus_bad_to_plot = np.zeros((T, len(idx_to_plot)))\n mus_good_to_plot = np.zeros((T, len(idx_to_plot)))\n\n if option in [-1, -3]:\n list_to_substact = tgt_positions\n else:\n list_to_substact = obst_positions\n\n if option in [-1, -2, -3]:\n for ii, index in enumerate(idx_to_plot):\n mus_to_plot[:, ii] = list_to_substact[cond][ii] + mus_new[:, index]\n mus_prev_to_plot[:, ii] = list_to_substact[cond][ii] + mus_prev[:, index]\n mus_bad_to_plot[:, ii] = list_to_substact[cond][ii] + mus_bad[:, index]\n mus_good_to_plot[:, ii] = list_to_substact[cond][ii] + mus_good[:, index]\n else:\n for ii, index in enumerate(idx_to_plot):\n mus_to_plot[:, ii] = mus_new[:, index]\n mus_prev_to_plot[:, ii] = mus_prev[:, index]\n mus_bad_to_plot[:, ii] = mus_bad[:, index]\n mus_good_to_plot[:, ii] = mus_good[:, index]\n\n if option not in [-1, -2, -3]:\n # ###########\n # PER STATE #\n # ###########\n\n line = ax.plot(np.arange(T)*0.03,\n mus_prev_to_plot[:],\n c='black')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Previous Traj.')\n line = ax.plot(np.arange(T)*0.03,\n mus_bad_to_plot[:],\n c='red')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Bad Traj.')\n\n line = ax.plot(np.arange(T)*0.03,\n mus_good_to_plot[:],\n c='green')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Good Traj.')\n line = ax.plot(np.arange(T)*0.03,\n mus_to_plot[:],\n c='blue')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Updated Traj.')\n\n ax.set_xlabel(\"Time\", fontsize=20, weight='bold')\n ax.set_ylabel(\"State %02d\" % idx_to_plot[0],\n fontsize=20, weight='bold')\n ax.xaxis.grid(color='white', linewidth=2)\n ax.set_xlim([0., 15.])\n\n else:\n # ####\n # 2D #\n # ####\n\n line = ax.plot(mus_prev_to_plot[:, 0],\n mus_prev_to_plot[:, 1],\n c='black')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Previous Traj.')\n line = ax.plot(mus_bad_to_plot[:, 0],\n 
mus_bad_to_plot[:, 1],\n c='red')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Bad Traj.')\n\n line = ax.plot(mus_good_to_plot[:, 0],\n mus_good_to_plot[:, 1],\n c='green')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Good Traj.')\n line = ax.plot(mus_to_plot[:, 0],\n mus_to_plot[:, 1],\n c='blue')[0]\n if itp == 1:\n lines.append(line)\n labels.append('Updated Traj.')\n\n if tgt_positions is not None:\n tgt = tgt_positions[cond]\n circle1 = plt.Circle(tgt, 0.02, facecolor='green', alpha=0.6,\n edgecolor='black')\n ax.add_artist(circle1)\n if obst_positions is not None:\n if safe_distance is not None:\n obstacle = np.array(obst_positions[cond])\n circle2 = plt.Circle(obstacle, safe_distance, color='black',\n alpha=0.1)\n ax.add_artist(circle2)\n obstacle = obst_positions[cond]\n circle2 = plt.Circle(obstacle, 0.05, color='red', alpha=0.2)\n ax.add_artist(circle2)\n\n ax.set_xlabel(\"X\", fontsize=20, weight='bold')\n ax.set_ylabel(\"Y\",\n fontsize=20, weight='bold')\n\n if option == -1:\n ax.set_xlim([-0.0, 1.])\n ax.set_ylim([-1.0, 0.75])\n ax.set_aspect(1.75)\n elif option == -2:\n ax.set_title('Obstacle', fontweight='bold', fontsize=25)\n ax.set_xlim([list_to_substact[cond][0] - 0.2,\n list_to_substact[cond][0] + 0.2])\n ax.set_ylim([list_to_substact[cond][1] - 0.2,\n list_to_substact[cond][1] + 0.2])\n ax.set_aspect(1)\n ax.xaxis.set_ticklabels([])\n ax.yaxis.set_ticklabels([])\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n else:\n ax.set_title('Target', fontweight='bold', fontsize=25)\n ax.set_xlim([list_to_substact[cond][0] - 0.2,\n list_to_substact[cond][0] + 0.2])\n ax.set_ylim([list_to_substact[cond][1] - 0.2,\n list_to_substact[cond][1] + 0.2])\n ax.set_aspect(1)\n ax.xaxis.set_ticklabels([])\n ax.yaxis.set_ticklabels([])\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ax.tick_params(axis='x', labelsize=15)\n ax.tick_params(axis='y', labelsize=15)\n\n # Background\n ax.set_facecolor((0.917, 0.917, 0.949))\n\n if option == -1:\n fig.legend(lines, labels, loc='center right', ncol=1)\n plt.savefig(\"all_plots/2d/itr_%02d_cond_%02d.svg\" % (itp, cond))\n plt.savefig(\"all_plots/itr_%02d_cond_%02d.svg\" % (itp, cond))\n plt.savefig(\"all_plots/2d/itr_%02d_cond_%02d.png\" % (itp, cond))\n plt.savefig(\"all_plots/itr_%02d_cond_%02d.png\" % (itp, cond))\n elif option == -2:\n # fig.legend(lines, labels, loc='center right', ncol=1)\n plt.savefig(\"all_plots/obst/itr_%02d_cond_%02d_obst.svg\" % (itp, cond))\n plt.savefig(\"all_plots/obst/itr_%02d_cond_%02d_obst.png\" % (itp, cond))\n elif option == -3:\n # fig.legend(lines, labels, loc='center right', ncol=1)\n plt.savefig(\"all_plots/_object/itr_%02d_cond_%02d_tgt.svg\" % (itp, cond))\n plt.savefig(\"all_plots/_object/itr_%02d_cond_%02d_tgt.png\" % (itp, cond))\n else:\n fig.legend(lines, labels, loc='center right', ncol=1, fontsize=15)\n plt.savefig(\"all_plots/itr_%02d_cond_%02d_state_%02d.svg\" % (itp, cond, option))\n plt.savefig(\"all_plots/itr_%02d_cond_%02d_state_%02d.png\" % (itp, cond, option))\n\nplt.show(block=block)\n\n" }, { "alpha_fraction": 0.6440856456756592, "alphanum_fraction": 0.6625216603279114, "avg_line_length": 43.04359817504883, "blob_id": "b78fa5c334f96af5d0b4593ffc162aa554abfb95", "content_id": "16347de56fb3b10cbbb445e9aa1122d56ca2657c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16164, "license_type": "permissive", "max_line_length": 143, "num_lines": 367, "path": "/scenarios/tests/trajrep-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport math\nimport os\nimport rospy\nimport matplotlib.pyplot as plt\nimport tf\nfrom XCM.msg import CommandAdvr\nfrom XCM.msg import JointStateAdvr\nfrom robolearn.old_utils.trajectory_reproducer import TrajectoryReproducer\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.transformations_utils import *\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_torque_position\nfrom robolearn.old_utils.plot_utils import plot_joint_info\nfrom gazebo_msgs.srv import SpawnModel\nfrom gazebo_msgs.srv import DeleteModel\nfrom geometry_msgs.msg import Pose\nimport rbdl\n\nfrom robolearn.old_utils.robot_model import RobotModel\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Always turn off logger\nos.system(\"gz log -d 0\")\n\n#current_path = os.path.abspath(__file__)\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(path)\n\nload_torques = False\ntorques_saved_filename = 'torques_init_traj.npy'\n\nT_init = 3\nT_init_traj = 5\nT_impedance_zero = 10\nT_sleep = 2.\nremove_spawn_new_box = False\nfreq = 100\nbox_position = np.array([0.75,\n 0.00,\n 0.0184])\nbox_size = [0.4, 0.5, 0.3]\nbox_yaw = 0 # Degrees\nbox_orient = tf.transformations.rotation_matrix(np.deg2rad(box_yaw), [0, 0, 1])\nbox_matrix = homogeneous_matrix(rot=box_orient, pos=box_position)\n\nreach_method = 0\nlift_method = 2\n\n#traj_files = ['trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(reach_method)+'_reach.npy',\n# 'trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(lift_method)+'_lift.npy']\ntraj_files = ['trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)+'_m'+str(reach_method)+'_reach.npy']\ntraj_rep = TrajectoryReproducer(traj_files)\n\ndefault_joint_stiffness = np.array([8000., 5000., 8000., 5000., 5000., 2000.,\n 8000., 5000., 5000., 5000., 5000., 2000.,\n 5000., 8000., 5000.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.,\n 300., 300.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.])\ndefault_joint_damping = np.array([30., 50., 30., 30., 30., 5.,\n 30., 50., 30., 30., 30., 5.,\n 30., 50., 30.,\n 30., 50., 30., 30., 1., 5., 1.,\n 1., 1.,\n 30., 50., 30., 30., 1., 5., 1.])\n\n# ROBOT MODEL for trying ID\nrobot_urdf = '/home/domingo/robotology-superbuild/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf)\n#LH_name = 'LWrMot3'\n#RH_name = 'RWrMot3'\n\njoint_pos_state = np.zeros(robot_model.q_size)\njoint_vel_state = np.zeros(robot_model.qdot_size)\njoint_effort_state = np.zeros(robot_model.qdot_size)\njoint_stiffness_state = np.zeros(robot_model.qdot_size)\njoint_damping_state = np.zeros(robot_model.qdot_size)\njoint_state_id = []\n\n\ndef callback(data, params):\n joint_ids = params[0]\n joint_pos_state = params[1]\n joint_effort_state = params[2]\n #if not joint_ids:\n # joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_pos_state[joint_ids] = data.link_position\n joint_effort_state[joint_ids] = data.effort\n joint_stiffness_state[joint_ids] = data.stiffness\n joint_damping_state[joint_ids] = data.damping\n 
joint_vel_state[joint_ids] = data.link_velocity\n\npublisher = rospy.Publisher(\"/xbotcore/bigman/command\", CommandAdvr, queue_size=10)\nsubscriber = rospy.Subscriber(\"/xbotcore/bigman/joint_states\", JointStateAdvr, callback, (joint_state_id, joint_pos_state, joint_effort_state))\nrospy.init_node('traj_example')\npub_rate = rospy.Rate(freq)\ndes_cmd = CommandAdvr()\ndes_cmd.name = bigman_params['joints_names']\n\nq_init = traj_rep.get_data(0)*0\nN = int(np.ceil(T_init*freq))\njoint_init_traj = polynomial5_interpolation(N, q_init, joint_pos_state)[0]\nprint(\"Moving to zero configuration\")\nfor ii in range(N):\n des_cmd.position = joint_init_traj[ii, :]\n des_cmd.stiffness = default_joint_stiffness\n des_cmd.damping = default_joint_damping\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\nq_init = traj_rep.get_data(0)\nN = int(np.ceil(T_init_traj*freq))\njoint_init_traj = polynomial5_interpolation(N, q_init, joint_pos_state)[0]\njoint_init_traj_dots = np.vstack((np.diff(joint_init_traj, axis=0), np.zeros((1, traj_rep.dim))))*freq\njoint_init_traj_ddots = np.vstack((np.diff(joint_init_traj_dots, axis=0), np.zeros((1, traj_rep.dim))))*freq*freq\n\ntau = np.zeros(robot_model.qdot_size)\na = np.zeros(robot_model.qdot_size)\nM = np.zeros((robot_model.qdot_size, robot_model.qdot_size))\njoints_to_move = bigman_params['joint_ids']['BA'][:7]\n#joints_to_move = [bigman_params['joint_ids']['BA'][6]]\ndes_cmd.name = [bigman_params['joints_names'][idx] for idx in joints_to_move]\n#raw_input(\"Press key for moving to the initial configuration of trajectory\")\ndes_cmd.position = []\nqs_init_traj = np.zeros((N, robot_model.q_size))\ntaus_cmd_init_traj = np.zeros((N, robot_model.qdot_size))\n\nif load_torques:\n taus_init_traj = np.load(torques_saved_filename)\nelse:\n taus_init_traj = np.zeros((N, robot_model.qdot_size))\n\nprint(\"Moving to the initial configuration of trajectory\")\nraw_input(\"Press a key to continue...\")\nfor ii in range(N):\n if load_torques:\n print(\"Reproducing previous torques!\")\n des_cmd.effort = taus_init_traj[ii, joints_to_move]\n des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n des_cmd.damping = np.zeros_like(tau[joints_to_move])\n else:\n #des_cmd.position = joint_init_traj[ii, joints_to_move]\n #des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n #des_cmd.damping = default_joint_damping[joints_to_move]\n #taus_init_traj[ii, :] = joint_effort_state\n #print(joint_init_traj[ii, joints_to_move] - joint_pos_state[joints_to_move])\n #robot_model.update_torque(tau, joint_init_traj[ii, :], joint_init_traj_dots[ii, :]*freq,\n # joint_init_traj_ddots[ii, :]*freq*freq)\n #robot_model.update_coriolis_forces(tau, joint_pos_state, joint_vel_state)\n #robot_model.update_coriolis_forces(tau, joint_pos_state, joint_vel_state*0)\n\n #a = joint_init_traj_ddots[ii, :] + \\\n a = default_joint_damping * (joint_init_traj_dots[ii, :] - joint_vel_state) + \\\n default_joint_stiffness * (joint_init_traj[ii, :] - joint_pos_state)\n robot_model.update_inertia_matrix(M, joint_pos_state)\n robot_model.update_torque(tau, joint_pos_state, joint_vel_state,\n joint_init_traj_ddots[ii, :])\n tau += M.dot(a)\n des_cmd.effort = tau[joints_to_move]\n des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n des_cmd.damping = np.zeros_like(tau[joints_to_move])\n publisher.publish(des_cmd)\n taus_init_traj[ii, :] = joint_effort_state\n taus_cmd_init_traj[ii, :] = tau\n pub_rate.sleep()\njoints_to_plot = bigman_params['joint_ids']['LA']\ncols = 3\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nplot_desired_sensed_torque_position(joints_to_plot, taus_cmd_init_traj, taus_init_traj,\n joint_init_traj, qs_init_traj, joint_names, block=True, cols=cols)\n\nprint(\"Saving sensed torques in %s\" % torques_saved_filename)\nnp.save(torques_saved_filename, taus_init_traj)\nsys.exit()\n\ndes_cmd.stiffness = []\ndes_cmd.damping = []\n\nif remove_spawn_new_box:\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/cardboard_cube_box/model.sdf', 'r')\n sdf_box = f.read()\n f = open('/home/domingo/robotlearning-superbuild/catkin_ws/src/robolearn_gazebo_env/models/big_support/model.sdf', 'r')\n sdf_box_support = f.read()\n box_pose = Pose()\n box_pose.position.x = box_position[0]\n box_pose.position.y = box_position[1]\n box_pose.position.z = 1.014\n box_quat = tf.transformations.quaternion_from_matrix(box_matrix)\n box_pose.orientation.x = box_quat[0]\n box_pose.orientation.y = box_quat[1]\n box_pose.orientation.z = box_quat[2]\n box_pose.orientation.w = box_quat[3]\n box_support_pose = Pose()\n box_support_pose.position.x = box_position[0]\n box_support_pose.position.y = box_position[1]\n box_support_pose.position.z = 0\n box_support_pose.orientation = box_pose.orientation\n rospy.wait_for_service('gazebo/delete_model')\n delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n print(\"Deleting previous box...\")\n #raw_input(\"Press for delete box_support\")\n try:\n delete_model_prox(\"box_support\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n try:\n delete_model_prox(\"box\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/delete_model service call failed: %s\" % str(exc))\n rospy.wait_for_service('gazebo/spawn_sdf_model')\n spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)\n print(\"Spawning new box...\")\n try:\n spawn_model_prox(\"box_support\", sdf_box_support, \"box_support\", box_support_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n try:\n spawn_model_prox(\"box\", sdf_box, \"box\", box_pose, \"world\")\n except rospy.ServiceException as exc:\n print(\"/gazebo/spawn_sdf_model service call failed: %s\" % str(exc))\n\n\n\nqs = traj_rep.traj.copy()\nsensed_qs = np.zeros_like(qs)\nqdots = np.vstack((np.diff(qs, axis=0), np.zeros((1, traj_rep.dim))))\nqddots = np.vstack((np.diff(qdots, axis=0), np.zeros((1, traj_rep.dim))))\ntaus = np.zeros_like(qdots)\nsensed_taus = np.zeros_like(taus)\n\nq = np.zeros(robot_model.q_size)\nqdot = np.zeros(robot_model.qdot_size)\nqddot = np.zeros(robot_model.qdot_size)\ntau = np.zeros(robot_model.qdot_size)\n\nprint(\"Sleeping for %.2f secs\" % T_sleep)\nrospy.sleep(T_sleep)\n\nprint('joint_stiffness = %s' % repr(joint_stiffness_state))\nprint('joint_damping = %s' % repr(joint_damping_state))\n#raw_input()\n\n## Set impedance to zero in order to perform pure torque control\n#print(\"Setting impedance to zero...\")\n#Timp = 0.5\n#for ii in range(int(Timp*freq)):\n# #des_cmd.position = joint_init_traj[ii, :]\n# des_cmd.stiffness = np.zeros_like(qdot)\n# des_cmd.damping = np.zeros_like(qdot)\n# publisher.publish(des_cmd)\n# pub_rate.sleep()\n\n#raw_input(\"Press key for reproducing trajectory\")\nprint(\"Reproducing trajectory (ONLY ARMS)...\")\njoints_to_move = bigman_params['joint_ids']['BA'][5:6]\njoints_to_move = [bigman_params['joint_ids']['BA'][6]]\ndes_cmd.name = [bigman_params['joints_names'][idx] for idx in joints_to_move]\n#robot_model.update_torque(tau, joint_pos_state, joint_vel_state, qddot)\nrobot_model.update_torque(tau, joint_pos_state, qdot, qddot)\nprint(tau[joints_to_move])\nprint(joint_effort_state[joints_to_move])\ndes_cmd.position = []\ndes_cmd.effort = tau[joints_to_move]\n#des_cmd.effort = joint_effort_state[bigman_params['joint_ids']['BA'][:4]]\ndes_cmd.stiffness = np.zeros_like(default_joint_stiffness[joints_to_move])\ndes_cmd.damping = np.zeros_like(default_joint_damping[joints_to_move])\nqs2 = np.tile(joint_pos_state[:], (T_impedance_zero*freq, 1))\ntaus2 = np.tile(tau[:], (T_impedance_zero*freq, 1))\nsensed_qs2 = np.zeros((T_impedance_zero*freq, robot_model.q_size))\nsensed_taus2 = np.zeros((T_impedance_zero*freq, robot_model.qdot_size))\n\ntemp_tau = np.zeros((T_impedance_zero*freq, robot_model.q_size))\ntemp_stiff = np.zeros((T_impedance_zero*freq, robot_model.q_size))\ntemp_damp = np.zeros((T_impedance_zero*freq, robot_model.q_size))\nfor qq in range(robot_model.q_size):\n temp_tau[:, qq] = np.linspace(joint_effort_state[qq], 0, T_impedance_zero*freq)\n temp_stiff[:, qq] = np.linspace(default_joint_stiffness[qq], 0, T_impedance_zero*freq)\n temp_damp[:, qq] = np.linspace(default_joint_damping[qq], 0, T_impedance_zero*freq)\n\nos.system(\"gz log -d 1\")\nraw_input(\"Press a key for setting impedance to zero\")\n#for ii in range(T_impedance_zero*freq):\n# print(\"Decreasing zero stiffness and damping...\")\n# des_cmd.effort = temp_tau[-ii-1, joints_to_move]\n# des_cmd.stiffness = temp_stiff[ii, joints_to_move]\n# des_cmd.damping = temp_damp[ii, joints_to_move]\n# publisher.publish(des_cmd)\n# pub_rate.sleep()\nfor ii in range(T_impedance_zero*freq):\n print(\"Sending zero stiffness and damping...\")\n robot_model.update_torque(tau, qs[0,:], qdot, qddot)\n #robot_model.update_coriolis_forces(tau, joint_pos_state, joint_vel_state)\n #robot_model.update_coriolis_forces(tau, joint_pos_state, joint_vel_state*0)\n des_cmd.effort = tau[joints_to_move]\n sensed_taus2[ii, ] = joint_effort_state\n sensed_qs2[ii, :] = joint_pos_state\n publisher.publish(des_cmd)\n pub_rate.sleep()\nos.system(\"gz log -d 0\")\njoints_to_plot = bigman_params['joint_ids']['LA']\ncols = 3\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nplot_desired_sensed_torque_position(joints_to_plot, taus2, sensed_taus2, qs2, sensed_qs2, joint_names, block=False, cols=cols)\n#sys.exit()\nraw_input(\"AA\")\n\n\ndes_cmd.position = []\ndes_cmd.effort = []\ndes_cmd.stiffness = []\ndes_cmd.damping = []\nraw_input(\"Press a key for sending commands\")\nfor ii in range(traj_rep.data_points):\n#for ii in range(20):\n #print(\"Sending LIFTING cmd...\")\n #error = joint_lift_trajectory[ii, :] - joint_pos_state\n #print(error[bigman_params['joint_ids']['BA']])\n #des_cmd.position += K*error\n q[joints_to_move] = joint_effort_state[joints_to_move]\n qdot[joints_to_move] = qdots[ii, joints_to_move]*freq\n qddot[joints_to_move] = qddots[ii, joints_to_move]*freq*freq\n robot_model.update_torque(tau, q, qdot, qddot)\n taus[ii, :] = tau\n sensed_taus[ii, ] = joint_effort_state\n sensed_qs[ii, :] = joint_pos_state\n print(\"joint_names: %s\" % [bigman_params['joints_names'][id] for id in joints_to_move])\n print(\"q: %s\" % repr(q[joints_to_move]))\n print(\"tau: %s\" % repr(tau[joints_to_move]))\n print(\"tau_state: %s\" % repr(joint_effort_state[joints_to_move]))\n print(\"--\")\n #des_cmd.position = traj_rep.get_data(ii)[bigman_params['joint_ids']['BA'][joints_to_move]]\n #des_cmd.position = q[joints_to_move]\n des_cmd.effort = tau[joints_to_move]\n des_cmd.stiffness = np.zeros_like(qdot[joints_to_move])\n des_cmd.damping = np.zeros_like(qdot[joints_to_move])\n #des_cmd.stiffness = default_joint_stiffness[bigman_params['joint_ids']['BA'][joints_to_move]]\n #des_cmd.damping = default_joint_damping[bigman_params['joint_ids']['BA'][joints_to_move]]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n#fig, axs = plt.subplots(dU/cols+1, cols)\n#fig.canvas.set_window_title(\"Positions\")\n#fig.set_facecolor((0.5, 0.5, 0.5))\n#for ii in range(dU):\n# #plt.subplot(dU/cols+1, cols, ii+1)\n# print(ii/cols)\n# print(ii%cols)\n# print(\"-\")\n# axs[ii/cols, ii % cols].set_title(\"Position %d: %s\" % (ii+1, bigman_params['joints_names'][joints_to_plot[ii]]))\n# print(joints_to_plot[ii])\n# print(bigman_params['joints_names'][joints_to_plot[ii]])\n# axs[ii/cols, ii % cols].plot(qs[:, joints_to_plot[ii]], 'b')\n# axs[ii/cols, ii % cols].plot(sensed_qs[:, joints_to_plot[ii]], 'r')\n#plt.show(block=False)\n\njoints_to_plot = bigman_params['joint_ids']['LA']\ncols = 3\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nplot_desired_sensed_torque_position(joints_to_plot, taus, sensed_taus, qs, sensed_qs, joint_names, block=False, cols=cols)\n#plot_joint_info(joints_to_plot, taus, joint_names, data='torque')\n\n\nraw_input(\"Plotting..\")\n" }, { "alpha_fraction": 0.5948248505592346, "alphanum_fraction": 0.5986115336418152, "avg_line_length": 28.59813117980957, "blob_id": "8eaa20c9bc962cfe3c60f9332c71918d9fcbd2ad", "content_id": "36f838d33a601092ef8676ffc5a7f8d91702b44c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3169, "license_type": "permissive", "max_line_length": 86, "num_lines": 107, "path": "/robolearn/torch/utils/distributions/tanh_multivariate_normal.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis code is based on TanhNormal class.\nhttps://github.com/vitchyr/rlkit\n\"\"\"\nimport torch\nfrom torch.distributions import Distribution\nfrom torch.distributions.utils import lazy_property\n\n# from torch.distributions import MultivariateNormal\nfrom robolearn.torch.utils.distributions.multivariate_normal import MultivariateNormal\n\n\nclass TanhMultivariateNormal(Distribution):\n \"\"\"\n Represent distribution of X where\n X ~ tanh(Z)\n Z ~ N(mean, std)\n\n Note: this is not very numerically stable.\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n\n Args:\n mvn_mean (Tensor): Mean of the normal distribution\n mvn_covar (Tensor): Std of the normal distribution\n epsilon (Double): Numerical stability epsilon when computing\n log-prob.\n \"\"\"\n epsilon = kwargs.pop('epsilon', 1.e-6)\n super(TanhMultivariateNormal, self).__init__()\n\n self._multivariate_normal = MultivariateNormal(*args, **kwargs)\n self._epsilon = epsilon\n\n @property\n def mean(self):\n return self._multivariate_normal.mean\n\n @property\n def variance(self):\n return self._multivariate_normal.variance\n\n @property\n def stddev(self):\n return self._multivariate_normal.stddev\n\n @lazy_property\n def covariance_matrix(self):\n return self._multivariate_normal.covariance_matrix\n\n @property\n def epsilon(self):\n return self._epsilon\n\n def sample(self, return_pretanh_value=False):\n z = self._multivariate_normal.sample()\n if return_pretanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def rsample(self, return_pretanh_value=False):\n z = self._multivariate_normal.rsample()\n\n if return_pretanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def sample_n(self, n, return_pre_tanh_value=False):\n z = self._multivariate_normal.sample_n(n)\n if return_pre_tanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)\n\n def log_prob(self, value, pre_tanh_value=None):\n \"\"\"\n Returns the log of the probability density function evaluated at\n `value`.\n\n Args:\n value (Tensor):\n pre_tanh_value (Tensor): arctan(value)\n\n Returns:\n log_prob (Tensor)\n\n \"\"\"\n if pre_tanh_value is None:\n pre_tanh_value = torch.log(\n (1+value) / (1-value)\n ) / 2\n\n return self._multivariate_normal.log_prob(pre_tanh_value) - \\\n torch.sum(torch.log(1. - value * value + self._epsilon), dim=-1)\n # return self.normal.log_prob(pre_tanh_value) - \\\n # torch.log(1. - torch.tanh(pre_tanh_value)**2 + self._epsilon)\n\n def cdf(self, value, pre_tanh_value=None):\n if pre_tanh_value is None:\n pre_tanh_value = torch.log(\n (1+value) / (1-value)\n ) / 2\n # TODO: MAYBE THE FOLLOWING IS NOT CORRECT - maybe apply pretanh?:\n return self._multivariate_normal.cdf(pre_tanh_value)\n\n\n" }, { "alpha_fraction": 0.5705358386039734, "alphanum_fraction": 0.5964692831039429, "avg_line_length": 29.193395614624023, "blob_id": "ca8ee8bd1106812392b79cc6db2af3c8b4bb3040", "content_id": "d21231f8576ddde5ff5b5a8e9bb8d432dbd41d07", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6401, "license_type": "permissive", "max_line_length": 94, "num_lines": 212, "path": "/examples/mujoco_envs/mujoco_all_sql_haarnoja.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import argparse\n\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.envs.mujoco.swimmer_env import SwimmerEnv\nfrom rllab.envs.mujoco.ant_env import AntEnv\nfrom rllab.envs.mujoco.humanoid_env import HumanoidEnv\nfrom rllab.misc.instrument import VariantGenerator\n\nfrom softqlearning.misc.instrument import run_sql_experiment\nfrom softqlearning.algorithms import SQL\nfrom softqlearning.misc.kernel import adaptive_isotropic_gaussian_kernel\nfrom softqlearning.misc.utils import timestamp\nfrom softqlearning.replay_buffers import SimpleReplayBuffer\nfrom softqlearning.value_functions import NNQFunction\nfrom softqlearning.policies import StochasticNNPolicy\nfrom softqlearning.environments import GymEnv\nfrom softqlearning.misc.sampler import SimpleSampler\n\nSHARED_PARAMS = {\n 'seed': [1, 2, 3],\n 'policy_lr': 3E-4,\n 'qf_lr': 3E-4,\n 'discount': 0.99,\n 'layer_size': 128,\n 'batch_size': 128,\n 'max_pool_size': 1E6,\n 'n_train_repeat': 1,\n 'epoch_length': 1000,\n 'kernel_particles': 16,\n 'kernel_update_ratio': 0.5,\n 'value_n_particles': 16,\n 'td_target_update_interval': 1000,\n 'snapshot_mode': 'last',\n 'snapshot_gap': 100,\n}\n\n\nENV_PARAMS = {\n 'swimmer': { # 2 DoF\n 'prefix': 'swimmer',\n 'env_name': 'swimmer-rllab',\n 'max_path_length': 1000,\n 'n_epochs': 500,\n 'reward_scale': 30,\n },\n 'hopper': { # 3 DoF\n 'prefix': 'hopper',\n 'env_name': 'Hopper-v1',\n 'max_path_length': 1000,\n 'n_epochs': 2000,\n 'reward_scale': 30,\n },\n 'half-cheetah': { # 6 DoF\n 'prefix': 'half-cheetah',\n 'env_name': 'HalfCheetah-v2',\n 'max_path_length': 1000,\n 'n_epochs': 10000,\n 'reward_scale': 30,\n 'max_pool_size': 1E7,\n },\n 'walker': { # 6 DoF\n 'prefix': 'walker',\n 'env_name': 'Walker2d-v1',\n 'max_path_length': 1000,\n 'n_epochs': 5000,\n 'reward_scale': 10,\n },\n 'ant': { # 8 DoF\n 'prefix': 'ant',\n 'env_name': 'Ant-v1',\n 'max_path_length': 1000,\n 'n_epochs': 10000,\n 'reward_scale': 300,\n },\n 'ant-rllab': { # 8 DoF\n 'prefix': 'ant-rllab',\n 'env_name': 'ant-rllab',\n 'max_path_length': 1000,\n 'n_epochs': 10000,\n 'reward_scale': [1, 3, 10, 30, 100, 300]\n },\n 'humanoid-rllab': { # 21 DoF\n 'seed': [11, 12, 13, 14, 15],\n 'prefix': 'humanoid',\n 'env_name': 'humanoid-rllab',\n 'max_path_length': 1000,\n 'n_epochs': 20000,\n 'reward_scale': 100,\n },\n 'humanoid': { # 21 DoF\n 'prefix': 'humanoid',\n 'env_name': 'Humanoid-v2',\n 'max_path_length': 1000,\n 'n_epochs': 20000,\n 'reward_scale': 100,\n },\n}\nDEFAULT_ENV = 'swimmer'\nAVAILABLE_ENVS = list(ENV_PARAMS.keys())\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--env', type=str, choices=AVAILABLE_ENVS, default=DEFAULT_ENV)\n parser.add_argument('--exp_name', type=str, default=timestamp())\n parser.add_argument('--mode', type=str, default='local')\n # parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--log_dir', type=str, default='/home/desteban/logs/haarnoja/huma')\n args = parser.parse_args()\n\n return args\n\n\ndef get_variants(args):\n env_params = ENV_PARAMS[args.env]\n params = SHARED_PARAMS\n params.update(env_params)\n\n vg = VariantGenerator()\n for key, val in params.items():\n if isinstance(val, list):\n vg.add(key, val)\n else:\n vg.add(key, [val])\n\n return vg\n\n\ndef run_experiment(variant):\n if variant['env_name'] == 'humanoid-rllab':\n env = normalize(HumanoidEnv())\n elif variant['env_name'] == 'swimmer-rllab':\n env = normalize(SwimmerEnv())\n elif variant['env_name'] == 'ant-rllab':\n env = normalize(AntEnv())\n else:\n env = normalize(GymEnv(variant['env_name']))\n\n pool = SimpleReplayBuffer(\n env_spec=env.spec, max_replay_buffer_size=variant['max_pool_size'])\n\n sampler = SimpleSampler(\n max_path_length=variant['max_path_length'],\n min_pool_size=variant['max_path_length'],\n batch_size=variant['batch_size'])\n\n base_kwargs = dict(\n epoch_length=variant['epoch_length'],\n n_epochs=variant['n_epochs'],\n n_train_repeat=variant['n_train_repeat'],\n eval_render=False,\n eval_n_episodes=1,\n sampler=sampler)\n\n M = variant['layer_size']\n qf = NNQFunction(env_spec=env.spec, hidden_layer_sizes=(M, M))\n\n policy = StochasticNNPolicy(env_spec=env.spec, hidden_layer_sizes=(M, M))\n\n algorithm = SQL(\n base_kwargs=base_kwargs,\n env=env,\n pool=pool,\n qf=qf,\n policy=policy,\n kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=variant['kernel_particles'],\n kernel_update_ratio=variant['kernel_update_ratio'],\n value_n_particles=variant['value_n_particles'],\n td_target_update_interval=variant['td_target_update_interval'],\n qf_lr=variant['qf_lr'],\n policy_lr=variant['policy_lr'],\n discount=variant['discount'],\n reward_scale=variant['reward_scale'],\n save_full_state=False)\n\n algorithm.train()\n\n\ndef launch_experiments(variant_generator, args):\n variants = variant_generator.variants()\n for i, variant in enumerate(variants):\n print('Launching {} experiments.'.format(len(variants)))\n full_experiment_name = variant['prefix']\n full_experiment_name += '-' + args.exp_name + '-' + str(i).zfill(2)\n\n run_sql_experiment(\n run_experiment,\n mode=args.mode,\n variant=variant,\n exp_prefix=variant['prefix'] + '/' + args.exp_name,\n exp_name=full_experiment_name,\n n_parallel=1,\n 
seed=variant['seed'],\n terminate_machine=True,\n log_dir=args.log_dir,\n snapshot_mode=variant['snapshot_mode'],\n snapshot_gap=variant['snapshot_gap'],\n sync_s3_pkl=True,\n python_command='/home/desteban/miniconda3/envs/softqlearning_haarnoja/bin/python',\n )\n\n\ndef main():\n args = parse_args()\n variant_generator = get_variants(args)\n launch_experiments(variant_generator, args)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5405825972557068, "alphanum_fraction": 0.5802575945854187, "avg_line_length": 45.78968811035156, "blob_id": "65c6cd74d27cedcd81e075dcd67003be21311755", "content_id": "3bc16f7ec9d1a469f55b4fd80251390393d0e443", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46276, "license_type": "permissive", "max_line_length": 174, "num_lines": 989, "path": "/scenarios/bigman-reach-drill.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport random\nimport signal\n\nimport numpy as np\nfrom robolearn.old_utils.sampler import Sampler\n\nfrom robolearn.old_agents import GPSAgent\nfrom robolearn.old_algos.gps.mdgps import MDGPS\nfrom robolearn.old_algos.gps.pigps import PIGPS\nfrom robolearn.old_algos.trajopt.dreps import DREPS\nfrom robolearn.old_algos.trajopt.ilqr import ILQR\nfrom robolearn.old_algos.trajopt.mdreps import MDREPS\nfrom robolearn.old_algos.trajopt.pi2 import PI2\nfrom robolearn.old_costs.cost_action import CostAction\nfrom robolearn.old_costs.cost_fk import CostFK\nfrom robolearn.old_costs.cost_state import CostState\nfrom robolearn.old_costs.cost_sum import CostSum\nfrom robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT\nfrom robolearn.old_costs.cost_utils import evall1l2term\nfrom robolearn.old_envs import BigmanEnv\nfrom robolearn.old_policies.lin_gauss_init import init_pd, init_demos\nfrom robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf\nfrom robolearn.old_policies.policy_opt.tf_models import tf_network\nfrom robolearn.old_policies.policy_prior import ConstantPolicyPrior # For MDGPS\nfrom robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior\nfrom robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.print_utils import change_print_color\nfrom robolearn.old_utils.robot_model import RobotModel\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import Reset_condition_bigman_drill_gazebo\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_bigman_drill_condition\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_drill_relative_pose\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import create_hand_relative_pose\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import spawn_drill_gazebo\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import task_space_torque_control_demos, \\\n load_task_space_torque_control_demos\nfrom robolearn.old_utils.tasks.bigman.reach_drill_utils import task_space_torque_control_dual_demos, \\\n load_task_space_torque_control_dual_demos\nfrom robolearn.old_utils.traj_opt.traj_opt_dreps import TrajOptDREPS\nfrom robolearn.old_utils.traj_opt.traj_opt_lqr import TrajOptLQR\nfrom robolearn.old_utils.traj_opt.traj_opt_mdreps import TrajOptMDREPS\nfrom robolearn.old_utils.traj_opt.traj_opt_pi2 import 
TrajOptPI2\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n\ndef kill_everything(_signal=None, _frame=None):\n print(\"\\n\\033[1;31mThe script has been kill by the user!!\")\n os._exit(1)\n\nsignal.signal(signal.SIGINT, kill_everything)\n\n\n# ################## #\n# ################## #\n# ### PARAMETERS ### #\n# ################## #\n# ################## #\nlearning_algorithm = 'MDREPS'\n# Task parameters\nTs = 0.01\nTreach = 8\nTlift = 0 # 3.8\nTinter = 0 # 0.5\nTend = 0 # 0.7\n# EndTime = 4 # Using final time to define the horizon\nEndTime = Treach + Tinter + Tlift + Tend # Using final time to define the horizon\ninit_with_demos = False\ngenerate_dual_sets = True\ndemos_dir = None # 'TASKSPACE_TORQUE_CTRL_DEMO_2017-07-21_16:32:39'\ndual_dir = 'DUAL_DEMOS_2017-08-23_07:10:35'\n#seed = 6 previous 04/09/17 17:30 pm\nseed = 0\n\nrandom.seed(seed)\nnp.random.seed(seed)\n\n# BOX\ndrill_x = 0.70\ndrill_y = 0.00\ndrill_z = -0.1327\ndrill_yaw = 0 # Degrees\n#drill_size = [0.1, 0.1, 0.3]\ndrill_size = [0.11, 0.11, 0.3] # Beer\nfinal_drill_height = 0.0\ndrill_relative_pose = create_drill_relative_pose(drill_x=drill_x, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw)\n\n# Robot Model (It is used to calculate the IK cost)\n#robot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf_file)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\n\ntouching_drill_config = np.array([0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.,\n 0., 0., 0.,\n 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633,\n 0., 0.,\n 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\n\n# ################### #\n# ################### #\n# ### ENVIRONMENT ### #\n# ################### #\n# ################### #\nchange_print_color.change('BLUE')\nprint(\"\\nCreating Bigman environment...\")\n\n# Robot configuration\ninterface = 'ros'\nbody_part_active = 'RA'\nbody_part_sensed = 'RA'\ncommand_type = 'effort'\n\nif body_part_active == 'RA':\n hand_y = -drill_size[1]/2-0.02\n hand_z = drill_size[2]/2+0.02\n hand_name = RH_name\n hand_offset = r_soft_hand_offset\nelse:\n hand_y = drill_size[1]/2+0.02\n hand_z = drill_size[2]/2+0.02\n hand_name = LH_name\n hand_offset = l_soft_hand_offset\n\nhand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=hand_y, hand_z=hand_z, hand_yaw=0)\n\n\nobject_name = 'drill'\nobject_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=hand_y, hand_z=hand_z, hand_yaw=0)\n\n\n\nreset_condition_bigman_drill_gazebo_fcn = Reset_condition_bigman_drill_gazebo()\n\nobservation_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'ros_topic': '/xbotcore/bigman/joint_states',\n # 'fields': ['link_position', 'link_velocity', 'effort'],\n 'fields': ['link_position', 'link_velocity'],\n # 'joints': bigman_params['joint_ids']['UB']},\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n # {'name': 'ft_right_arm',\n # 'type': 'ft_sensor',\n # 'ros_topic': '/xbotcore/bigman/ft/r_arm_ft',\n # 'fields': ['force', 'torque']},\n\n {'name': 'distance_hand',\n 'type': 'fk_pose',\n 'body_name': hand_name,\n 'body_offset': hand_offset,\n 'target_offset': hand_rel_pose,\n 'fields': 
['orientation', 'position']},\n\n {'name': 'distance_object',\n 'type': 'object_pose',\n 'body_name': object_name,\n 'target_rel_pose': drill_relative_pose,\n 'fields': ['orientation', 'position']},\n ]\n\nstate_active = [{'name': 'joint_state',\n 'type': 'joint_state',\n 'fields': ['link_position', 'link_velocity'],\n 'joints': bigman_params['joint_ids'][body_part_sensed]},\n\n {'name': 'prev_cmd',\n 'type': 'prev_cmd'},\n\n {'name': 'distance_hand',\n 'type': 'fk_pose',\n 'body_name': hand_name,\n 'body_offset': hand_offset,\n 'target_offset': hand_rel_pose,\n 'fields': ['orientation', 'position']},\n\n {'name': 'distance_object',\n 'type': 'object_pose',\n 'body_name': object_name,\n 'target_rel_pose': drill_relative_pose,\n 'fields': ['orientation', 'position']},\n ]\n\noptional_env_params = {\n 'temp_object_name': 'drill'\n}\n\n# Spawn Box first because it is simulation\nspawn_drill_gazebo(drill_relative_pose, drill_size=drill_size)\n\n\n# Create a BIGMAN ROS EnvInterface\nbigman_env = BigmanEnv(interface=interface, mode='simulation',\n body_part_active=body_part_active, command_type=command_type,\n observation_active=observation_active,\n state_active=state_active,\n cmd_freq=int(1/Ts),\n robot_dyn_model=robot_model,\n optional_env_params=optional_env_params,\n reset_simulation_fcn=reset_condition_bigman_drill_gazebo_fcn)\n # reset_simulation_fcn=reset_condition_bigman_drill_gazebo)\n\naction_dim = bigman_env.action_dim\nstate_dim = bigman_env.state_dim\nobservation_dim = bigman_env.obs_dim\n\nprint(\"Bigman Environment OK. body_part_active:%s (action_dim=%d). Command_type:%s\" % (body_part_active, action_dim,\n command_type))\n\n# ################# #\n# ################# #\n# ##### AGENT ##### #\n# ################# #\n# ################# #\nchange_print_color.change('CYAN')\nprint(\"\\nCreating Bigman Agent...\")\n\npolicy_params = {\n 'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp\n 'network_params': {\n 'n_layers': 1, # Hidden layers??\n 'dim_hidden': [40], # List of size per n_layers\n 'obs_names': bigman_env.get_obs_info()['names'],\n 'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor\n },\n # Initialization.\n 'init_var': 0.1, # Initial policy variance.\n 'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)\n # Solver hyperparameters.\n 'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?\n 'batch_size': 15,\n 'lr': 0.001, # Base learning rate (by default it's fixed).\n 'lr_policy': 'fixed', # Learning rate policy.\n 'momentum': 0.9, # Momentum.\n 'weight_decay': 0.005, # Weight decay.\n 'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).\n # set gpu usage.\n 'use_gpu': 1, # Whether or not to use the GPU for training.\n 'gpu_id': 0,\n 'random_seed': 1,\n 'fc_only_iterations': 0, # TODO: Only forwardcontrol? 
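The 'network_params' above request a single 40-unit hidden layer. The following is not robolearn's tf_network, just a hedged sketch of an equivalent multilayer perceptron in TensorFlow 1.x; the ReLU activation is an assumption.

import tensorflow as tf

def mlp_policy_mean(obs_ph, dim_hidden=(40,), act_dim=7):
    # obs_ph: placeholder of shape (batch, dO); returns the mean action.
    out = obs_ph
    for size in dim_hidden:
        out = tf.layers.dense(out, size, activation=tf.nn.relu)
    return tf.layers.dense(out, act_dim, activation=None)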
if it is CNN??\n 'gpu_mem_percentage': 0.2,\n # 'weights_file_prefix': EXP_DIR + 'policy',\n}\npolicy_opt = {\n 'type': PolicyOptTf,\n 'hyperparams': policy_params\n }\n\nbigman_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt)\nprint(\"Bigman Agent:%s OK\\n\" % type(bigman_agent))\n\n\n# ################# #\n# ################# #\n# ##### COSTS ##### #\n# ################# #\n# ################# #\n# Action Cost\nact_cost = {\n 'type': CostAction,\n 'wu': np.ones(action_dim) * 1e-4,\n 'target': None, # Target action value\n}\n\n# State Cost\ntarget_distance_hand = np.zeros(6)\n# target_distance_hand[-2] = -0.02 # Yoffset\n# target_distance_hand[-1] = 0.1 # Zoffset\n\ntarget_distance_object = np.zeros(6)\nstate_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 0.0, # Weight for l2 norm\n 'alpha': 1e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'distance_object': {\n # 'wp': np.ones_like(target_state), # State weights - must be set.\n 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': bigman_env.get_state_info(name='distance_object')['idx']\n },\n },\n}\nstate_final_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 0.0, # Weight for l2 norm\n 'alpha': 1e-2, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 10.0, # Weight multiplier on final time step.\n 'data_types': {\n 'distance_object': {\n # 'wp': np.ones_like(target_state), # State weights - must be set.\n 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None, # (12, 3),\n 'data_idx': bigman_env.get_state_info(name='distance_object')['idx']\n },\n },\n}\n\nfk_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
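The 'l1', 'l2' and 'alpha' knobs parameterize a mixed norm on the weighted state error. A sketch of the penalty shape these settings suggest, modeled on GPS-style evall1l2term; the exact constants in the repo's implementation are not verified here.

import numpy as np

def l1l2_penalty(d, wp, l1=1.0, l2=0.0, alpha=1e-2):
    # d: (T, dim) distance to target, wp: per-dimension weights.
    sq = np.sum(wp * d ** 2, axis=1)
    # The l2 term pulls quickly toward the target; the l1 term (with the
    # small alpha inside the square root) sharpens final placement.
    return 0.5 * l2 * sq + l1 * np.sqrt(alpha + sq)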
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_l1_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_l2_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1, # 10\n}\n\nfk_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 10.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\nfk_l1_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 10.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\nfk_l2_final_cost = {\n 'type': CostFK,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'target_pose': target_distance_hand,\n 'tgt_data_type': 'state', # 'state' or 'observation'\n 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n 'op_point_name': hand_name,\n 'op_point_offset': hand_offset,\n 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n 'joint_ids': bigman_params['joint_ids'][body_part_active],\n 'robot_model': robot_model,\n 'wp': np.array([1.0, 1.0, 1.0, 10.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n 'evalnorm': evall1l2term,\n #'evalnorm': evallogl2term,\n 'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 50,\n}\n\ncost_sum = {\n 'type': CostSum,\n # 'costs': [act_cost, state_cost_distance],\n # 'weights': [1.0e-2, 1.0e-0],\n # 'costs': [act_cost, LAfk_cost, RAfk_cost, state_cost],\n # 'weights': [1.0e-2, 1.0e-0, 1.0e-0, 5.0e-1],\n #'costs': [act_cost, LAfk_cost, LAfk_final_cost],\n #'weights': [1.0e-1, 1.0e-0, 1.0e-0],\n 'costs': [act_cost, fk_l1_cost, fk_l2_cost, fk_l1_final_cost, fk_l2_final_cost, state_cost_distance, state_final_cost_distance],\n 'weights': [1.0e-1, 1.5e-1, 1.0e-0, 1.5e-1, 1.0e-0, 5.0e+0, 1.0e+1],\n # 'costs': [act_cost, state_cost],#, LAfk_cost, RAfk_cost],\n # 'weights': [0.1, 5.0],\n}\n\n\n# ########## #\n# ########## #\n# Conditions #\n# ########## #\n# ########## #\ndrill_relative_poses = [] # Used only in dual demos\n\n# q0 = np.zeros(31)\n# q0[15] = np.deg2rad(25)\n# q0[16] = np.deg2rad(40)\n# q0[18] = np.deg2rad(-75)\n# #q0[15:15+7] = [0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633]\n# q0[24] = np.deg2rad(25)\n# q0[25] = np.deg2rad(-40)\n# q0[27] = np.deg2rad(-75)\n# #q0[24:24+7] = [0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633]\n# drill_pose0 = drill_relative_pose.copy()\n# condition0 = create_bigman_drill_condition(q0, drill_pose0, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition0)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose0)\n# drill_relative_poses.append(drill_pose0)\n\n# # q1 = q0.copy()\n# q1 = np.zeros(31)\n# q1[15] = np.deg2rad(25)\n# q1[16] = np.deg2rad(40)\n# q1[18] = np.deg2rad(-45)\n# q1[20] = np.deg2rad(-5)\n# q1[24] = np.deg2rad(25)\n# q1[25] = np.deg2rad(-40)\n# q1[27] = np.deg2rad(-45)\n# q1[29] = np.deg2rad(-5)\n# drill_pose1 = create_drill_relative_pose(drill_x=drill_x+0.02, drill_y=drill_y+0.02, drill_z=drill_z, drill_yaw=drill_yaw+5)\n# condition1 = create_bigman_drill_condition(q1, drill_pose1, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition1)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose1)\n# drill_relative_poses.append(drill_pose1)\n\n# # q2 = q0.copy()\n# q2 = np.zeros(31)\n# q2[15] = np.deg2rad(25)\n# q2[16] = np.deg2rad(30)\n# q2[18] = np.deg2rad(-50)\n# q2[21] = np.deg2rad(-45)\n# q2[24] = np.deg2rad(25)\n# q2[25] = np.deg2rad(-30)\n# q2[27] = np.deg2rad(-50)\n# q2[30] = np.deg2rad(-45)\n# drill_pose2 = create_drill_relative_pose(drill_x=drill_x-0.02, drill_y=drill_y-0.02, drill_z=drill_z, drill_yaw=drill_yaw-5)\n# condition2 = create_bigman_drill_condition(q2, drill_pose2, bigman_env.get_state_info(),\n# 
joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition2)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose2)\n# drill_relative_poses.append(drill_pose2)\n\n# q3 = q0.copy()\nq3 = np.zeros(31)\nq3[15] = np.deg2rad(10)\nq3[16] = np.deg2rad(10)\nq3[18] = np.deg2rad(-35)\n# q3[24] = np.deg2rad(10)\n# q3[25] = np.deg2rad(-10)\n# q3[27] = np.deg2rad(-35)\n# q3[24] = np.deg2rad(-10)\n# #q3[25] = np.deg2rad(-20)\n# #q3[25] = np.deg2rad(-10)\n# q3[25] = np.deg2rad(-30)\n# q3[26] = np.deg2rad(0)\n# q3[27] = np.deg2rad(-85)\n# q3[28] = np.deg2rad(0)\n# q3[29] = np.deg2rad(0)\n# q3[30] = np.deg2rad(0)\nq3[24] = np.deg2rad(20)\nq3[25] = np.deg2rad(-55)\nq3[26] = np.deg2rad(0)\nq3[27] = np.deg2rad(-95)\nq3[28] = np.deg2rad(0)\nq3[29] = np.deg2rad(0)\nq3[30] = np.deg2rad(0)\n#drill_pose3 = create_drill_relative_pose(drill_x=drill_x-0.06, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw+10)\ndrill_pose3 = create_drill_relative_pose(drill_x=drill_x+0.05, drill_y=drill_y-0.3, drill_z=drill_z, drill_yaw=drill_yaw+10)\ncondition3 = create_bigman_drill_condition(q3, drill_pose3, bigman_env.get_state_info(),\n joint_idxs=bigman_params['joint_ids'][body_part_sensed])\nbigman_env.add_condition(condition3)\nreset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose3)\ndrill_relative_poses.append(drill_pose3)\n\n\n# # q4 = q0.copy()\n# q4 = np.zeros(31)\n# drill_pose4 = create_drill_relative_pose(drill_x=drill_x, drill_y=drill_y, drill_z=drill_z, drill_yaw=drill_yaw-5)\n# condition4 = create_bigman_drill_condition(q4, drill_pose4, bigman_env.get_state_info(),\n# joint_idxs=bigman_params['joint_ids'][body_part_sensed])\n# bigman_env.add_condition(condition4)\n# reset_condition_bigman_drill_gazebo_fcn.add_reset_poses(drill_pose4)\n# drill_relative_poses.append(drill_pose4)\n\n\n\n# #################### #\n# #################### #\n# ## DEMONSTRATIONS ## #\n# #################### #\n# #################### #\nif init_with_demos is True:\n print(\"\")\n change_print_color.change('GREEN')\n if demos_dir is None:\n task_space_torque_control_demos_params = {\n 'n_samples': 5,\n 'conditions_to_sample': range(len(bigman_env.get_conditions())),\n 'Treach': Treach,\n 'Tlift': Tlift,\n 'Tinter': Tinter,\n 'Tend': Tend,\n 'Ts': Ts,\n 'noisy': False,\n 'noise_hyperparams': {\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 
0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n },\n 'bigman_env': bigman_env,\n 'drill_relative_pose': drill_relative_pose,\n 'drill_size': drill_size,\n 'final_drill_height': final_drill_height,\n }\n demos_samples = task_space_torque_control_demos(**task_space_torque_control_demos_params)\n bigman_env.reset(time=2, cond=0)\n else:\n demos_samples = load_task_space_torque_control_demos(demos_dir)\n print('Demos samples has been obtained from directory %s' % demos_dir)\nelse:\n demos_samples = None\n\n# DUAL SAMPLES\nif generate_dual_sets is True:\n print(\"\")\n change_print_color.change('GREEN')\n if dual_dir is None:\n task_space_torque_control_dual_params = {\n '_active_joints': 'RA',\n 'n_good_samples': 5,\n 'n_bad_samples': 5,\n 'conditions_to_sample': range(len(bigman_env.get_conditions())),\n 'Treach': Treach,\n 'Tlift': Tlift,\n 'Tinter': Tinter,\n 'Tend': Tend,\n 'Ts': Ts,\n 'noisy': False,\n 'noise_hyperparams': {\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n },\n 'bigman_env': bigman_env,\n 'drill_relative_poses': drill_relative_poses, # THIS\n 'drill_relative_pose_cond_id': range(-8, -1), # OR THIS\n 'drill_size': drill_size,\n 'final_drill_height': final_drill_height,\n # offsets [roll, pitch, yaw, x, y, z]\n #'good_offsets': [[0, 0, 0, 0.25, -0.25, drill_size[2]/2+0.1],\n 'good_offsets': [[-45, 0, 0, 0, -0.13, 0.17],\n [0, 0, 0, 0, -0.12, 0.17],\n [30, 0, 0, 0, -0.13, 0.15],\n [3, 0, 0, 0, -0.14, 0.15],\n [-8, 0, 0, 0, -0.14, 0.14],\n ],\n 'bad_offsets': [[-10, 0, 0, 0.05, 0.1, 0.17],\n [2, 0, 0, 0, 0.1, 0.10],\n [25, 0, -5, 0, 0.0, 0.20],\n [1, 10, 2, -0.1, 0.14, 0.21],\n [3, 10, 40, 0.05, 0.05, 0.18],\n ],\n }\n\n good_trajs, bad_trajs = task_space_torque_control_dual_demos(**task_space_torque_control_dual_params)\n bigman_env.reset(time=2, cond=0)\n\n else:\n good_trajs, bad_trajs = load_task_space_torque_control_dual_demos(dual_dir)\n print('Good/bad dual samples has been obtained from directory %s' % dual_dir)\n\nelse:\n good_trajs = None\n bad_trajs = None\n\n\n\n# ######################## #\n# ######################## #\n# ## LEARNING ALGORITHM ## #\n# ######################## #\n# ######################## #\nchange_print_color.change('YELLOW')\nprint(\"\\nConfiguring learning algorithm...\\n\")\n\n# Learning params\nresume_training_itr = None # Resume from previous training iteration\n# data_files_dir = 'GPS_2017-09-01_15:22:55' # None # In case we want to resume from previous training\ndata_files_dir = None # 'GPS_2017-09-05_13:07:23' # None # In case we want to resume from previous training\n\nif demos_samples is None:\n # # init_traj_distr values can be lists if they are different for each condition\n # init_traj_distr = {'type': init_lqr,\n # # Parameters to calculate initial COST function based on stiffness\n # 'init_var': 3.0e-1, # Initial Variance\n # 'stiffness': 5.0e-1, # Stiffness (multiplies q)\n # 'stiffness_vel': 0.01, # 0.5, # Stiffness_vel*stiffness (multiplies qdot)\n # 'final_weight': 10.0, # Multiplies cost at T\n # # Parameters for guessing dynamics\n # 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.\n # #'init_gains': 
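The 'smooth_noise*' fields above describe temporally correlated exploration noise: white noise is low-pass filtered and optionally renormalized to unit variance. A self-contained sketch of that scheme; the helper name and the choice of filter width are assumptions.

import numpy as np
from scipy.ndimage import gaussian_filter1d

def smoothed_noise(T, dU, var_scale, smooth_var, renormalize=True, seed=None):
    rng = np.random.RandomState(seed)
    noise = rng.randn(T, dU)
    noise = gaussian_filter1d(noise, sigma=np.sqrt(smooth_var), axis=0)
    if renormalize:
        noise /= noise.std(axis=0, keepdims=True)  # unit variance per joint
    return noise * np.sqrt(var_scale)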
1.0*np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # #'init_gains': 1.0/np.array([5000.0, 8000.0, 5000.0, 5000.0, 300.0, 2000.0, 300.0]), # dU vector(np.array) of gains, default ones.\n # 'init_gains': np.ones(action_dim), # dU vector(np.array) of gains, default ones.\n # }\n init_traj_distr = {'type': init_pd,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active]))*0.3e-1, # Initial variance (Default:10)\n 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n #'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 2.0e-1, 2.0e-1, 2.0e-1])*1.0e+0,\n #'init_var': np.ones(7)*0.5,\n #'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Initial variance (Default:10)\n # 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,\n # 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0, # Initial variance (Default:10)\n 'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)\n 'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains\n 'init_action_offset': None,\n 'dJoints': len(bigman_params['joint_ids'][body_part_sensed]), # Total joints in state\n 'state_to_pd': 'joints', # Joints\n 'dDistance': 6,\n }\nelse:\n init_traj_distr = {'type': init_demos,\n 'sample_lists': demos_samples\n }\n\n# Trajectory Optimization Options\ntraj_opt_lqr = {'type': TrajOptLQR,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n\ntraj_opt_pi2 = {'type': TrajOptPI2,\n 'del0': 1e-4, # Dual variable updates for non-PD Q-function.\n 'kl_threshold': 1.0, # KL-divergence threshold between old and new policies.\n 'covariance_damping': 10.0, # 2.0, # If greater than zero, covariance is computed as a multiple of the old\n # covariance. 
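A compact sketch of the kind of time-varying linear-Gaussian controller an init_pd-style initializer builds, u_t ~ N(K_t x_t + k_t, S_t): joint positions and velocities are fed back through the 'pos_gains' and 'vel_gains_mult' factors above. The names and the state layout (positions first, then velocities, dX >= 2*dU) are assumptions, not the robolearn implementation.

import numpy as np

def pd_lin_gauss(dU, dX, T, x0, pos_gain=0.001, vel_gain_mult=0.01,
                 init_var=None):
    K = np.zeros((T, dU, dX))
    K[:, :, :dU] = -pos_gain * np.eye(dU)                        # positions
    K[:, :, dU:2 * dU] = -pos_gain * vel_gain_mult * np.eye(dU)  # velocities
    k = np.tile(-K[0].dot(x0), (T, 1))   # zero action at the start state
    if init_var is None:
        init_var = np.ones(dU)
    cov = np.tile(np.diag(init_var), (T, 1, 1))
    return K, k, cov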
Multiplier is taken to the power (1 / covariance_damping).\n # If greater than one, slows down convergence and keeps exploration noise high for more iterations.\n 'min_temperature': 0.001, # Minimum bound of the temperature optimization for the soft-max\n # probabilities of the policy samples.\n 'use_sumexp': False,\n 'pi2_use_dgd_eta': True, # False,\n 'pi2_cons_per_step': True,\n }\n\ntraj_opt_dreps = {'type': TrajOptDREPS,\n 'epsilon': 1.0, # KL-divergence threshold between old and new policies.\n 'xi': 5.0,\n 'chi': 2.0,\n 'dreps_cons_per_step': True,\n 'min_eta': 0.001, # Minimum bound of the temperature optimization for the soft-max\n 'covariance_damping': 2.0,\n 'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).\n }\n\ntraj_opt_mdreps = {'type': TrajOptMDREPS,\n 'good_const': False, # Use good constraints\n 'bad_const': False, # Use bad constraints\n 'del0': 1e-4, # Eta updates for non-SPD Q-function (non-SPD correction step).\n 'del0_good': 1e-4, # Omega updates for non-SPD Q-function (non-SPD correction step).\n 'del0_bad': 1e-8, # Nu updates for non-SPD Q-function (non-SPD correction step).\n # 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used\n 'min_eta': 1e-8, # At min_eta, kl_div > kl_step\n 'max_eta': 1e16, # At max_eta, kl_div < kl_step\n 'min_omega': 1e-8, # At min_omega, kl_div > kl_step\n 'max_omega': 1e16, # At max_omega, kl_div < kl_step\n 'min_nu': 1e-8, # At min_nu, kl_div > kl_step\n 'max_nu': 2.0e1, # At max_nu, kl_div < kl_step,\n 'step_tol': 0.1,\n 'bad_tol': 0.2,\n 'good_tol': 0.3,\n 'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.\n 'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.\n 'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.\n }\n\n# Dynamics\nlearned_dynamics = {'type': DynamicsLRPrior,\n 'regularization': 1e-6,\n 'prior': {\n 'type': DynamicsPriorGMM,\n 'max_clusters': 20, # Maximum number of clusters to fit.\n 'min_samples_per_cluster': 40, # Minimum samples per cluster.\n 'max_samples': 20, # Max. 
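'covariance_damping' and 'min_temperature' above belong to PI2's soft-max reweighting of sampled trajectories: lower-cost samples get exponentially larger weight in the update. Schematically, as a sketch rather than the TrajOptPI2 code:

import numpy as np

def pi2_weights(costs, temperature=1.0):
    # costs: (n_samples,) trajectory costs; returns normalized weights.
    s = (costs - costs.min()) / max(costs.ptp(), 1e-12)
    w = np.exp(-s / temperature)
    return w / w.sum()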
number of trajectories to use for fitting the GMM at any given time.\n 'strength': 1.0, # Adjusts the strength of the prior.\n },\n }\n\n# GPS algo hyperparameters\nmdgps_hyperparams = {'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n }\n\npigps_hyperparams = {'init_pol_wt': 0.01,\n 'policy_sample_mode': 'add'\n }\n\nilqr_hyperparams = {'inner_iterations': 1,\n }\n\npi2_hyperparams = {'inner_iterations': 1,\n 'fit_dynamics': False, # Dynamics fitting is not required for PI2.\n }\n\ndreps_hyperparams = {'inner_iterations': 1,\n 'good_samples': good_trajs,\n 'bad_samples': bad_trajs,\n }\n\nmdreps_hyperparams = {'inner_iterations': 1,\n 'good_samples': good_trajs,\n 'bad_samples': bad_trajs,\n 'n_bad_samples': 2, # Number of bad samples per each trajectory\n 'n_good_samples': 2, # Number of bad samples per each trajectory\n 'base_kl_bad': 2.5, # (chi) to be used with multiplier | kl_div_b >= kl_bad\n 'base_kl_good': 1.0, # (xi) to be used with multiplier | kl_div_g <= kl_good\n 'bad_traj_selection_type': 'always', # 'always', 'only_traj'\n 'good_traj_selection_type': 'always', # 'always', 'only_traj'\n 'init_eta': 4.62,\n 'init_nu': 0.5,\n 'init_omega': 1.0,\n 'min_bad_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'max_bad_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_bad in LQR)\n 'min_good_mult': 0.01, # Min possible value of step multiplier (multiplies base_kl_good in LQR)\n 'max_good_mult': 20.0, # Max possible value of step multiplier (multiplies base_kl_good in LQR)\n 'min_bad_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n 'min_good_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0e-00,\n 'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)\n 'policy_sample_mode': 'add',\n 'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment\n 'policy_prior': {'type': ConstantPolicyPrior,\n 'strength': 1e-4,\n },\n }\n\n\nif learning_algorithm.upper() == 'MDGPS':\n gps_algo_hyperparams = mdgps_hyperparams\n traj_opt_method = traj_opt_lqr\n test_after_iter = True\n sample_on_policy = False\n use_global_policy = True\n\nelif learning_algorithm.upper() == 'PIGPS':\n mdgps_hyperparams.update(pigps_hyperparams)\n gps_algo_hyperparams = mdgps_hyperparams\n traj_opt_method = traj_opt_pi2\n test_after_iter = True\n sample_on_policy = False\n use_global_policy = True\n\nelif learning_algorithm.upper() == 'ILQR':\n gps_algo_hyperparams = ilqr_hyperparams\n traj_opt_method = traj_opt_lqr\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'PI2':\n gps_algo_hyperparams = pi2_hyperparams\n traj_opt_method = traj_opt_pi2\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'DREPS':\n gps_algo_hyperparams = dreps_hyperparams\n traj_opt_method = traj_opt_dreps\n test_after_iter = False\n sample_on_policy = False\n use_global_policy = False\n\nelif learning_algorithm.upper() == 'MDREPS':\n gps_algo_hyperparams = mdreps_hyperparams\n traj_opt_method = traj_opt_mdreps\n sample_on_policy = False\n test_after_iter = False\n use_global_policy = False\n 
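Reading the MDREPS block above: eta, omega and nu act as dual variables for three KL constraints, keeping the new distribution near the previous one (kl_step), close to the 'good' trajectories (base_kl_good times its multiplier) and away from the 'bad' ones (base_kl_bad times its multiplier). To give those bounds a concrete scale, this is the diagonal-Gaussian KL divergence they are measured with; an illustrative helper, not repo code.

import numpy as np

def kl_diag_gauss(mu0, var0, mu1, var1):
    # KL(N(mu0, diag(var0)) || N(mu1, diag(var1)))
    return 0.5 * np.sum(np.log(var1 / var0)
                        + (var0 + (mu0 - mu1) ** 2) / var1 - 1.0)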
#use_global_policy = False\nelse:\n raise AttributeError(\"Wrong learning algorithm %s\" % learning_algorithm.upper())\n\n\ngps_hyperparams = {\n 'T': int(EndTime/Ts), # Total points\n 'dt': Ts,\n 'iterations': 100, # 100 # 2000 # GPS episodes, \"inner iterations\" --> K iterations\n 'test_after_iter': test_after_iter, # If test the learned policy after an iteration in the RL algorithm\n 'test_samples': 3, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)\n # Samples\n 'num_samples': 5, # 20 # Samples for exploration trajs --> N samples\n 'noisy_samples': True,\n 'sample_on_policy': sample_on_policy, # Whether generate on-policy samples or off-policy samples\n #'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n #'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)\n 'smooth_noise': True, # Apply Gaussian filter to noise generated\n #'smooth_noise_var': 5.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_var': 8.0e+0, # np.power(2*Ts, 2), # Variance to apply to Gaussian Filter. In Kumar (2016) paper, it is the std dev of 2 Ts\n 'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1\n 'noise_var_scale': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize\n 'cost': cost_sum,\n # Conditions\n 'conditions': len(bigman_env.get_conditions()), # Total number of initial conditions\n 'train_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for training\n 'test_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for testing\n # KL step (epsilon)\n 'kl_step': 0.2, # Kullback-Leibler step (base_step)\n 'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)\n 'max_step_mult': 10.0, # Previous 23/08 -> 1.0 #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)\n # Others\n 'gps_algo_hyperparams': gps_algo_hyperparams,\n 'init_traj_distr': init_traj_distr,\n 'fit_dynamics': True,\n 'dynamics': learned_dynamics,\n 'initial_state_var': 1e-6, #1e-2,# 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low\n 'traj_opt': traj_opt_method,\n 'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization # CHECK THIS VALUE!!!, I AM USING ZERO!!\n 'use_global_policy': use_global_policy,\n 'data_files_dir': data_files_dir,\n}\n\n\nif learning_algorithm.upper() == 'MDGPS':\n learn_algo = MDGPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'PIGPS':\n learn_algo = PIGPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'ILQR':\n learn_algo = ILQR(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'PI2':\n learn_algo = PI2(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'DREPS':\n learn_algo = DREPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelif learning_algorithm.upper() == 'MDREPS':\n learn_algo = MDREPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n\nelse:\n raise AttributeError(\"Wrong learning algorithm %s\" % 
learning_algorithm.upper())\n\nprint(\"Learning algorithm: %s OK\\n\" % type(learn_algo))\n\n# import numpy as np\n# dX = bigman_env.state_dim\n# dU = bigman_env.action_dim\n# dO = bigman_env.obs_dim\n# T = gps_hyperparams['T']\n# all_actions = np.zeros((T, dU))\n# all_states = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dX))\n# all_obs = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dO))\n# sample = Sample(bigman_env, T)\n# sample.set_acts(all_actions) # Set all actions at the same time\n# sample.set_obs(all_obs) # Set all obs at the same time\n# sample.set_states(all_states) # Set all states at the same time\n# costs = learn_algo._eval_conditions_sample_list_cost([SampleList([sample])])\n# raw_input('zacataaaaaaaaa')\n\n\n# Optimize policy using learning algorithm\nprint(\"Running Learning Algorithm!!!\")\ntraining_successful = learn_algo.run(resume_training_itr)\nif training_successful:\n print(\"Learning Algorithm has finished SUCCESSFULLY!\")\nelse:\n print(\"Learning Algorithm has finished WITH ERRORS!\")\n\n\n# ############################## #\n# ############################## #\n# ## SAMPLE FROM FINAL POLICY ## #\n# ############################## #\n# ############################## #\nif training_successful:\n conditions_to_sample = gps_hyperparams['test_conditions']\n change_print_color.change('GREEN')\n n_samples = 1\n noisy = False\n sampler_hyperparams = {\n 'noisy': noisy,\n 'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU\n 'smooth_noise': False, # Whether or not to perform smoothing of noise\n 'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01\n 'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.\n 'T': int(EndTime/Ts)*1, # Total points\n 'dt': Ts\n }\n #sampler = Sampler(bigman_agent.policy, bigman_env, **sampler_hyperparams)\n sampler = Sampler(learn_algo.cur[0].traj_distr, bigman_env, **sampler_hyperparams)\n print(\"Sampling from final policy!!!\")\n sample_lists = list()\n for cond_idx in conditions_to_sample:\n raw_input(\"\\nSampling %d times from condition %d and with policy:%s (noisy:%s). 
\\n Press a key to continue...\" %\n (n_samples, cond_idx, type(bigman_agent.policy), noisy))\n sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)\n # costs = learn_algo._eval_conditions_sample_list_cost([sample_list])\n # # print(costs)\n # # raw_input('pppp')\n # sample_lists.append(sample_list)\n\n bigman_env.reset(time=1, cond=0)\n\n\n\n\nprint(\"The script has finished!\")\nos._exit(0)\n\n" }, { "alpha_fraction": 0.5214285850524902, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 20.875, "blob_id": "de2f9a48456c1846558f5409c92ccc731dbb7185", "content_id": "444162cfbdb8bdd85b4e89ccaf011bcbcbda4338", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 700, "license_type": "permissive", "max_line_length": 82, "num_lines": 32, "path": "/scenarios/humanoids2018/scripts/reacher_gps1.sh", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# #### #\n# VARS #\n# #### #\nscenario='mdgps'\n\n#seeds=(0 50 100)\n#init_index=0\n#end_index=3\n#seeds=(\"${seeds[@]:${init_index}:${end_index}}\")\n\ndefault_seeds=(0) # 50 100)\nseeds=(\"${@:-${default_seeds[@]}}\")\ntotal_seeds=${#seeds[@]}\n\necho \"Reacher GPS\"\necho \"Total seeds: ${#seeds[@]}\"\necho \"Experiment seeds: ${seeds[@]}\"\necho \"\"\n\nfor index in ${!seeds[@]}; do\n seed=${seeds[index]}\n# script_index=$((index+init_index))\n script_index=$((index))\n echo \"************************************\"\n echo \"Running '${scenario}' $((script_index+1))/${total_seeds}: Seed: ${seed}\"\n\n python main.py --scenario=${scenario} --seed=${seed} \\\n --run_num=${script_index}\n\ndone\n" }, { "alpha_fraction": 0.581473708152771, "alphanum_fraction": 0.6181052923202515, "avg_line_length": 24, "blob_id": "8d91aa8c03603c37be009a6cf8df80d79e8f3e80", "content_id": "3c916cedb30c8408b6f6887d56e0df885898465a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2375, "license_type": "permissive", "max_line_length": 70, "num_lines": 95, "path": "/examples/rl_algos/spinningup/reacher/test_ppo.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport unittest\nfrom functools import partial\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom spinup import ppo\nfrom spinup.algos.ppo.core import mlp_actor_critic\n\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofGoalCompoEnv\n\nfrom spinup.utils.run_utils import setup_logger_kwargs\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 5\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 256\n\nSEED = 660\nSUBTASK = None\n\nEXP_NAME = 'prueba_reacher_ppo'\n\nenv_params = dict(\n is_render=False,\n # is_render=True,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n obs_with_ori=False,\n obs_with_goal=True,\n # obs_with_goal=False,\n # goal_pose=(0.65, 0.65),\n goal_pose=(0.65, 0.35),\n # rdn_goal_pos=True,\n rdn_goal_pos=False,\n robot_config=None,\n rdn_robot_config=True,\n goal_cost_weight=4.0e0,\n ctrl_cost_weight=5.0e-1,\n goal_tolerance=0.01,\n use_log_distances=True,\n log_alpha=1e-6,\n # max_time=PATH_LENGTH*DT,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n half_env=True,\n subtask=SUBTASK,\n 
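The horizon constants shared by these spinning-up scripts work out as follows; this is pure arithmetic on the definitions above.

import numpy as np

SIM_TIMESTEP, FRAME_SKIP, Tend = 0.001, 10, 10.0
DT = SIM_TIMESTEP * FRAME_SKIP           # 0.01 s per control step
PATH_LENGTH = int(np.ceil(Tend / DT))    # 1000 steps per 10 s episode
assert PATH_LENGTH == 1000
# hence steps_per_epoch = PATHS_PER_EPOCH * PATH_LENGTH = 5 * 1000 for the PPO run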
seed=SEED,\n)\n\n\ndef main():\n # Environment Fcn\n env_fn = lambda: \\\n Reacher2D3DofGoalCompoEnv(**env_params)\n # Logger kwargs\n logger_kwargs = setup_logger_kwargs(EXP_NAME, SEED)\n\n with tf.Graph().as_default():\n ppo(\n env_fn,\n actor_critic=mlp_actor_critic,\n ac_kwargs=dict(hidden_sizes=(128, 128, 128)),\n seed=SEED,\n steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n epochs=10000,\n gamma=0.99, # Discount factor (0-1)\n clip_ratio=0.2, # clip pol objective (0.1-0.3)\n pi_lr=3e-4,\n vf_lr=1e-3,\n train_pi_iters=80, # Max grad steps in pol loss per epoch\n train_v_iters=80, # Max grad steps in val loss per epoch\n lam=0.97, # Lambda for GAE-Lambda (0-1)\n max_ep_len=PATH_LENGTH, # Max length for trajectory\n target_kl=0.01, # KLdiv between new and old policies\n logger_kwargs=logger_kwargs,\n save_freq=10,\n )\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5414648652076721, "alphanum_fraction": 0.5792978405952454, "avg_line_length": 26.53333282470703, "blob_id": "2444137aba521e0fb5807ccc0b21ae1c7357faf1", "content_id": "cdd3e3b343c83c6d9ba001730412d959e7b0fc2d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3304, "license_type": "permissive", "max_line_length": 76, "num_lines": 120, "path": "/examples/rl_algos/spinningup/reacher/test_sac.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom spinup import sac\nfrom spinup.algos.sac.core import mlp_actor_critic\n\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn_gym_envs.pybullet import Reacher2D3DofGoalCompoEnv\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\n\nfrom spinup.utils.run_utils import setup_logger_kwargs\n\nEPOCHS = 500\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.001\nFRAME_SKIP = 10\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 256\n\nSEED = 610\nSUBTASK = None\n\nEXP_NAME = 'prueba_reacher1_sac'\n\nenv_params = dict(\n is_render=False,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n obs_with_ori=False,\n obs_with_goal=True,\n # obs_with_goal=False,\n # goal_pose=(0.65, 0.65),\n goal_pose=(0.65, 0.35),\n # rdn_goal_pos=True,\n rdn_goal_pos=False,\n robot_config=None,\n rdn_robot_config=True,\n goal_cost_weight=4.0e0,\n ctrl_cost_weight=5.0e-1,\n goal_tolerance=0.01,\n use_log_distances=True,\n log_alpha=1e-6,\n # max_time=PATH_LENGTH*DT,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n half_env=True,\n subtask=SUBTASK,\n seed=SEED,\n)\n\n\ndef main():\n # Environment Fcn\n env_fn = lambda: \\\n NormalizedBoxEnv(\n Reacher2D3DofGoalCompoEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n # Logger kwargs\n logger_kwargs = setup_logger_kwargs(EXP_NAME, SEED)\n\n with tf.Graph().as_default():\n sac(\n env_fn,\n actor_critic=mlp_actor_critic,\n ac_kwargs=dict(hidden_sizes=(128, 128, 128)),\n seed=SEED,\n steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n epochs=EPOCHS,\n replay_size=int(1e6),\n gamma=0.99,\n polyak=0.995, # Polyak avg target pol (0-1)\n lr=1e-3,\n alpha=0.2, # entropy regularization coefficient (inv rew scale)\n batch_size=BATCH_SIZE,\n start_steps=10000,\n max_ep_len=PATH_LENGTH, # Max length for trajectory\n 
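'polyak=0.995' passed to sac() is the target-network smoothing coefficient: after each gradient step the target weights move by an exponential average toward the online weights. Sketched here with plain NumPy arrays; spinup performs the same update internally in TensorFlow.

import numpy as np

def polyak_update(target_params, main_params, polyak=0.995):
    # In place: target <- polyak * target + (1 - polyak) * main
    for t, m in zip(target_params, main_params):
        t *= polyak
        t += (1.0 - polyak) * m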
logger_kwargs=logger_kwargs,\n save_freq=1\n )\n # ppo(\n # env_fn,\n # actor_critic=mlp_actor_critic,\n # ac_kwargs=dict(hidden_sizes=(128, 128, 128)),\n # seed=SEED,\n # steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n # epochs=10000,\n # gamma=0.99, # Discount factor (0-1)\n # clip_ratio=0.2, # clip pol objective (0.1-0.3)\n # pi_lr=3e-4,\n # vf_lr=1e-3,\n # train_pi_iters=80, # Max grad steps in pol loss per epoch\n # train_v_iters=80, # Max grad steps in val loss per epoch\n # lam=0.97, # Lambda for GAE-Lambda (0-1)\n # max_ep_len=PATH_LENGTH, # Max length for trajectory\n # target_kl=0.01, # KLdiv between new and old policies\n # logger_kwargs=logger_kwargs,\n # save_freq=10,\n # )\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5102653503417969, "alphanum_fraction": 0.5335446000099182, "avg_line_length": 44.074588775634766, "blob_id": "378ff67e82537d8722ad53063f6a0b5fa73f3fdb", "content_id": "e950d8b05887c260300e478edd21618c494983e8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35654, "license_type": "permissive", "max_line_length": 134, "num_lines": 791, "path": "/scenarios/tests/load_plot_iteration_data.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import MaxNLocator\nimport pickle\nimport math\nimport os\nfrom robolearn.old_utils.plot_utils import plot_sample_list, plot_sample_list_distribution\nfrom robolearn.old_algos.gps.gps_utils import IterationData\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nimport scipy.stats\n\n#gps_directory_name = 'GPS_2017-08-04_20:32:12' # l1: 1.0, l2: 1.0e-3\n#gps_directory_name = 'GPS_2017-08-07_16:05:32' # l1: 1.0, l2: 0.0\ngps_directory_name = 'GPS_2017-08-07_19:35:58' # l1: 1.0, l2: 1.0\ngps_directory_name = 'GPS_2017-08-14_10:35:40' # dummy test\ngps_directory_name = 'GPS_2017-08-18_08:41:50' # ILQR 4 cond, 2 samples\ngps_directory_name = 'GPS_2017-08-18_15:44:31' # PI2 1 cond, 5 samples, pi2_use_dgd_eta=False\ngps_directory_name = 'GPS_2017-08-18_16:54:52' # PI2 1 cond, 5 samples, pi2_use_dgd_eta=True\ngps_directory_name = 'GPS_2017-08-18_17:42:42' # ILQR 1 cond, 5 samples\ngps_directory_name = 'GPS_2017-08-18_19:04:25' # MDGPS, 100iter, 5samp, max_step=1, 4cond\ngps_directory_name = 'GPS_2017-08-21_14:51:32' # PI2 1 cond, 5 samples, pi2_use_dgd_eta=True, cov_damping=5\ngps_directory_name = 'GPS_2017-08-21_15:49:11' # PI2 1 cond, 5 samples, pi2_use_dgd_eta=True, cov_damping=10\ngps_directory_name = 'GPS_2017-08-22_07:13:33' # MDGPS, 100iter, 5samp, max_step=1, 1cond\ngps_directory_name = 'GPS_2017-08-23_08:10:47' # ILQR\ngps_directory_name = 'GPS_2017-08-23_15:40:01' # ILQR\ngps_directory_name = 'GPS_2017-08-23_18:13:35' # Off-policy MDGPS\ngps_directory_name = 'GPS_2017-08-24_14:38:45' # On-policy MDGPS\ngps_directory_name = 'GPS_2017-08-31_19:40:24' # Test MDGPS\n\n\n\ninit_itr = 0\nfinal_itr = 100\n#final_itr = 30\nsamples_idx = [-1] # List of samples / None: all samples\nmax_traj_plots = None # None, plot all\nlast_n_iters = None # None, plot all iterations\nsensed_joints = 'RA'\nmethod = 'MDREPS'\n\nplot_eta = False\nplot_step_mult = False # If linearized policy(then NN policy) is worse, epsilon is reduced.\nplot_cs = True\nplot_sample_list_actions = False\nplot_sample_list_states = False\nplot_sample_list_obs = False\nplot_policy_output = False\nplot_policy_actions = 
False\nplot_policy_states = False\nplot_policy_obs = False\nplot_traj_distr = False\nplot_3d_traj = False\nplot_3d_pol_traj = False\n\neta_color = 'black'\ncs_color = 'red'\nstep_mult_color = 'red'\nsample_list_cols = 3\nplot_sample_list_max_min = False\nplot_joint_limits = True\n\ngps_path = '/home/desteban/workspace/robolearn/scenarios/robolearn_log/' + gps_directory_name\n\niteration_data_list = list()\niteration_ids = list()\n\nmax_available_itr = None\nfor pp in range(init_itr, final_itr):\n if os.path.isfile(gps_path+'/' + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n if os.path.isfile(gps_path+'/' + method.upper() + '_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n max_available_itr = pp\n\nif max_available_itr is not None:\n print(\"Max available iterations: %d\" % max_available_itr)\n\n if last_n_iters is not None:\n init_itr = max(max_available_itr - last_n_iters + 1, 0)\n\n if max_traj_plots is not None:\n if max_available_itr > max_traj_plots:\n itr_to_load = np.linspace(init_itr, max_available_itr, max_traj_plots, dtype=np.uint8)\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n else:\n itr_to_load = range(init_itr, max_available_itr+1)\n\n print(\"Iterations to load: %s\" % itr_to_load)\n for pp in itr_to_load:\n if os.path.isfile(gps_path+'/' + method.upper() +'_iteration_data_itr_'+str('%02d' % pp)+'.pkl'):\n print('Loading GPS iteration_data from iteration %d' % pp)\n iteration_data_list.append(pickle.load(open(gps_path+'/' + method.upper() +'_iteration_data_itr_'+str('%02d' % pp)+'.pkl',\n 'rb')))\n iteration_ids.append(pp)\n\n # total_cond = len(pol_sample_lists_costs[0])\n total_itr = len(iteration_data_list)\n total_cond = len(iteration_data_list[0])\n colormap = plt.cm.rainbow # nipy_spectral, Set1, Paired, winter\n\njoint_limits = [bigman_params['joints_limits'][ii] for ii in bigman_params['joint_ids'][sensed_joints]]\nT = iteration_data_list[-1][-1].sample_list.get_actions(samples_idx).shape[1]\n\nif plot_eta:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Eta values | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n etas = np.zeros(total_itr)\n for itr in range(total_itr):\n etas[itr] = iteration_data_list[itr][cond].eta\n ax.set_title('Eta values | Condition %d' % cond)\n ax.plot(range(1, total_itr+1), etas, color=eta_color)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif plot_step_mult:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Step multiplier | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n etas = np.zeros(total_itr)\n for itr in range(total_itr):\n etas[itr] = iteration_data_list[itr][cond].step_mult\n ax.set_title('Step multiplier | Condition %d' % cond)\n ax.plot(range(1, total_itr+1), etas, color=eta_color)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif plot_cs:\n for cond in range(total_cond):\n fig, ax = plt.subplots(1, 1)\n fig.canvas.set_window_title('Samples Costs | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n mean_costs = np.zeros(total_itr)\n max_costs = np.zeros(total_itr)\n min_costs = np.zeros(total_itr)\n std_costs = np.zeros(total_itr)\n for itr in range(total_itr):\n samples_cost_sum = iteration_data_list[itr][cond].cs.sum(axis=1)\n mean_costs[itr] = samples_cost_sum.mean()\n max_costs[itr] = samples_cost_sum.max()\n min_costs[itr] = samples_cost_sum.min()\n std_costs[itr] = samples_cost_sum.std()\n ax.set_title('Samples Costs | Condition %d' % cond)\n 
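The shaded band plotted here is the raw min/max over rollouts, which can be noisy with only five samples per iteration. A Student-t confidence band over the per-sample cost sums is one alternative; a sketch that assumes cs is the (n_samples, T) array stored in each IterationData and that at least two samples exist.

import numpy as np
import scipy.stats

def cost_confidence_band(cs, confidence=0.95):
    totals = cs.sum(axis=1)            # one scalar cost per rollout
    mean = totals.mean()
    low, high = scipy.stats.t.interval(confidence, len(totals) - 1,
                                       loc=mean,
                                       scale=scipy.stats.sem(totals))
    return mean, low, high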
ax.plot(range(1, total_itr+1), mean_costs, color=cs_color)\n ax.fill_between(range(1, total_itr+1), min_costs, max_costs, alpha=0.5, color=cs_color)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\nif plot_sample_list_actions:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_actions(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].sample_list.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif plot_sample_list_states:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_states(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('States | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n states = iteration_data_list[itr][cond].sample_list.get_states(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"State %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(states.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n\n if plot_sample_list_max_min:\n ax.fill_between(range(states.mean(axis=0).shape[0]), states.min(axis=0)[:, ii],\n states.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n\n if plot_joint_limits and ii < len(joint_limits):\n ax.plot(np.tile(joint_limits[ii][0], [T]), color='black', linestyle='--', linewidth=1)\n ax.plot(np.tile(joint_limits[ii][1], [T]), color='black', linestyle='--', linewidth=1)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, 
labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif plot_sample_list_obs:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].sample_list.get_obs(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Observations | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n obs = iteration_data_list[itr][cond].sample_list.get_obs(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Observation %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(obs.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n\n if plot_sample_list_max_min:\n ax.fill_between(range(states.mean(axis=0).shape[0]), states.min(axis=0)[:, ii],\n states.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif plot_policy_output:\n pol_sample_to_vis = -1\n pol_confidence = 0.95\n plot_confidence_interval = False\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.pol_mu.shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title(\"Policy's Actions | Condition %d\" % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n for itr in range(total_itr):\n mus = iteration_data_list[itr][cond].pol_info.pol_mu[pol_sample_to_vis]\n sigs = iteration_data_list[itr][cond].pol_info.pol_sig[pol_sample_to_vis]\n mins = np.zeros_like(mus)\n maxs = np.zeros_like(mus)\n for tt in range(mins.shape[0]):\n for dd in range(mins.shape[1]):\n mins[tt, dd], maxs[tt, dd] = scipy.stats.norm.interval(pol_confidence,\n loc=mus[tt, dd],\n scale=sigs[tt, dd, dd])\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n ax.plot(mus[:, ii], label=(\"itr %d\" % iteration_ids[itr]))\n if plot_confidence_interval:\n ax.fill_between(range(mus.shape[0]), mins[:, ii], maxs[:, ii], alpha=0.5)\n legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\nif plot_policy_actions:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.policy_samples.get_actions(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy Actions | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii 
% sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n actions = iteration_data_list[itr][cond].pol_info.policy_samples.get_actions(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"Action %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(actions.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n #ax.set_xlim([0, actions.shape[2]])\n #ax.set_ylim([ymin, ymax])\n\n if plot_sample_list_max_min:\n ax.fill_between(range(actions.mean(axis=0).shape[0]), actions.min(axis=0)[:, ii],\n actions.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\n\nif plot_policy_states:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.policy_samples.get_states(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy States | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n lines = list()\n labels = list()\n for itr in range(total_itr):\n states = iteration_data_list[itr][cond].pol_info.policy_samples.get_states(samples_idx)\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n if ii < dData:\n ax.set_title(\"State %d\" % (ii+1))\n label = \"itr %d\" % iteration_ids[itr]\n line = ax.plot(states.mean(axis=0)[:, ii], label=label)[0]\n\n if ii == 0:\n lines.append(line)\n labels.append(label)\n\n if itr == 0:\n ax.tick_params(axis='both', direction='in')\n\n if plot_sample_list_max_min:\n ax.fill_between(range(states.mean(axis=0).shape[0]), states.min(axis=0)[:, ii],\n states.max(axis=0)[:, ii], alpha=0.5)\n # # One legend for each ax\n # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n # legend.get_frame().set_alpha(0.4)\n\n if plot_joint_limits and ii < len(joint_limits):\n ax.plot(np.tile(joint_limits[ii][0], [T]), color='black', linestyle='--', linewidth=1)\n ax.plot(np.tile(joint_limits[ii][1], [T]), color='black', linestyle='--', linewidth=1)\n else:\n plt.setp(ax, visible=False)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n legend.get_frame().set_alpha(0.4)\n\nif plot_policy_obs:\n for cond in range(total_cond):\n dData = iteration_data_list[0][cond].pol_info.policy_samples.get_obs(samples_idx).shape[-1]\n fig, axs = plt.subplots(int(math.ceil(float(dData)/sample_list_cols)), sample_list_cols)\n fig.subplots_adjust(hspace=0)\n fig.canvas.set_window_title('Policy Observations | Condition %d' % cond)\n fig.set_facecolor((1, 1, 1))\n for ii in range(axs.size):\n ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 
1, total_itr)])\n\n        lines = list()\n        labels = list()\n        for itr in range(total_itr):\n            obs = iteration_data_list[itr][cond].pol_info.policy_samples.get_obs(samples_idx)\n            for ii in range(axs.size):\n                ax = axs[ii/sample_list_cols, ii % sample_list_cols]\n                if ii < dData:\n                    ax.set_title(\"Observation %d\" % (ii+1))\n                    label = \"itr %d\" % iteration_ids[itr]\n                    line = ax.plot(obs.mean(axis=0)[:, ii], label=label)[0]\n\n                    if ii == 0:\n                        lines.append(line)\n                        labels.append(label)\n\n                    if itr == 0:\n                        ax.tick_params(axis='both', direction='in')\n\n                    if plot_sample_list_max_min:\n                        ax.fill_between(range(obs.mean(axis=0).shape[0]), obs.min(axis=0)[:, ii],\n                                        obs.max(axis=0)[:, ii], alpha=0.5)\n                    # # One legend for each ax\n                    # legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                    # legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\n        # One legend for all figures\n        legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0., borderaxespad=0.)\n        legend.get_frame().set_alpha(0.4)\n\n\ndef plot_3d_gaussian(ax, mu, sigma, edges=100, sigma_axes='XY', linestyle='-.', linewidth=1.0, color='black', alpha=0.1,\n                     label='', marker=None, markersize=2.0, markeredgewidth=1.0):\n    \"\"\"\n    Plots ellipses in the plane given by sigma_axes representing the Gaussian\n    distributions specified by mu and sigma.\n    Args:\n        mu - Tx3 mean vector for (x, y, z)\n        sigma - Tx3x3 covariance matrix for (x, y, z)\n        edges - the number of edges to use to construct each ellipse\n        sigma_axes - plane of the ellipses: 'XY', 'XZ' or 'YZ'\n    \"\"\"\n    p = np.linspace(0, 2*np.pi, edges)\n    xy_ellipse = np.c_[np.cos(p), np.sin(p)]\n    T = mu.shape[0]\n\n    if sigma_axes == 'XY':\n        axes = [0, 1]\n    elif sigma_axes == 'XZ':\n        axes = [0, 2]\n    elif sigma_axes == 'YZ':\n        axes = [1, 2]\n    else:\n        raise AttributeError(\"Wrong sigma_axes\")\n\n    xyz_idx = np.ix_(axes)\n    sigma_idx = np.ix_(axes, axes)\n\n    # Use a local name instead of rebinding the sigma_axes argument\n    sigma_sub = np.clip(sigma[:, sigma_idx[0], sigma_idx[1]], 0, 0.05)\n    u, s, v = np.linalg.svd(sigma_sub)\n\n    for t in range(T):\n        xyz = np.repeat(mu[t, :].reshape((1, 3)), edges, axis=0)\n        xyz[:, xyz_idx[0]] += np.dot(xy_ellipse, np.dot(np.diag(np.sqrt(s[t, :])), u[t, :, :].T))\n        ax.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], linestyle=linestyle, linewidth=linewidth, marker=marker,\n                markersize=markersize, markeredgewidth=markeredgewidth, alpha=alpha, color=color, label=label)\n\n\ndef lqr_forward(traj_distr, traj_info):\n    \"\"\"\n    Perform LQR forward pass. 
Computes state-action marginals from dynamics and policy.\n    Args:\n        traj_distr: A linear Gaussian policy object.\n        traj_info: A TrajectoryInfo object.\n    Returns:\n        mu: A T x (dX+dU) mean state-action vector.\n        sigma: A T x (dX+dU) x (dX+dU) state-action covariance matrix.\n    \"\"\"\n    # Compute state-action marginals from specified conditional\n    # parameters and current traj_info.\n    T = traj_distr.T\n    dU = traj_distr.dU\n    dX = traj_distr.dX\n\n    # Constants.\n    idx_x = slice(dX)\n\n    # Allocate space.\n    sigma = np.zeros((T, dX+dU, dX+dU))\n    mu = np.zeros((T, dX+dU))\n\n    # Pull out dynamics.\n    Fm = traj_info.dynamics.Fm\n    fv = traj_info.dynamics.fv\n    dyn_covar = traj_info.dynamics.dyn_covar\n\n    # Set initial state covariance and mean\n    sigma[0, idx_x, idx_x] = traj_info.x0sigma\n    mu[0, idx_x] = traj_info.x0mu\n\n    for t in range(T):\n        sigma[t, :, :] = np.vstack([\n            np.hstack([sigma[t, idx_x, idx_x],\n                       sigma[t, idx_x, idx_x].dot(traj_distr.K[t, :, :].T)]),\n            np.hstack([traj_distr.K[t, :, :].dot(sigma[t, idx_x, idx_x]),\n                       traj_distr.K[t, :, :].dot(sigma[t, idx_x, idx_x]).dot(traj_distr.K[t, :, :].T)\n                       + traj_distr.pol_covar[t, :, :]])])\n\n        # u_t = p(u_t | x_t)\n        mu[t, :] = np.hstack([mu[t, idx_x], traj_distr.K[t, :, :].dot(mu[t, idx_x]) + traj_distr.k[t, :]])\n\n        if t < T - 1:\n            # x_t+1 = p(x_t+1 | x_t, u_t)\n            sigma[t+1, idx_x, idx_x] = Fm[t, :, :].dot(sigma[t, :, :]).dot(Fm[t, :, :].T) + dyn_covar[t, :, :]\n            mu[t+1, idx_x] = Fm[t, :, :].dot(mu[t, :]) + fv[t, :]\n    return mu, sigma\n\nif plot_traj_distr:\n    traj_distr_confidence = 0.95\n    plot_confidence_interval = False\n    plot_legend = True\n    for cond in range(total_cond):\n        dX = iteration_data_list[-1][cond].traj_distr.dX\n        dU = iteration_data_list[-1][cond].traj_distr.dU\n        fig_act, axs_act = plt.subplots(int(math.ceil(float(dU)/sample_list_cols)), sample_list_cols)\n        fig_act.subplots_adjust(hspace=0)\n        fig_act.canvas.set_window_title(\"Trajectory Distribution's Actions | Condition %d\" % cond)\n        fig_act.set_facecolor((1, 1, 1))\n        fig_state, axs_state = plt.subplots(int(math.ceil(float(dX)/sample_list_cols)), sample_list_cols)\n        fig_state.subplots_adjust(hspace=0)\n        fig_state.canvas.set_window_title(\"Trajectory Distribution's States | Condition %d\" % cond)\n        fig_state.set_facecolor((1, 1, 1))\n\n        for ii in range(axs_act.size):\n            ax = axs_act[ii/sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n        for ii in range(axs_state.size):\n            ax = axs_state[ii/sample_list_cols, ii % sample_list_cols]\n            ax.set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 1, total_itr)])\n\n        for itr in range(total_itr):\n            traj_distr = iteration_data_list[itr][cond].traj_distr\n            traj_info = iteration_data_list[itr][cond].traj_info\n\n            mu, sigma = lqr_forward(traj_distr, traj_info)\n            T = traj_distr.T\n            dU = traj_distr.dU\n            dX = traj_distr.dX\n            x_idxs = range(dX)\n            u_idxs = range(dX, dX+dU)\n            mins = np.zeros_like(mu)\n            maxs = np.zeros_like(mu)\n            if plot_confidence_interval:\n                for tt in range(T):\n                    # norm.interval expects standard deviations, so take the\n                    # square root of the covariance diagonal\n                    sigma_diag = np.diag(sigma[tt, :, :])\n                    mins[tt, :], maxs[tt, :] = scipy.stats.norm.interval(traj_distr_confidence, loc=mu[tt, :],\n                                                                         scale=np.sqrt(sigma_diag[:]))\n\n            for ii in range(axs_act.size):\n                ax = axs_act[ii/sample_list_cols, ii % sample_list_cols]\n                if ii < dU:\n                    ax.set_title(\"Action %d\" % (ii+1))\n                    ax.plot(mu[:, u_idxs[ii]], label=(\"itr %d\" % iteration_ids[itr]))\n                    if plot_confidence_interval:\n                        # fill over the time axis, indexing the action columns of mu\n                        ax.fill_between(range(T), mins[:, u_idxs[ii]], maxs[:, u_idxs[ii]], alpha=0.5)\n                    if plot_legend:\n                        legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                        legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\n            for ii in range(axs_state.size):\n                ax = axs_state[ii/sample_list_cols, ii % sample_list_cols]\n                if ii < dX:\n                    ax.set_title(\"State %d\" % (ii+1))\n                    ax.plot(mu[:, x_idxs[ii]], label=(\"itr %d\" % iteration_ids[itr]))\n                    if plot_confidence_interval:\n                        ax.fill_between(range(T), mins[:, x_idxs[ii]], maxs[:, x_idxs[ii]], alpha=0.5)\n                    if plot_legend:\n                        legend = ax.legend(loc='lower right', fontsize='x-small', borderaxespad=0.)\n                        legend.get_frame().set_alpha(0.4)\n                else:\n                    plt.setp(ax, visible=False)\n\nif plot_3d_traj:\n    distance_idxs = [24, 25, 26] # NOT TO USE -1, -2, etc because it will get the mu and variance of u !!!\n    linestyle = '-'\n    linewidth = 1.0\n    marker = None\n    markersize = 5.0\n    markeredgewidth = 1.0\n    alpha = 1.0\n\n    gauss_linestyle = ':'\n    gauss_linewidth = 0.2\n    gauss_marker = None\n    gauss_markersize = 2.0\n    gauss_markeredgewidth = 0.2\n    gauss_alpha = 0.3\n\n    views = ['XY', 'XZ']\n\n    des_colormap = [colormap(i) for i in np.linspace(0, 1, total_itr)]\n\n    for cond in range(total_cond):\n        fig_3d_traj = plt.figure()\n        lines = list()\n        labels = list()\n\n        for vv, view in enumerate(views):\n            ax_3d_traj = fig_3d_traj.add_subplot(1, len(views), vv+1, projection='3d')\n            ax_3d_traj.set_prop_cycle('color', des_colormap)\n            plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n            plot = ax_3d_traj.plot([0], [0], [0], color='green', marker='o', markersize=10)\n\n            fig_3d_traj.canvas.set_window_title(\"Expected Trajectories | Condition %d\" % cond)\n            ax_3d_traj.set_xlabel('X')\n            ax_3d_traj.set_ylabel('Y')\n            ax_3d_traj.set_zlabel('Z')\n\n            if view == 'XY':\n                azim = 0.\n                elev = 90.\n            elif view == 'XZ':\n                azim = 90.\n                elev = 0.\n            elif view == 'YZ':\n                azim = 90.\n                elev = 90.\n            else:\n                raise AttributeError(\"Wrong view %s\" % view)\n\n            ax_3d_traj.view_init(elev=elev, azim=azim)\n\n            for itr in range(total_itr):\n                traj_distr = iteration_data_list[itr][cond].traj_distr\n                traj_info = iteration_data_list[itr][cond].traj_info\n\n                mu, sigma = lqr_forward(traj_distr, traj_info)\n\n                label = \"itr %d\" % iteration_ids[itr]\n\n                plot = ax_3d_traj.plot(mu[:, distance_idxs[0]],\n                                       mu[:, distance_idxs[1]],\n                                       zs=mu[:, distance_idxs[2]],\n                                       linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n                                       markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n                                       label=label)[0]\n\n                if vv == 0:\n                    lines.append(plot)\n                    labels.append(label)\n\n                sigma_idx = np.ix_(distance_idxs, distance_idxs)\n                plot_3d_gaussian(ax_3d_traj, mu[:, distance_idxs], sigma[:, sigma_idx[0], sigma_idx[1]],\n                                 sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n                                 marker=gauss_marker, markersize=gauss_markersize, color=des_colormap[itr],\n                                 alpha=gauss_alpha, label='', markeredgewidth=gauss_markeredgewidth)\n\n                X = np.append(mu[:, distance_idxs[0]], 0)\n                Y = np.append(mu[:, distance_idxs[1]], 0)\n                Z = np.append(mu[:, distance_idxs[2]], 0)\n                mid_x = (X.max() + X.min()) * 0.5\n                mid_y = (Y.max() + Y.min()) * 0.5\n                mid_z = (Z.max() + Z.min()) * 0.5\n                max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n                ax_3d_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n                ax_3d_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n                ax_3d_traj.set_zlim(mid_z - max_range, mid_z + max_range)\n\n        # One legend for all figures\n        legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.)\n        legend.get_frame().set_alpha(0.4)\n\nif plot_3d_pol_traj:\n    distance_idxs = [24, 25, 26] # NOT TO USE 
-1, -2, etc because it will get the mu and variance of u !!!\n linestyle = '-'\n linewidth = 1.0\n marker = None\n markersize = 5.0\n markeredgewidth = 1.0\n alpha = 1.0\n\n gauss_linestyle = ':'\n gauss_linewidth = 0.2\n gauss_marker = None\n gauss_markersize = 2.0\n gauss_markeredgewidth = 0.2\n gauss_alpha = 0.3\n\n views = ['XY', 'XZ']\n\n des_colormap = [colormap(i) for i in np.linspace(0, 1, total_itr)]\n\n samples_idx = -1\n\n for cond in range(total_cond):\n fig_3d_traj = plt.figure()\n lines = list()\n labels = list()\n\n for vv, view in enumerate(views):\n ax_3d_traj = fig_3d_traj.add_subplot(1, len(views), vv+1, projection='3d')\n ax_3d_traj.set_prop_cycle('color', des_colormap)\n plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n plot = ax_3d_traj.plot([0], [0], [0], color='green', marker='o', markersize=10)\n\n fig_3d_traj.canvas.set_window_title(\"Expected Trajectories | Condition %d\" % cond)\n ax_3d_traj.set_xlabel('X')\n ax_3d_traj.set_ylabel('Y')\n ax_3d_traj.set_zlabel('Z')\n\n if view == 'XY':\n azim = 0.\n elev = 90.\n elif view == 'XZ':\n azim = 90.\n elev = 0.\n elif view == 'YZ':\n azim = 90.\n elev = 90.\n else:\n raise AttributeError(\"Wrong view %s\" % view)\n\n ax_3d_traj.view_init(elev=elev, azim=azim)\n\n for itr in range(total_itr):\n # traj_distr = iteration_data_list[itr][cond].traj_distr\n # traj_info = iteration_data_list[itr][cond].traj_info\n # mu, sigma = lqr_forward(traj_distr, traj_info)\n\n mu = iteration_data_list[itr][cond].pol_info.policy_samples.get_states()[samples_idx, :, :]\n\n label = \"itr %d\" % iteration_ids[itr]\n\n xs = np.linspace(5, 0, 100)\n plot = ax_3d_traj.plot(mu[:, distance_idxs[0]],\n mu[:, distance_idxs[1]],\n zs=mu[:, distance_idxs[2]],\n linestyle=linestyle, linewidth=linewidth, marker=marker, markersize=markersize,\n markeredgewidth=markeredgewidth, alpha=alpha, color=des_colormap[itr],\n label=label)[0]\n\n if vv == 0:\n lines.append(plot)\n labels.append(label)\n\n # sigma_idx = np.ix_(distance_idxs, distance_idxs)\n # plot_3d_gaussian(ax_3d_traj, mu[:, distance_idxs], sigma[:, sigma_idx[0], sigma_idx[1]],\n # sigma_axes=view, edges=100, linestyle=gauss_linestyle, linewidth=gauss_linewidth,\n # color=des_colormap[itr], alpha=gauss_alpha, label='',\n # markeredgewidth=gauss_markeredgewidth)\n\n X = np.append(mu[:, distance_idxs[0]], 0)\n Y = np.append(mu[:, distance_idxs[1]], 0)\n Z = np.append(mu[:, distance_idxs[2]], 0)\n mid_x = (X.max() + X.min()) * 0.5\n mid_y = (Y.max() + Y.min()) * 0.5\n mid_z = (Z.max() + Z.min()) * 0.5\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n ax_3d_traj.set_xlim(mid_x - max_range, mid_x + max_range)\n ax_3d_traj.set_ylim(mid_y - max_range, mid_y + max_range)\n ax_3d_traj.set_zlim(mid_z - max_range, mid_z + max_range)\n\n # One legend for all figures\n legend = plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.)\n legend.get_frame().set_alpha(0.4)\n\nplt.show(block=False)\n\nraw_input('Showing plots. 
Press a key to close...')\n" }, { "alpha_fraction": 0.5723148584365845, "alphanum_fraction": 0.6156455874443054, "avg_line_length": 44.4449348449707, "blob_id": "85c5a0a855ede43464fe3e52b8cdc61151b2c233", "content_id": "48019388d372de79f7575e71857464ef42a64912", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10316, "license_type": "permissive", "max_line_length": 143, "num_lines": 227, "path": "/scenarios/torquecontrol-test.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport math\nimport os\nimport rospy\nimport matplotlib.pyplot as plt\nimport tf\nfrom XCM.msg import CommandAdvr\nfrom XCM.msg import JointStateAdvr\nfrom robolearn.old_utils.trajectory_reproducer import TrajectoryReproducer\nfrom robolearn.old_utils.iit.iit_robots_params import bigman_params\nfrom robolearn.old_utils.transformations_utils import *\nfrom robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_torque_position\nfrom robolearn.old_utils.plot_utils import plot_joint_info\nfrom robolearn.old_utils.plot_utils import plot_desired_sensed_data\nfrom gazebo_msgs.srv import SpawnModel\nfrom gazebo_msgs.srv import DeleteModel\nfrom geometry_msgs.msg import Pose\nimport rbdl\n\nfrom robolearn.old_utils.robot_model import RobotModel\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Always turn off Gazebo logger\nos.system(\"gz log -d 0\")\n\n#current_path = os.path.abspath(__file__)\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(path)\n\nload_torques = False\ntorques_saved_filename = 'torques_init_traj.npy'\n\nT_init = 3\nT_traj = 5\nT_impedance_zero = 10\nfreq = 100\n\n# ROBOT MODEL for trying ID\nrobot_urdf_file = os.environ[\"ROBOTOLOGY_ROOT\"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'\nrobot_model = rbdl.loadModel(robot_urdf_file, verbose=False, floating_base=False)\n#LH_name = 'LWrMot3'\n#RH_name = 'RWrMot3'\ndefault_joint_stiffness = np.array([8000., 5000., 8000., 5000., 5000., 2000.,\n 8000., 5000., 5000., 5000., 5000., 2000.,\n 5000., 8000., 5000.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.,\n 300., 300.,\n 5000., 8000., 5000., 5000., 300., 2000., 300.])\ndefault_joint_damping = np.array([30., 50., 30., 30., 30., 5.,\n 30., 50., 30., 30., 30., 5.,\n 30., 50., 30.,\n 30., 50., 30., 30., 1., 5., 1.,\n 1., 1.,\n 30., 50., 30., 30., 1., 5., 1.])\npd_tau_weights = np.array([0.80, 0.50, 0.80, 0.50, 0.50, 0.20,\n 0.80, 0.50, 0.50, 0.50, 0.50, 0.20,\n 0.50, 0.80, 0.50,\n 0.50, 0.80, 0.30, 0.50, 0.10, 0.20, 0.03,\n 0.03, 0.03,\n 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03])\nKp_tau = 100 * pd_tau_weights\nKd_tau = 2 * pd_tau_weights\n\nKp_tau = np.eye(robot_model.q_size)*np.array(np.array([0.80, 0.50, 0.80, 0.50, 0.50, 0.20,\n 0.80, 0.50, 0.50, 0.50, 0.50, 0.20,\n 0.50, 0.80, 0.50,\n 0.50, 0.80, 0.30, 0.50, 0.10, 0.20, 0.03,\n 0.03, 0.03,\n 0.50, 0.80, 0.50, 0.50, 0.10, 0.20, 0.03]))\nKd_tau = 2*np.sqrt(Kp_tau)\n\njoint_pos_state = np.zeros(robot_model.q_size)\njoint_vel_state = np.zeros(robot_model.qdot_size)\njoint_effort_state = np.zeros(robot_model.qdot_size)\njoint_stiffness_state = np.zeros(robot_model.qdot_size)\njoint_damping_state = np.zeros(robot_model.qdot_size)\njoint_state_id = []\n\n\ndef callback(data, params):\n joint_ids = params[0]\n joint_pos_state = params[1]\n joint_effort_state = params[2]\n #if not joint_ids:\n # joint_ids[:] = 
[bigman_params['joints_names'].index(name) for name in data.name]\n joint_ids[:] = [bigman_params['joints_names'].index(name) for name in data.name]\n joint_pos_state[joint_ids] = data.link_position\n joint_effort_state[joint_ids] = data.effort\n joint_stiffness_state[joint_ids] = data.stiffness\n joint_damping_state[joint_ids] = data.damping\n joint_vel_state[joint_ids] = data.link_velocity\n\npublisher = rospy.Publisher(\"/xbotcore/bigman/command\", CommandAdvr, queue_size=10)\nsubscriber = rospy.Subscriber(\"/xbotcore/bigman/joint_states\", JointStateAdvr, callback, (joint_state_id, joint_pos_state, joint_effort_state))\nrospy.init_node('torquecontrol_example')\npub_rate = rospy.Rate(freq)\ndes_cmd = CommandAdvr()\ndes_cmd.name = bigman_params['joints_names']\n\nq_init = np.zeros(robot_model.q_size)\n#q_init[16] = np.deg2rad(90)\nq_init[24] = np.deg2rad(-30)\nq_init[25] = np.deg2rad(-65)\nq_init[26] = np.deg2rad(20)\nq_init[27] = np.deg2rad(-95)\nq_init[28] = np.deg2rad(20)\nq_init[29] = np.deg2rad(0)\nq_init[30] = np.deg2rad(0)\n\n\nN = int(np.ceil(T_init*freq))\njoint_init_traj = polynomial5_interpolation(N, q_init, joint_pos_state)[0]\nprint(\"Moving to zero configuration with Position control.\")\nfor ii in range(N):\n des_cmd.position = joint_init_traj[ii, :]\n des_cmd.stiffness = default_joint_stiffness\n des_cmd.damping = default_joint_damping\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\nq_end = np.zeros(robot_model.q_size)\njoints_to_move = bigman_params['joint_ids']['BA'][7:]\n#joints_to_move = [bigman_params['joint_ids']['BA'][6]]\nq_end[16] = np.deg2rad(90)\nq_end[joints_to_move] += np.deg2rad(-20)\nN = int(np.ceil(T_traj*freq))\njoint_traj, joint_traj_dots, joint_traj_ddots = polynomial5_interpolation(N, q_end, joint_pos_state)\njoint_traj_dots *= freq\njoint_traj_ddots *= freq*freq\n#joint_traj_dots = np.vstack((np.diff(joint_traj, axis=0), np.zeros((1, robot_model.qdot_size))))*freq\n#joint_traj_ddots = np.vstack((np.diff(joint_traj_dots, axis=0), np.zeros((1, robot_model.qdot_size))))*freq*freq\n\ntau = np.zeros(robot_model.qdot_size)\na = np.zeros(robot_model.qdot_size)\nM = np.zeros((robot_model.qdot_size, robot_model.qdot_size))\ndes_cmd.name = [bigman_params['joints_names'][idx] for idx in joints_to_move]\ndes_cmd.position = []\nqs_traj = np.zeros((N, robot_model.q_size))\nqdots_traj = np.zeros((N, robot_model.q_size))\ntaus_cmd_traj = np.zeros((N, robot_model.qdot_size))\n\nif load_torques:\n taus_traj = np.load(torques_saved_filename)\nelse:\n taus_traj = np.zeros((N, robot_model.qdot_size))\n\nprint(\"Moving to the initial configuration of trajectory with torque control.\")\nraw_input(\"Press a key to continue...\")\nfor ii in range(N):\n if load_torques:\n print(\"Reproducing previous torques!\")\n des_cmd.effort = taus_traj[ii, joints_to_move]\n des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n des_cmd.damping = np.zeros_like(tau[joints_to_move])\n else:\n des_cmd.position = joint_traj[ii, joints_to_move]\n des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n des_cmd.damping = default_joint_damping[joints_to_move]\n rbdl.InverseDynamics(robot_model, joint_traj[ii, :], joint_traj_dots[ii, :], joint_traj_ddots[ii, :], tau)\n #taus_traj[ii, :] = joint_effort_state\n #print(joint_traj[ii, joints_to_move] - joint_pos_state[joints_to_move])\n #rbdl.NonlinearEffects(robot_model, joint_pos_state, joint_vel_state, g)\n #rbdl.NonlinearEffects(robot_model, joint_pos_state, joint_vel_state*0, g)\n print(repr(joint_traj_ddots[ii, joints_to_move]))\n 
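# The prints below compare desired vs. sensed signals for debugging. The\n    # torque command assembled afterwards is computed-torque control:\n    # tau = ID(q_des, qd_des, qdd_des) + Kp*(q_des - q) + Kd*(qd_des - qd),\n    # i.e. inverse-dynamics feedforward plus PD feedback on the tracking error.\n    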
print(repr(joint_traj_ddots[ii, joints_to_move]/(freq*freq)))\n print(\"##\")\n print(repr(joint_traj_dots[ii, joints_to_move]))\n print(repr(joint_vel_state[joints_to_move]))\n print(\"--\")\n #a = joint_traj_ddots[ii, :] + \\\n # default_joint_damping*0 * (joint_traj_dots[ii, :] - joint_vel_state) + \\\n # default_joint_stiffness*0.0 * (joint_traj[ii, :] - joint_pos_state)\n pd_tau = Kp_tau.dot(joint_traj[ii, :] - joint_pos_state) + \\\n Kd_tau.dot(joint_traj_dots[ii, :] - joint_vel_state)\n #pd_tau = default_joint_stiffness * (joint_traj[ii, :] - joint_pos_state) + \\\n # default_joint_damping * (joint_traj_dots[ii, :] - joint_vel_state)\n tau += pd_tau\n #rbdl.InverseDynamics(robot_model, joint_pos_state, joint_vel_state, a, tau)\n #rbdl.NonlinearEffects(robot_model, joint_traj[ii, :], joint_vel_state*0, tau)\n #tau = np.ones(robot_model.qdot_size)*-0.5\n #a = default_joint_damping * (joint_traj_dots[ii, :] - joint_vel_state)\n #rbdl.CompositeRigidBodyAlgorithm(robot_model, joint_pos_state, M, update_kinematics=True)\n #rbdl.InverseDynamics(robot_model, joint_pos_state, joint_vel_state/freq, joint_traj_ddots[ii, :]/(freq*freq), tau)\n #rbdl.InverseDynamics(robot_model, joint_traj[ii, :], joint_traj_dots[ii, :], joint_traj_ddots[ii, :], tau)\n #tau += M.dot(a)\n print(repr(tau[joints_to_move]))\n print(repr(joint_effort_state[joints_to_move]))\n print(\"++\")\n des_cmd.position = []\n des_cmd.effort = tau[joints_to_move]\n des_cmd.stiffness = np.zeros_like(tau[joints_to_move])\n des_cmd.damping = np.zeros_like(tau[joints_to_move])\n publisher.publish(des_cmd)\n taus_traj[ii, :] = joint_effort_state\n taus_cmd_traj[ii, :] = tau\n qs_traj[ii, :] = joint_pos_state\n qdots_traj[ii, :] = joint_vel_state\n pub_rate.sleep()\n\n# Return to position control\nprint(\"Changing to position control!\")\nfor ii in range(50):\n des_cmd.position = joint_pos_state[joints_to_move]\n des_cmd.stiffness = default_joint_stiffness[joints_to_move]\n des_cmd.damping = default_joint_damping[joints_to_move]\n publisher.publish(des_cmd)\n pub_rate.sleep()\n\n\n# ##### #\n# PLOTS #\n# ##### #\njoints_to_plot = bigman_params['joint_ids']['LA']\ncols = 3\njoint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\nprint(\"Plotting...\")\nplot_desired_sensed_data(joints_to_plot, joint_traj, qs_traj, joint_names, data_type='position', block=False)\n#plot_desired_sensed_data(joints_to_plot, joint_traj_dots, qdots_traj, joint_names, data_type='velocity', block=False)\nplot_desired_sensed_torque_position(joints_to_plot, taus_cmd_traj, taus_traj,\n joint_traj, qs_traj, joint_names, block=True, cols=cols)\n\nprint(\"Saving sensed torques in %s\" % torques_saved_filename)\nnp.save(torques_saved_filename, taus_traj)\nsys.exit()\n" }, { "alpha_fraction": 0.6041365265846252, "alphanum_fraction": 0.6267799735069275, "avg_line_length": 51.621620178222656, "blob_id": "60e3ce8dc4c627c59b5eed9c62ec8bc513e0868b", "content_id": "99de2c81717f91eb3d84ea32a5b0dc99b2a2c7bc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21419, "license_type": "permissive", "max_line_length": 132, "num_lines": 407, "path": "/scenarios/reach-lift-box-trajgen.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\nfrom robolearn.old_utils.iit.iit_robots_params import *\nfrom robolearn.old_utils.trajectory_interpolators import 
polynomial5_interpolation\nfrom robolearn.old_utils.trajectory_interpolators import spline_interpolation\nfrom robolearn.old_utils.trajectory_interpolators import quaternion_interpolation\nfrom robolearn.old_utils.robot_model import *\nfrom robolearn.old_utils.iit.robot_poses.bigman.poses import *\nfrom robolearn.old_utils.transformations_utils import *\nfrom robolearn.old_utils.plot_utils import *\nimport tf\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=1000)\n\n# Script parameters\n#box_position = np.array([0.75, 0.0, 0.0184])\nbox_mass = 0.71123284\nbox_position = np.array([0.75,\n 0.00,\n 0.0184])\nbox_size = [0.4, 0.5, 0.3]\nbox_yaw = 0 # Degrees\n#box_orient = tf.transformations.rotation_matrix(np.deg2rad(15), [1, 0, 0]) # For the EEs is rotation in X\nbox_orient = tf.transformations.rotation_matrix(np.deg2rad(box_yaw), [0, 0, 1])\nbox_matrix = homogeneous_matrix(rot=box_orient, pos=box_position)\nfreq = 100\nT_init = 1\nT_reach = 10\nT_lift = 10\n\n# Save/Load file name\nfile_name = 'trajectories/traj1'+'_x'+str(box_position[0])+'_y'+str(box_position[1])+'_Y'+str(box_yaw)\nload_reach_traj = False\nload_lift_traj = False\n#load_reach_traj = True\n#load_lift_traj = True\nsave_reach_traj = True\nsave_lift_traj = True\n\nplot_at_the_end = True\n\nreach_option = 0\n#reach_option 0: IK desired final pose, interpolate in joint space\n#reach_option 1: Trajectory in EEs, then IK whole trajectory\n#reach_option 2: Trajectory in EEs, IK with Jacobians\n\nlift_option = 2\n#lift_option 0: IK desired final pose, interpolate the others\n#lift_option 1: Trajectory in EEs, then IK whole trajectory\n#lift_option 2: Trajectory in EEs, IK with Jacobians\n\nregularization_parameter = 0.02 # For IK optimization algorithm\nik_method = 'iterative' #iterative / optimization\n\nq_init = np.zeros(31)\nq_init[16] = np.deg2rad(50)\nq_init[25] = np.deg2rad(-50)\n#q_init = np.deg2rad(np.array(bigman_Apose))\n#q_init = np.deg2rad(np.array(bigman_Fpose))\n#q_init = np.deg2rad(np.array(bigman_Tpose))\n\n# Robot Model\nrobot_urdf = os.environ[\"ROBOTOLOGY_ROOT\"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'\nrobot_model = RobotModel(robot_urdf)\nLH_name = 'LWrMot3'\nRH_name = 'RWrMot3'\nl_soft_hand_offset = np.array([0.000, -0.030, -0.210])\nr_soft_hand_offset = np.array([0.000, 0.030, -0.210])\n\n\n# ###########\n# REACH BOX #\n# ###########\nif not load_reach_traj:\n print(\"\\033[5mGenerating reaching trajectory...\")\n ## Orientation\n ##des_orient = homogeneous_matrix(rot=rot)\n #des_orient = tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0])\n ###des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-5), [1, 0, 0]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-3), [1, 0, 0]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(5), [0, 0, 1]))\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(10), [0, 1, 0]))\n #des_orient = des_orient.dot(box_orient)\n box_LH_position = np.array([0.05,\n box_size[1]/2. 
- 0.00,\n -0.05])\n box_LH_matrix = homogeneous_matrix(pos=box_LH_position)\n LH_reach_matrix = box_matrix.dot(box_LH_matrix)\n LH_reach_matrix = LH_reach_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0]))\n LH_reach_pose = np.zeros(7)\n LH_reach_pose[4:] = tf.transformations.translation_from_matrix(LH_reach_matrix)\n LH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(LH_reach_matrix)\n\n box_RH_position = np.array([0.05,\n -box_size[1]/2. + 0.00,\n -0.05])\n box_RH_matrix = homogeneous_matrix(pos=box_RH_position)\n RH_reach_matrix = box_matrix.dot(box_RH_matrix)\n RH_reach_matrix = RH_reach_matrix.dot(tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0]))\n RH_reach_pose = np.zeros(7)\n RH_reach_pose[4:] = tf.transformations.translation_from_matrix(RH_reach_matrix)\n RH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(RH_reach_matrix)\n\n N = int(np.ceil(T_reach*freq))\n torso_joints = bigman_params['joint_ids']['TO']\n\n if reach_option == 0:\n q_reach = robot_model.ik(LH_name, LH_reach_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n q_reach2 = robot_model.ik(RH_name, RH_reach_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n\n # Trajectory\n joint_reach_trajectory = polynomial5_interpolation(N, q_reach, q_init)[0]\n\n elif reach_option == 1:\n q = q_init.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_reach_pose = polynomial5_interpolation(N, LH_reach_pose, actual_LH_pose)[0]\n desired_RH_reach_pose = polynomial5_interpolation(N, RH_reach_pose, actual_RH_pose)[0]\n\n viapoint_LH_reach = np.empty(3)\n\n quatLH_interpolation = quaternion_interpolation(N, LH_reach_pose[:4], actual_LH_pose[:4])\n quatRH_interpolation = quaternion_interpolation(N, RH_reach_pose[:4], actual_RH_pose[:4])\n desired_LH_reach_pose[:, :4] = quatLH_interpolation\n desired_RH_reach_pose[:, :4] = quatRH_interpolation\n\n joint_reach_trajectory = np.zeros((desired_LH_reach_pose.shape[0], robot_model.q_size))\n joint_reach_trajectory[0, :] = q\n\n q_reach = np.empty(robot_model.q_size)\n q_reach2 = np.empty(robot_model.q_size)\n for ii in range(desired_LH_reach_pose.shape[0]-1):\n print(\"%d/%d \" % (ii+1, N))\n #print(\"%d/%d \" % (ii+1, N))\n q_reach[:] = robot_model.ik(LH_name, desired_LH_reach_pose[ii+1, :], body_offset=l_soft_hand_offset,\n q_init=joint_reach_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method=ik_method,\n regularization_parameter=regularization_parameter)\n q_reach2[:] = robot_model.ik(RH_name, desired_RH_reach_pose[ii+1, :], body_offset=r_soft_hand_offset,\n q_init=joint_reach_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method=ik_method,\n regularization_parameter=regularization_parameter)\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n joint_reach_trajectory[ii+1, :] = q_reach\n #print(joint_reach_trajectory[ii+1, :]-joint_reach_trajectory[ii, :])\n print(sum(joint_reach_trajectory[ii+1, :]-joint_reach_trajectory[ii, :]))\n\n elif reach_option == 2:\n q = q_init.copy()\n 
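# reach_option 2: the end-effector pose trajectories are interpolated in\n        # Cartesian space here; the joint-space path is integrated later from\n        # qdot = lstsq(J, xdot) on the stacked two-hand Jacobian (see the\n        # '# Send Commands' section below).\n        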
actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_reach_pose = polynomial5_interpolation(N, LH_reach_pose, actual_LH_pose)[0]\n desired_RH_reach_pose = polynomial5_interpolation(N, RH_reach_pose, actual_RH_pose)[0]\n\n quatLH_interpolation = quaternion_interpolation(N, LH_reach_pose[:4], actual_LH_pose[:4])\n quatRH_interpolation = quaternion_interpolation(N, RH_reach_pose[:4], actual_RH_pose[:4])\n desired_LH_reach_pose[:, :4] = quatLH_interpolation\n desired_RH_reach_pose[:, :4] = quatRH_interpolation\n\n q_reach = robot_model.ik(LH_name, LH_reach_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n q_reach2 = robot_model.ik(RH_name, RH_reach_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n q_reach[bigman_params['joint_ids']['RA']] = q_reach2[bigman_params['joint_ids']['RA']]\n\n #J1 = np.zeros((6, robot_model.qdot_size))\n #J2 = np.zeros((6, robot_model.qdot_size))\n J = np.zeros((12, robot_model.qdot_size))\n xdot = np.zeros(12)\n qdot = np.zeros(robot_model.qdot_size)\n K = 500\n else:\n raise ValueError(\"Wrong reach_option %d\" % reach_option)\n print(\"\\033[31mDONE!! \\033[0m\")\n\n #RH_reach_pose = robot_model.fk(RH_name, q=np.zeros(robot_model.q_size), body_offset=r_soft_hand_offset)\n #RH_reach_pose[4:] = LH_reach_pose[4:]\n #RH_reach_pose[5] = box_position[1] - box_size[1]/2. + 0.02\n #des_orient = tf.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0])\n #des_orient = des_orient.dot(box_orient)\n #RH_reach_pose[:4] = tf.transformations.quaternion_from_matrix(des_orient)\n\nelse:\n print(\"\\n\\033[5mLoading reaching trajectory...\")\n joint_reach_trajectory = np.load(file_name + '_m' + str(reach_option) + '_reach.npy')\n if reach_option == 2:\n q_reach = joint_reach_trajectory[-1, :]\n #J1 = np.zeros((6, robot_model.qdot_size))\n #J2 = np.zeros((6, robot_model.qdot_size))\n J = np.zeros((12, robot_model.qdot_size))\n xdot = np.zeros(12)\n qdot = np.zeros(robot_model.qdot_size)\n desired_LH_reach_pose = np.load(file_name+'_reach_LH_EE.npy')\n desired_RH_reach_pose = np.load(file_name+'_reach_RH_EE.npy')\n print(\"\\033[31mDONE!! 
\\033[0m\")\n\n\n\n# ######## #\n# LIFT BOX #\n# ######## #\nif not load_lift_traj:\n print(\"\\033[5mGenerating lifting trajectory...\")\n LH_lift_pose = LH_reach_pose.copy()\n LH_lift_pose[6] += 0.3\n RH_lift_pose = RH_reach_pose.copy()\n RH_lift_pose[6] += 0.3\n\n N = int(np.ceil(T_lift*freq))\n\n #final_LH_lift_pose = actual_LH_lift_pose.copy()\n #final_LH_lift_pose[-1] += 0.3\n #final_LH_lift_pose[-2] -= 0.005\n #final_RH_lift_pose = actual_RH_lift_pose.copy()\n #final_RH_lift_pose[-1] += 0.3\n ##des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-4), [1, 0, 0]))\n #des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-9), [1, 0, 0]))\n #des_orient = des_orient.dot(tf.transformations.rotation_matrix(np.deg2rad(-3), [0, 0, 1]))\n #final_LH_lift_pose[:4] = tf.transformations.quaternion_from_matrix(des_orient)\n\n if lift_option == 0:\n q_lift = robot_model.ik(LH_name, LH_lift_pose, body_offset=l_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n q_lift2 = robot_model.ik(RH_name, RH_lift_pose, body_offset=r_soft_hand_offset,\n mask_joints=torso_joints, joints_limits=bigman_params['joints_limits'],\n method=ik_method)\n q_lift[bigman_params['joint_ids']['RA']] = q_lift2[bigman_params['joint_ids']['RA']]\n joint_lift_trajectory = polynomial5_interpolation(N, q_lift, q_reach)[0]\n\n elif lift_option == 1:\n q = q_reach.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_lift_pose = polynomial5_interpolation(N, LH_lift_pose, actual_LH_pose)[0]\n desired_RH_lift_pose = polynomial5_interpolation(N, RH_lift_pose, actual_RH_pose)[0]\n\n joint_lift_trajectory = np.zeros((N, robot_model.q_size))\n joint_lift_trajectory[0, :] = q\n q_lift = np.empty(robot_model.q_size)\n q_lift2 = np.empty(robot_model.q_size)\n for ii in range(N-1):\n print(\"%d/%d \" % (ii+1, N))\n #print(\"%d/%d \" % (ii+1, N))\n q_lift[:] = robot_model.ik(LH_name, desired_LH_lift_pose[ii+1, :], body_offset=l_soft_hand_offset,\n q_init=joint_lift_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method=ik_method,\n regularization_parameter=regularization_parameter)\n q_lift2[:] = robot_model.ik(RH_name, desired_RH_lift_pose[ii+1, :], body_offset=r_soft_hand_offset,\n q_init=joint_lift_trajectory[ii, :], joints_limits=bigman_params['joints_limits'],\n mask_joints=torso_joints, method=ik_method,\n regularization_parameter=regularization_parameter)\n q_lift[bigman_params['joint_ids']['RA']] = q_lift2[bigman_params['joint_ids']['RA']]\n joint_lift_trajectory[ii+1, :] = q_lift\n\n elif lift_option == 2:\n T_lift = 2\n N = int(np.ceil(T_lift*freq))\n\n q = q_reach.copy()\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n desired_LH_lift_pose = polynomial5_interpolation(N, LH_lift_pose, actual_LH_pose)[0]\n desired_RH_lift_pose = polynomial5_interpolation(N, RH_lift_pose, actual_RH_pose)[0]\n\n #J1 = np.zeros((6, robot_model.qdot_size))\n #J2 = np.zeros((6, robot_model.qdot_size))\n J = np.zeros((12, robot_model.qdot_size))\n xdot = np.zeros(12)\n qdot = np.zeros(robot_model.qdot_size)\n K = 500\n else:\n raise ValueError(\"Wrong lift_option %d\" % 
lift_option)\n print(\"\\n\\033[31mDONE!! \\033[0m\")\n\nelse:\n print(\"\\n\\033[5mLoading lifting trajectory...\")\n joint_lift_trajectory = np.load(file_name + '_m' + str(lift_option) + '_lift.npy')\n if lift_option == 2:\n #J1 = np.zeros((6, robot_model.qdot_size))\n #J2 = np.zeros((6, robot_model.qdot_size))\n J = np.zeros((12, robot_model.qdot_size))\n xdot = np.zeros(12)\n qdot = np.zeros(robot_model.qdot_size)\n desired_LH_lift_pose = np.load(file_name+'_lift_LH_EE.npy')\n desired_RH_lift_pose = np.load(file_name+'_lift_RH_EE.npy')\n print(\"\\033[31mDONE!! \\033[0m\")\n\n# Send Commands\nif reach_option == 2:\n joint_reach_trajectory = np.empty((desired_LH_reach_pose.shape[0], robot_model.q_size))\n q = q_init.copy()\n joint_reach_trajectory[0, :] = q[:]\n for ii in range(desired_LH_reach_pose.shape[0]-1):\n #for ii in range(N-1):\n print(\"Generating REACHING %d/%d...\" % (ii+1, desired_LH_reach_pose.shape[0]))\n #error1 = compute_cartesian_error(desired_LH_lift_pose[ii, :], actual_LH_lift_pose, rotation_rep='quat')\n #error2 = compute_cartesian_error(desired_RH_lift_pose[ii, :], actual_RH_lift_pose, rotation_rep='quat')\n\n xdot[:6] = compute_cartesian_error(desired_LH_reach_pose[ii+1, :], desired_LH_reach_pose[ii, :], rotation_rep='quat')#error1\n xdot[6:] = compute_cartesian_error(desired_RH_reach_pose[ii+1, :], desired_RH_reach_pose[ii, :], rotation_rep='quat')#error1\n\n ## Compute the jacobian matrix\n ##rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n #robot_model.update_jacobian(J1, LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n #robot_model.update_jacobian(J2, RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n #J1[:, bigman_params['joint_ids']['TO']] = 0\n #J2[:, bigman_params['joint_ids']['TO']] = 0\n #qdot = np.linalg.lstsq(J1, xdot)[0]\n #qdot2 = np.linalg.lstsq(J2, xdot2)[0]\n #qdot[bigman_params['joint_ids']['RA']] = qdot2[bigman_params['joint_ids']['RA']]\n\n # Compute the jacobian matrix\n #rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n robot_model.update_jacobian(J[:6, :], LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J[6:, :], RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n #Note: lstsq is faster than pinv and then dot\n qdot[:] = np.linalg.lstsq(J, xdot)[0]\n\n q[:] += qdot\n\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n\n joint_reach_trajectory[ii+1, :] = q[:]\n\nif lift_option == 2:\n joint_lift_trajectory = np.empty((desired_LH_lift_pose.shape[0], robot_model.q_size))\n q = q_reach.copy()\n joint_lift_trajectory[0, :] = q[:]\n for ii in range(desired_LH_lift_pose.shape[0]-1):\n #for ii in range(N-1):\n print(\"Generating LIFTING %d/%d...\" % (ii+1, desired_LH_lift_pose.shape[0]))\n #error1 = compute_cartesian_error(desired_LH_lift_pose[ii, :], actual_LH_lift_pose, rotation_rep='quat')\n #error2 = compute_cartesian_error(desired_RH_lift_pose[ii, :], actual_RH_lift_pose, rotation_rep='quat')\n\n xdot[:6] = compute_cartesian_error(desired_LH_lift_pose[ii+1, :], desired_LH_lift_pose[ii, :], rotation_rep='quat')#error1\n xdot[6:] = compute_cartesian_error(desired_RH_lift_pose[ii+1, :], desired_RH_lift_pose[ii, :], rotation_rep='quat')#error1\n\n ## Compute the jacobian matrix\n 
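## (kept for reference: per-hand 6-DoF Jacobians with masked torso joints;\n        ## superseded by the single stacked 12-row Jacobian solved below)\n        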
##rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n #robot_model.update_jacobian(J1, LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n #robot_model.update_jacobian(J2, RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n #J1[:, bigman_params['joint_ids']['TO']] = 0\n #J2[:, bigman_params['joint_ids']['TO']] = 0\n #qdot = np.linalg.lstsq(J1, xdot)[0]\n #qdot2 = np.linalg.lstsq(J2, xdot2)[0]\n #qdot[bigman_params['joint_ids']['RA']] = qdot2[bigman_params['joint_ids']['RA']]\n\n # Compute the jacobian matrix\n #rbdl.CalcPointJacobian6D(robot_model.model, q, model.GetBodyId(LH_name), np.zeros(0), J1, True)\n robot_model.update_jacobian(J[:6, :], LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n robot_model.update_jacobian(J[6:, :], RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n #Note: lstsq is faster than pinv and then dot\n qdot[:] = np.linalg.lstsq(J, xdot)[0]\n #qdot = np.linalg.pinv(J).dot(xdot)\n\n q[:] += qdot\n\n actual_LH_pose = robot_model.fk(LH_name, q=q, body_offset=l_soft_hand_offset, update_kinematics=True)\n actual_RH_pose = robot_model.fk(RH_name, q=q, body_offset=r_soft_hand_offset, update_kinematics=True)\n\n joint_lift_trajectory[ii+1, :] = q[:]\n\n\nif save_reach_traj:\n np.save(file_name + '_m' + str(reach_option) + '_reach.npy', joint_reach_trajectory)\n if reach_option == 2:\n np.save(file_name+'_reach_LH_EE.npy', desired_LH_reach_pose)\n np.save(file_name+'_reach_RH_EE.npy', desired_RH_reach_pose)\n\nif save_lift_traj:\n np.save(file_name + '_m' + str(lift_option) + '_lift.npy', joint_lift_trajectory)\n if lift_option == 2:\n np.save(file_name+'_lift_LH_EE.npy', desired_LH_lift_pose)\n np.save(file_name+'_lift_RH_EE.npy', desired_RH_lift_pose)\n\n#plt.plot(desired_LH_lift_pose[:, -1], 'r')\n#plt.show()\nif plot_at_the_end:\n joints_to_plot = bigman_params['joint_ids']['LA']\n cols = 3\n joint_names = [bigman_params['joints_names'][idx] for idx in joints_to_plot]\n #plot_joint_info(joints_to_plot, joint_reach_trajectory, joint_names, data='position', block=False)\n #qdots_reach = np.vstack((np.diff(joint_reach_trajectory, axis=0), np.zeros((1, robot_model.qdot_size))))\n #plot_joint_info(joints_to_plot, qdots_reach*freq, joint_names, data='velocity', block=False)\n #qddots_reach = np.vstack((np.diff(qdots_reach, axis=0), np.zeros((1, robot_model.qdot_size))))\n #plot_joint_info(joints_to_plot, qddots_reach*freq*freq, joint_names, data='acceleration', block=False)\n\n plot_joint_info(joints_to_plot, joint_lift_trajectory, joint_names, data='position', block=False)\n qdots_lift = np.vstack((np.diff(joint_lift_trajectory, axis=0), np.zeros((1, robot_model.qdot_size))))\n plot_joint_info(joints_to_plot, qdots_lift*freq, joint_names, data='velocity', block=False)\n qddots_lift = np.vstack((np.diff(qdots_lift, axis=0), np.zeros((1, robot_model.qdot_size))))\n plot_joint_info(joints_to_plot, qddots_lift*freq*freq, joint_names, data='acceleration', block=False)\n\n raw_input(\"Press a key to close the script\")\n\n\n" }, { "alpha_fraction": 0.6244493126869202, "alphanum_fraction": 0.6350220441818237, "avg_line_length": 26.349397659301758, "blob_id": "d951765d8171c64a55815376922b685745a27748", "content_id": "c470a0c4829da60f9656c62a708539a006919509", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4540, "license_type": "permissive", "max_line_length": 87, "num_lines": 166, 
"path": "/examples/rl_algos/ddpg/gym_ddpg.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Reinforce on Pusher2D3DofGoalCompoEnv.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\n\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\nfrom robolearn.utils.data_management import SimpleReplayBuffer\nimport gym\n\nfrom robolearn.torch.algorithms.rl_algos.ddpg import DDPG\n\nfrom robolearn.torch.models import NNQFunction\nfrom robolearn.torch.policies import TanhMlpPolicy\nfrom robolearn.utils.exploration_strategies import OUStrategy\nfrom robolearn.utils.exploration_strategies import PolicyWrappedWithExplorationStrategy\n\nimport argparse\n\n\ndef experiment(variant):\n ptu.set_gpu_mode(variant['gpu'])\n\n env = NormalizedBoxEnv(\n gym.make(variant['env_name'])\n )\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n net_size = variant['net_size']\n\n qf = NNQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size]\n )\n policy = TanhMlpPolicy(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size],\n )\n es = OUStrategy(\n action_space=env.action_space,\n mu=0,\n theta=0.15,\n max_sigma=0.3,\n min_sigma=0.3,\n decay_period=100000,\n )\n exploration_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=es,\n policy=policy,\n )\n\n replay_buffer = SimpleReplayBuffer(\n variant['algo_params']['replay_buffer_size'],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n variant['algo_params']['replay_buffer'] = replay_buffer\n\n # QF Plot\n # variant['algo_params']['epoch_plotter'] = None\n\n algorithm = DDPG(\n explo_env=env,\n # training_env=env,\n save_environment=False,\n policy=policy,\n explo_policy=exploration_policy,\n qf=qf,\n **variant['algo_params']\n )\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n return algorithm\n\n\nPATH_LENGTH = 1000\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 1\n\nexpt_params = dict(\n algo_params=dict(\n # Common RLAlgo params\n num_epochs=100, # n_epochs\n num_steps_per_epoch=PATHS_PER_EPOCH * PATH_LENGTH,\n num_updates_per_train_call=1, # How to many run algorithm train fcn\n num_steps_per_eval=PATHS_PER_EVAL * PATH_LENGTH,\n # EnvSampler params\n max_path_length=PATH_LENGTH, # max_path_length\n render=False,\n # ReplayBuffer params\n batch_size=64, # batch_size\n replay_buffer_size=1e4,\n # DDPG params\n # TODO: epoch_plotter\n policy_learning_rate=1e-4,\n qf_learning_rate=1e-3,\n use_soft_update=True,\n tau=1e-2,\n\n discount=0.99,\n reward_scale=1.0,\n ),\n net_size=64\n)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='mountaincar')\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='gap_and_last')\n parser.add_argument('--snap_gap', type=int, default=10)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == 
\"__main__\":\n args = parse_args()\n\n expt_variant = expt_params\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = 'gym_'+args.env\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n if args.env == 'mountaincar':\n expt_variant['env_name'] = 'MountainCarContinuous-v0'\n else:\n raise NotImplementedError\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algo = experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.585781991481781, "alphanum_fraction": 0.585781991481781, "avg_line_length": 18.18181800842285, "blob_id": "505704eab3905828d33f568784cff7ea6a557dc4", "content_id": "1053b462067a75e6618a858fe9f6fc78d7e89f69", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "permissive", "max_line_length": 72, "num_lines": 55, "path": "/robolearn/models/policies/base.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import abc\nfrom future.utils import with_metaclass\n\n\nclass Policy(with_metaclass(abc.ABCMeta, object)):\n \"\"\"\n General policy superclass.\n :math:`a_t = pi(s_t)`\n \"\"\"\n def __init__(self, action_dim):\n self._action_dim = action_dim\n\n @abc.abstractmethod\n def get_action(self, *args, **kwargs):\n \"\"\"\n\n :param observation:\n :return: action, debug_dictionary\n \"\"\"\n pass\n\n def reset(self):\n pass\n\n @property\n def action_dim(self):\n return self._action_dim\n\n\nclass ExplorationPolicy(Policy):\n \"\"\"\n Exploration Policy\n \"\"\"\n def set_num_steps_total(self, t):\n pass\n\n\nclass SerializablePolicy(Policy):\n \"\"\"\n Policy that can be serialized.\n \"\"\"\n def get_param_values(self):\n return None\n\n def set_param_values(self, values):\n pass\n\n \"\"\"\n Parameters should be passed as np arrays in the two functions below.\n \"\"\"\n def get_param_values_np(self):\n return None\n\n def set_param_values_np(self, values):\n pass\n" }, { "alpha_fraction": 0.6023890972137451, "alphanum_fraction": 0.6143344640731812, "avg_line_length": 41.85365676879883, "blob_id": "f96f75f8ed293ce5b3bf2205e5bdabcc57acb463", "content_id": "3c016e2f08697aa2449946d747e5c5905fb39cc3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1758, "license_type": "permissive", "max_line_length": 81, "num_lines": 41, "path": "/scenarios/humanoids2018/plots/plot_sum_specific_costs.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom robolearn.v010.utils.plots.sum_specific_costs import plot_sum_specific_costs\nfrom builtins import input\n\nmethod = 'gps' # 'gps' or 'trajopt'\n\noption = 0 # 0: plot mdgps-bmdgps-dmdgps | 1: plot remove_bad experiment\nitr_to_load = None # list(range(8))\nblock = False\nspecific_costs = [1, 2] #[3, 4] #None # None for all costs\nlatex_plot = True\n\nif option == 0:\n # Paper logs: Methods comparison, Distance to Tgt plot\n gps_directory_names = ['mdgps_log'] #, 'bmdgps_log', 'dmdgps_log']\n gps_models_labels = ['MDGPS'] #, 'B-MDGPS', 'D-MDGPS']\nelif option == 1:\n # Papers logs:\n gps_directory_names = ['mdgps_log'] #, 'mdgps_no1_log', 'mdgps_no2_log']\n gps_models_labels = ['MDGPS'] #, 'MDGPS no 1/6 
worst', 'MDGPS no 2/6 worst']\nelse:\n raise ValueError(\"Wrong script option '%s'\" % str(option))\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\nconds_to_combine = list([0])\nplot_sum_specific_costs(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block,\n conds_to_combine=conds_to_combine,\n specific_costs=specific_costs, latex_plot=latex_plot,\n plot_title='Training conditions')\n\nconds_to_combine = list([0])\nplot_sum_specific_costs(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block,\n conds_to_combine=conds_to_combine,\n specific_costs=specific_costs, latex_plot=latex_plot,\n plot_title='Test condition')\n\ninput('Showing plots. Press a key to close...')\n\n" }, { "alpha_fraction": 0.8070175647735596, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 27.5, "blob_id": "9ddba7d019b01fb691a3b4fe2fc850576ffa6f8e", "content_id": "aafebad6c25b6004fe92921766dc27a809597120", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "permissive", "max_line_length": 35, "num_lines": 2, "path": "/robolearn/torch/utils/nn/networks/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .mlp import Mlp\nfrom .flatten_mlp import FlattenMlp\n" }, { "alpha_fraction": 0.6917740106582642, "alphanum_fraction": 0.6957383751869202, "avg_line_length": 30.46875, "blob_id": "0ca871f4803633f5b91e5bb211847a172390f1f0", "content_id": "a7189f8dd5e6c78a7616efaed731b1bb1184e320", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1009, "license_type": "permissive", "max_line_length": 81, "num_lines": 32, "path": "/examples/miscellaneous/tf_save_vars.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\ncounter = tf.Variable(tf.zeros([1]), name=\"counter\")\nvariable_to_save = tf.Variable(tf.random_normal([2, 3]), name=\"variable_to_save\")\nincrement_counter = tf.assign(counter, counter+1)\n\n#global_init = tf.global_variables_initializer()\nall_init = tf.global_variables_initializer()\nsome_init = tf.variables_initializer([counter], name='some_init')\n\n# Add ops to save and restore all the variables.\n#saver = tf.train.Saver() # Save all variables\nsaver = tf.train.Saver({'variable_to_save': variable_to_save})\n\n\nwith tf.Session() as sess:\n sess.run(all_init)\n #sess.run(some_init)\n\n print(sess.run(variable_to_save))\n print(sess.run(counter))\n sess.run(increment_counter)\n\n # Restore variables from disk.\n saver.restore(sess, \"models/tf-test-model.ckpt\")\n print(\"Model restored.\")\n\n print(sess.run(variable_to_save))\n print(sess.run(counter))\n\n save_path = saver.save(sess, \"models/tf-test-model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n\n\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 41, "blob_id": "38182979bd3df5ad6a1eab79a85da20167463afd", "content_id": "bbba1bea98013f060533941228d13d893a0d4baf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "permissive", "max_line_length": 41, "num_lines": 1, "path": "/robolearn/envs/simple_envs/crawler/__init__.py", "repo_name": "domingoesteban/robolearn", 
"src_encoding": "UTF-8", "text": "from .crawler_env import CrawlingRobotEnv\n" }, { "alpha_fraction": 0.6353467702865601, "alphanum_fraction": 0.6375839114189148, "avg_line_length": 20.285715103149414, "blob_id": "cf6b217e06f8577de39cf18e5a8a3ea1d87003af", "content_id": "c6862c0a03fbba56ea2c16c49bcc540aff7cfdb8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "permissive", "max_line_length": 70, "num_lines": 21, "path": "/robolearn/utils/data_management/fake_replay_buffer.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfrom robolearn.utils.data_management.replay_buffer import ReplayBuffer\n\n\nclass FakeReplayBuffer(ReplayBuffer):\n def __init__(self):\n pass\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n pass\n\n def terminate_episode(self):\n pass\n\n def random_batch(self, batch_size):\n pass\n\n def available_samples(self):\n return -1\n" }, { "alpha_fraction": 0.5489035844802856, "alphanum_fraction": 0.5819700956344604, "avg_line_length": 26.232227325439453, "blob_id": "da4d9ae52a930012560de4e8160f0be5dc9f59e9", "content_id": "209de5e298bff9218650f13198a18e1a3506b208", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5746, "license_type": "permissive", "max_line_length": 75, "num_lines": 211, "path": "/examples/mujoco_envs/mujoco_all_sql.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nRun PyTorch Soft Q-Learning on some Gym Envs.\n\nNOTE: You need PyTorch 0.4\n\"\"\"\nimport gym\nimport numpy as np\n\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\nfrom robolearn.utils.launchers.launcher_util import setup_logger\n\nfrom robolearn.torch.policies import SamplingPolicy\nfrom robolearn.torch.algorithms.rl_algos import SQL\nfrom robolearn.torch.models import NNQFunction\n\nimport argparse\n\n\ndef experiment(variant):\n ptu._use_gpu = variant['gpu']\n\n env = NormalizedBoxEnv(gym.make(variant['env_name']))\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n net_size = variant['net_size']\n qf = NNQFunction(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size],\n )\n if ptu.gpu_enabled():\n qf.cuda()\n\n policy = SamplingPolicy(\n obs_dim=obs_dim,\n action_dim=action_dim,\n hidden_sizes=[net_size, net_size],\n )\n if ptu.gpu_enabled():\n policy.cuda()\n\n algorithm = SQL(\n env=env,\n qf=qf,\n policy=policy,\n **variant['algo_params']\n )\n\n if ptu.gpu_enabled():\n algorithm.cuda()\n algorithm.train()\n\n return algorithm\n\nSHARED_PARAMS = dict(\n # Common RLAlgo params\n num_steps_per_epoch=1000, # Epoch length\n num_updates_per_env_step=1, # Like n_train_repeat??\n num_steps_per_eval=1000, # like eval_n_episodes??\n # EnvSampler params\n max_path_length=1000,\n render=False,\n # ReplayBuffer params\n batch_size=128,\n min_buffer_size=1000, # Minimum buffer size to start training\n replay_buffer_size=1e6,\n # SoftQLearning params\n plotter=None,\n policy_lr=3e-4,\n qf_lr=3e-4,\n value_n_particles=16,\n # use_hard_updates=True, # Hard update for target Q-fcn\n use_hard_updates=False, # CHANGED ON 27-04\n hard_update_period=1000, # tf_target_update_interval\n soft_target_tau=0.001, # Not used if use_hard_updates=True\n # 
TODO:kernel_fn\n kernel_n_particles=16,\n kernel_update_ratio=0.5,\n discount=0.99,\n)\n\nENV_PARAMS = {\n 'swimmer': dict(\n env_name='Swimmer-v2',\n algo_params=dict(\n num_epochs=500,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=30,\n ),\n net_size=128,\n ),\n 'hopper': dict(\n env_name='Hopper-v2',\n algo_params=dict(\n num_epochs=2000,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=30,\n ),\n net_size=128,\n ),\n 'half-cheetah': dict(\n env_name='HalfCheetah-v2',\n algo_params=dict(\n num_epochs=10000,\n max_path_length=1000,\n min_buffer_size=1000,\n replay_buffer_size=1e7,\n reward_scale=30,\n ),\n net_size=128,\n ),\n 'walker': dict(\n env_name='Walker2d-v2',\n algo_params=dict(\n num_epochs=5000,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=10,\n ),\n net_size=128,\n ),\n 'ant': dict(\n env_name='Ant-v2',\n algo_params=dict(\n num_epochs=10000,\n max_path_length=1000,\n min_buffer_size=1000,\n reward_scale=300,\n ),\n net_size=128,\n ),\n 'humanoid': dict(\n env_name='Humanoid-v2',\n algo_params=dict(\n num_epochs=20000,\n max_path_length=1000,\n min_buffer_size=1000,\n # reward_scale=100, # before 30/04\n # reward_scale=300, # 30/04\n # reward_scale=50, # 01/05\n reward_scale=10, # 09/05\n ),\n net_size=128,\n ),\n}\n\nAVAILABLE_ENVS = list(ENV_PARAMS.keys())\nDEFAULT_ENV = 'half-cheetah'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env',\n type=str,\n choices=AVAILABLE_ENVS,\n default=DEFAULT_ENV)\n parser.add_argument('--net_size', type=int, default=None)\n parser.add_argument('--expt_name', type=str, default=None)\n # parser.add_argument('--expt_name', type=str, default=timestamp())\n # Logging arguments\n parser.add_argument('--snap_mode', type=str, default='last')\n parser.add_argument('--snap_gap', type=int, default=100)\n # parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--gpu', action=\"store_true\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.env is None:\n expt_variant = ENV_PARAMS[DEFAULT_ENV]\n else:\n expt_variant = ENV_PARAMS[args.env]\n\n default_algo_params = SHARED_PARAMS\n for param in default_algo_params:\n if param not in expt_variant['algo_params'].keys():\n expt_variant['algo_params'][param] = default_algo_params[param]\n\n # Net size\n if args.net_size is not None:\n expt_variant['net_size'] = args.net_size\n\n expt_variant['gpu'] = args.gpu\n\n # Experiment name\n if args.expt_name is None:\n expt_name = expt_variant['env_name']\n else:\n expt_name = args.expt_name\n\n expt_variant['algo_params']['render'] = args.render\n\n setup_logger(expt_name,\n variant=expt_variant,\n snapshot_mode=args.snap_mode,\n snapshot_gap=args.snap_gap,\n log_dir=args.log_dir)\n algorithm = experiment(expt_variant)\n\n input('Press a key to close the script...')\n" }, { "alpha_fraction": 0.6725663542747498, "alphanum_fraction": 0.6725663542747498, "avg_line_length": 23.565217971801758, "blob_id": "f2d87ba74a9249728daec1bf950609f786a5a5a9", "content_id": "4c8c539a54667d25fb342dafb65342a6e5592604", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "permissive", "max_line_length": 69, "num_lines": 23, "path": "/robolearn/algorithms/traj_opt/base.py", "repo_name": 
"domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file defines the base trajectory optimization class.\nBased on Finn-GPS\n\n\"\"\"\nimport abc\nfrom future.utils import with_metaclass\n\n\nclass TrajOpt(object):\n \"\"\" Trajectory optimization superclass. \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, hyperparams):\n self._hyperparams = hyperparams\n\n @abc.abstractmethod\n def update(self, *args, **kwargs):\n \"\"\" Update trajectory distribution. \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass.\")\n\n def set_logger(self, logger):\n self.logger = logger\n" }, { "alpha_fraction": 0.6123347878456116, "alphanum_fraction": 0.634361207485199, "avg_line_length": 14.133333206176758, "blob_id": "556cc5802caa145e26075bc01290284ce042880a", "content_id": "6971582f603eb979d8b290318896cb74cc7008c6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "permissive", "max_line_length": 39, "num_lines": 15, "path": "/examples/mujoco_envs/test_cheetah.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import gym\nimport time\n\nenv = gym.make('HalfCheetah-v2')\n\nenv.reset()\nenv.render()\nii = 0\nwhile True:\n time.sleep(0.1)\n ii += 1\n print(ii)\n env.step(env.action_space.sample())\n env.render()\nprint(\"ME CERRRROOO\")\n" }, { "alpha_fraction": 0.6459227204322815, "alphanum_fraction": 0.6459227204322815, "avg_line_length": 32.21428680419922, "blob_id": "ce3c9b3d28007d82a8bb090d6159ba81824bcfba", "content_id": "2dd591684bee626ee46fec3bc7c25a03a2526df9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "permissive", "max_line_length": 57, "num_lines": 14, "path": "/robolearn/torch/policies/tanh_mlp_policy.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.utils.serializable import Serializable\nfrom robolearn.torch.policies.mlp_policy import MlpPolicy\n\n\nclass TanhMlpPolicy(MlpPolicy, Serializable):\n def __init__(self, *args, **kwargs):\n # self._serializable_initialized = False\n # Serializable.quick_init(self, locals())\n\n self.save_init_params(locals())\n super(TanhMlpPolicy, self).__init__(\n *args,\n output_activation='tanh',\n **kwargs)\n\n" }, { "alpha_fraction": 0.5857142806053162, "alphanum_fraction": 0.6021978259086609, "avg_line_length": 38.565216064453125, "blob_id": "6b1642abd68e62323ac814271338b7a16249b70c", "content_id": "a029281ae9c2f08255220bcf25a1a8f20f48604d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "permissive", "max_line_length": 96, "num_lines": 23, "path": "/scenarios/tests/plots/policy_final_safe_distance.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom builtins import input\nimport numpy as np\nfrom robolearn.old_utils.plots.policy_final_safe_distance import plot_policy_final_safe_distance\n\nmethod = 'gps' # 'gps' or 'trajopt'\ngps_directory_names = ['reacher_log']#, 'reacher_log2', 'reacher_log3']\ngps_models_labels = ['gps1']#, 'gps2', 'gps3']\nsafe_distance = 0.15\nsafe_states_tuples = [(6, 12), (7, 13)]\nitr_to_load = None # list(range(8))\nblock = False\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in 
gps_directory_names]\n\nplot_policy_final_safe_distance(dir_names, safe_states_tuples,\n itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels,\n safe_distance=safe_distance,\n block=block)\n\ninput('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.6488549709320068, "alphanum_fraction": 0.6549618244171143, "avg_line_length": 35.38888931274414, "blob_id": "af9dd7904b23562757e024194aef1999019c0382", "content_id": "04c2ca18f906e249248e0b0286e19ccfdff36bee", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "permissive", "max_line_length": 76, "num_lines": 18, "path": "/scenarios/dualist_gps/bigman/plots/specific_cost.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nfrom robolearn.old_utils.plots.specific_cost import plot_specific_cost\n\nmethod = 'gps' # 'gps' or 'trajopt'\ngps_directory_names = ['gps_log1']\ngps_models_labels = ['gps_log1']\nitr_to_load = None # list(range(8))\nblock = False\nspecific_costs = None #[4] # None for all costs\n\ndir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name\n for dir_name in gps_directory_names]\n\nplot_specific_cost(dir_names, itr_to_load=itr_to_load, method=method,\n gps_models_labels=gps_models_labels, block=block,\n specific_costs=specific_costs)\n\ninput('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.5173754692077637, "alphanum_fraction": 0.5200486183166504, "avg_line_length": 30.89922523498535, "blob_id": "8b8e11a4faa8d92c20fdca0aa50ec2b5f9474512", "content_id": "af463ff2f28e5c8607a03fc0c2d9f17d380e4d24", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4115, "license_type": "permissive", "max_line_length": 80, "num_lines": 129, "path": "/examples/rl_algos/gps/main.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nimport yaml\nimport random\nimport numpy as np\nimport shutil\nfrom scenario import Scenario\nfrom builtins import input\n\n\ndef main():\n # ##################### #\n # Commandline Arguments #\n # ##################### #\n parser = argparse.ArgumentParser()\n parser.add_argument('--scenario', type=str, default='mdgps')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--mode', type=str, default='train')\n parser.add_argument('--run_num', type=int, default=0)\n parser.add_argument('--itr', type=int, default=-1)\n parser.add_argument('--cond', type=int, default=0)\n parser.add_argument('--render', action=\"store_true\")\n parser.add_argument('--local', action=\"store_true\")\n parser.add_argument('--log_dir', type=str, default='NOTHING')\n\n args = parser.parse_args()\n print('command_line args:', args)\n\n\n # ############### #\n # Log directories #\n # ############### #\n if args.log_dir == 'NOTHING':\n log_prefix = args.scenario\n else:\n log_prefix = args.log_dir\n log_dir = str(log_prefix)+('_log/run_%02d' % args.run_num)\n if args.mode == 'train':\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n else:\n # replace_dir = input(\"Log directory '%s' already exists!. \"\n # \"Press [y/Y] to replace it and continue with \"\n # \"the script\"\n # \"replace it? 
[y/Y]: \" % log_dir)\n replace_dir = 'y'\n # replace_dir = 'y'\n if replace_dir.lower() == 'y':\n shutil.rmtree(log_dir)\n os.makedirs(log_dir)\n else:\n print('Finishing the script!!!')\n exit()\n elif args.mode == 'test':\n if not os.path.exists(log_dir):\n raise ValueError(\"It does not exist log directory '%s'\"\n % log_dir)\n else:\n print(\"Testing script with log directory '%s'!\" % log_dir)\n elif args.mode == 'eval':\n if not os.path.exists(log_dir):\n raise ValueError(\"It does not exist log directory '%s'\"\n % log_dir)\n else:\n print(\"Testing script with log directory '%s'!\" % log_dir)\n else:\n raise ValueError('Wrong script option')\n\n # ############# #\n # Load Scenario #\n # ############# #\n hyperparam_dict = dict()\n hyperparam_dict['scenario'] = args.scenario\n hyperparam_dict['seed'] = args.seed\n hyperparam_dict['run_num'] = args.run_num\n hyperparam_dict['render'] = args.render\n hyperparam_dict['log_dir'] = log_dir\n\n scenario = Scenario(hyperparam_dict)\n\n # ############# #\n # Set variables #\n # ############# #\n random.seed(args.seed)\n np.random.seed(args.seed)\n scenario.env.seed(args.seed)\n\n\n # ####################### #\n # Dump Parameters to file #\n # ####################### #\n hyperparam_dict['task_params'] = scenario.task_params\n\n with open(log_dir+'/hyperparameters.yaml', 'w') as outfile:\n yaml.dump(hyperparam_dict, outfile, default_flow_style=False)\n\n\n # scenario.env.reset(condition=0)\n # scenario.env.render(mode='human')\n\n # ########### #\n # SCRIPT MODE #\n # ########### #\n if args.mode == 'train':\n successful = scenario.train()\n elif args.mode == 'test':\n if args.local:\n pol_type = 'local'\n else:\n pol_type = 'global'\n successful = scenario.test_policy(iteration=args.itr,\n condition=args.cond,\n pol_type=pol_type)\n input(\"Press a key to close the script\")\n elif args.mode == 'eval':\n successful = scenario.eval_dualism()\n else:\n raise ValueError('Wrong script option')\n\n if successful:\n print('#'*40)\n print('The script has finished successfully!!!')\n print('#'*40)\n else:\n print('The script has NOT finished successfully!!!')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6571988463401794, "alphanum_fraction": 0.6571988463401794, "avg_line_length": 33, "blob_id": "1ee7dc2b9ca208d6002aaa8adfe393e5b25d084f", "content_id": "e0cb8481457f36ea0e1a6161526cff851f74721d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1021, "license_type": "permissive", "max_line_length": 76, "num_lines": 30, "path": "/robolearn/torch/policies/weighted_multi_policy_selector.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from robolearn.torch.core import PyTorchModule\nfrom robolearn.models.policies import ExplorationPolicy\n\n\nclass WeightedMultiPolicySelector(PyTorchModule, ExplorationPolicy):\n def __init__(self, multipolicy, idx):\n self.save_init_params(locals())\n super(WeightedMultiPolicySelector, self).__init__()\n ExplorationPolicy.__init__(self, multipolicy.action_dim)\n\n self._multipolicy = multipolicy\n self.idx = idx\n\n def get_action(self, *args, **kwargs):\n kwargs['pol_idx'] = self.idx\n action, policy_info = self._multipolicy.get_action(*args, **kwargs)\n\n return action, policy_info\n\n def get_actions(self, *args, **kwargs):\n kwargs['pol_idx'] = self.idx\n action, policy_info = self._multipolicy.get_actions(*args, **kwargs)\n\n return action, policy_info\n\n def forward(self, *nn_input, 
**kwargs):\n kwargs['pol_idx'] = self.idx\n action, policy_info = self._multipolicy(*nn_input, **kwargs)\n\n return action, policy_info\n\n" }, { "alpha_fraction": 0.5687398910522461, "alphanum_fraction": 0.5749889612197876, "avg_line_length": 33.34848403930664, "blob_id": "c1d5b4155a06062690c8b635bcca35d8e7b0a2ae", "content_id": "94f083adf495cde56ac8181d5c07f690afcd0ec6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13602, "license_type": "permissive", "max_line_length": 120, "num_lines": 396, "path": "/scenarios/tests/multi_ros_gazebo_env.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import sys\nimport socket\nimport psutil\nimport subprocess\nimport multiprocessing\nimport threading\nimport time\nimport os\nimport signal\nimport traceback\n\nfrom robolearn.old_envs.gazebo_ros_env_interface import GazeboROSEnvInterface\n\ninit_roscore_port = 11312\ninit_gzserver_port = 11347\n\n\nclass RosGazebo(multiprocessing.Process):\n def __init__(self, host='localhost'):\n super(RosGazebo, self).__init__()\n\n self.host = host\n self.roscore = None\n self.gzserver = None\n self.roslaunch = None\n self.roscore_port = None\n self.gzserver_port = None\n self.env_interface = None\n\n self.env_vars = os.environ.copy()\n\n self.close_pipe = multiprocessing.Pipe()\n self.reset_pipe = multiprocessing.Pipe()\n self.action_pipe = multiprocessing.Pipe()\n self.obs_queue = multiprocessing.Queue()\n self.state_queue = multiprocessing.Queue()\n\n self.wait_reset_thread = None\n self.wait_action_thread = None\n self.update_observation_thread = None\n self.update_state_thread = None\n\n self.running = True\n\n def run(self):\n try:\n self.start_all()\n\n if self.gzserver_port is None:\n gzserver_port = init_gzserver_port\n else:\n gzserver_port = self.gzserver_port\n self.gzserver_port = self.get_available_port(gzserver_port)\n\n os.environ[\"ROS_MASTER_URI\"] = 'http://%s:%d' % (str(self.host), self.roscore_port)\n os.environ[\"GAZEBO_MASTER_URI\"] = 'http://%s:%d' % (str(self.host), self.gzserver_port)\n\n self.roslaunch = self.run_roslaunch()\n\n action_types = list()\n action_topic_infos = list()\n observation_active = list()\n state_active = list()\n from std_msgs.msg import Float64\n from sensor_msgs.msg import JointState\n for ii in range(3):\n action_types.append({'name': 'joint_effort',\n 'dof': 1})\n action_topic_infos.append({'name': '/manipulator2d/joint'+str(ii)+'_position_controller/command',\n 'type': Float64,\n 'freq': 100})\n observation_active.append({'name': 'joint_state',\n 'type': 'joint_state',\n 'ros_class': JointState,\n 'fields': ['position', 'velocity'],\n 'joints': [0, 1, 2], # Joint IDs\n 'ros_topic': '/manipulator2d/joint_states',\n })\n state_active.append({'type': 'joint_state',\n 'fields': ['position', 'velocity'],\n 'joints': [0, 1, 2]})\n self.env_interface = GazeboROSEnvInterface(action_types=action_types, action_topic_infos=action_topic_infos,\n observation_active=observation_active,\n state_active=state_active)\n\n # Threads\n self.wait_reset_thread = threading.Thread(target=self.wait_reset, args=[])\n self.wait_reset_thread.start()\n self.wait_action_thread = threading.Thread(target=self.wait_action, args=[])\n self.wait_action_thread.start()\n self.update_observation_thread = threading.Thread(target=self.update_obs, args=[])\n self.update_observation_thread.start()\n self.update_state_thread = threading.Thread(target=self.update_state, args=[])\n 
self.update_state_thread.start()\n\n # Block function\n close_option = self.close_pipe[1].recv()\n\n if close_option == 'all':\n self.stop_all()\n else:\n raise ValueError(\"Wrong close_option %s\" % close_option)\n\n except Exception as e:\n print(\"Error in RosGazebo with PID:%d\" % self.pid)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=2, file=sys.stdout)\n self.stop_all()\n\n def wait_reset(self):\n while True:\n print('Waiting for reset request...')\n reset = self.reset_pipe[1].recv()\n self.env_interface.reset(time=reset[0], freq=reset[1], cond=reset[2])\n\n def wait_action(self):\n while True:\n print('Waiting for external command...')\n action = self.action_pipe[1].recv()\n self.env_interface.send_action(action)\n\n def update_obs(self):\n while True:\n if self.obs_queue.empty():\n self.obs_queue.put(self.env_interface.get_observation())\n\n def update_state(self):\n while True:\n if self.state_queue.empty():\n self.state_queue.put(self.env_interface.get_state())\n\n def reset(self, time=None, freq=None, cond=0):\n # TODO: FILTER ACTION TO AVOID BAD BEHAVIOR\n self.reset_pipe[0].send((time, freq, cond))\n\n def send_action(self, action):\n # TODO: FILTER ACTION TO AVOID BAD BEHAVIOR\n self.action_pipe[0].send(action)\n\n def get_observation(self):\n while self.obs_queue.empty():\n pass\n return self.obs_queue.get()\n\n def get_state(self):\n while self.state_queue.empty():\n pass\n return self.state_queue.get()\n\n def start(self):\n super(RosGazebo, self).start()\n\n def restart(self):\n if self.running is False:\n self.running = True\n self.run()\n else:\n print(\"RosGazebo is already running\")\n\n def stop(self):\n self.close_pipe[0].send('all')\n self.running = False\n\n def start_all(self):\n self.start_roscore()\n #self.start_gzserver()\n\n def start_roscore(self):\n # Run roscore\n if self.roscore_port is None:\n roscore_port = init_roscore_port\n else:\n roscore_port = self.roscore_port\n\n roscore_port = self.get_available_port(roscore_port)\n\n if self.roscore is None:\n print(\"Running roscore with port %d\" % roscore_port)\n self.roscore_port = roscore_port\n self.roscore = self.run_roscore(roscore_port)\n\n time.sleep(1) # Sleeping so the roscore ports can be opened\n\n def run_roscore(self, port):\n # TODO: Change subprocess.PIPE to a file\n self.env_vars[\"ROS_MASTER_URI\"] = 'http://%s:%d' % (str(self.host), port)\n roscore_subprocess = subprocess.Popen(['roscore', '-p', '%d' % port], shell=False, preexec_fn=os.setsid)#,\n # env=self.env_vars, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n #roscore_subprocess.wait()\n return roscore_subprocess\n\n def start_gzserver(self):\n if self.gzserver_port is None:\n gzserver_port = init_gzserver_port\n else:\n gzserver_port = self.gzserver_port\n\n gzserver_port = self.get_available_port(gzserver_port)\n roscore_port = self.roscore_port\n\n if self.roscore is None:\n print(\"Error running gzserver. 
There is not a roscore running at port %d.\" % roscore_port)\n else:\n print(\"Running gzserver with port %d.\" % gzserver_port)\n self.gzserver_port = gzserver_port\n self.gzserver = self.run_gzserver(gzserver_port, roscore_port)\n\n def run_gzserver(self, gz_port, roscore_port):\n self.env_vars[\"GAZEBO_MASTER_URI\"] = 'http://%s:%d' % (str(self.host), gz_port)\n gzserver_subprocess = subprocess.Popen(['rosrun', 'gazebo_ros', 'gzserver'], shell=False, preexec_fn=os.setsid,\n env=self.env_vars, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # roscore_subprocess.wait()\n return gzserver_subprocess\n\n def run_roslaunch(self):\n roslaunch_cmd = ['roslaunch',\n 'manipulator2d_gazebo',\n 'manipulator2d_world.launch']\n gzserver_subprocess = subprocess.Popen(roslaunch_cmd, shell=False, preexec_fn=os.setsid,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # roscore_subprocess.wait()\n return gzserver_subprocess\n\n def stop_all(self):\n self.stop_roslaunch()\n # self.stop_gzserver()\n self.stop_roscore()\n\n def stop_gzserver(self):\n if self.gzserver_port is None:\n print(\"There is not a gzserver running.\")\n else:\n print(\"Killing gzserver with port %d\" % self.gzserver_port)\n os.killpg(os.getpgid(self.gzserver.pid), signal.SIGTERM)\n self.gzserver = None\n self.gzserver_port = None\n\n def stop_roscore(self):\n if self.roscore_port is None:\n print(\"There is not a roscore running.\")\n else:\n print(\"Killing roscore with port %d\" % self.roscore_port)\n os.killpg(os.getpgid(self.roscore.pid), signal.SIGTERM)\n self.roscore = None\n self.roscore_port = None\n\n def stop_roslaunch(self):\n if self.roslaunch is None:\n print(\"There is not a roslaunch running.\")\n else:\n print(\"Killing roslaunch (PID %d)\" % self.roslaunch.pid)\n os.killpg(os.getpgid(self.roslaunch.pid), signal.SIGTERM)\n self.roslaunch = None\n\n def get_available_port(self, init_port):\n result = 0\n while result == 0:\n result = self.is_port_open(self.host, init_port)\n if result == 0:\n init_port += 1\n return init_port\n\n @staticmethod\n def is_port_open(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # return sock.connect_ex(('127.0.0.1', 11311))\n return sock.connect_ex((host, port))\n\n #def __del__(self):\n # print('termino MultiROS')\n # self.stop_all()\n\n\nclass MultiRosGazebo(object):\n def __init__(self, n_ros_gz, host='localhost'):\n self.host = host\n self.total_ros_gz = n_ros_gz\n\n # self.rosgazebos = [RosGazebo(host=self.host) for _ in range(self.total_ros_gz)]\n\n self.rosgazebos = [None for _ in range(self.total_ros_gz)]\n\n for ii in range(self.total_ros_gz):\n self.rosgazebos[ii] = RosGazebo('localhost')\n self.start([ii])\n\n def start(self, index=None):\n if index is None:\n index = range(self.total_ros_gz)\n\n for ii in index:\n self.rosgazebos[ii].start()\n print(\"rosgazebo[%d] is running with PID:%d\" % (ii, self.rosgazebos[ii].pid))\n time.sleep(2)\n\n def stop(self, index=None):\n if index is None:\n index = range(self.total_ros_gz)\n\n for ii in index:\n print(\"Stop rosgazebo[%d]\" % ii)\n #self.rosgazebos[ii].terminate()\n self.rosgazebos[ii].stop()\n\n def restart(self, index=None):\n if index is None:\n index = range(self.total_ros_gz)\n\n for ii in index:\n self.rosgazebos[ii].restart()\n print(\"Restarting rosgazebo[%d] with PID:%d\" % (ii, self.rosgazebos[ii].pid))\n time.sleep(2)\n\n def __del__(self):\n self.stop()\n\n\n#roslaunch_cmd = '/etc/bash -c source activate py27 && roslaunch manipulator2d_gazebo 
manipulator2d_world.launch'\n#roslaunch_cmd = 'ls'\n#gzserver_subprocess = subprocess.Popen('/bin/bash -c ls', shell=True, preexec_fn=os.setsid)#,\n#raw_input('borra')\n\nmulti_ros_gz = MultiRosGazebo(1)\n\n\n#for ii in range(multi_ros_gz.total_ros_gz):\n# print(multi_ros_gz.rosgazebos[ii].roscore_port)\n# print(multi_ros_gz.rosgazebos[ii].gzserver_port)\n\n#import sys\n#def background_imports(host, port):\n# import talker as talk\n# modulenames = set(sys.modules)&set(globals())\n# allmodules = [sys.modules[name] for name in modulenames]\n# print(\"AAAAAAAAAAAAAAAAAAAAAA\")\n# for ii in allmodules:\n# print(ii)\n# print(\"AAAAAAAAAAAAAAAAAAAAAA\")\n# os.environ[\"ROS_MASTER_URI\"] = 'http://%s:%d' % (str(host), port)\n# #input('aaa')\n# #prueba1 = talk.talker()\n\n#from talker_thread import background_imports\n\n#import threading\n#import multiprocessing\n#lock = multiprocessing.Lock()\n#for ii in range(multi_ros_gz.total_ros_gz):\n# port = multi_ros_gz.rosgazebos[ii].roscore_port\n# #thread = threading.Thread(target=background_imports, args=['localhost', port])\n# #thread.setDaemon(True)\n# #thread.start()\n# #thread.join()\n# process = multiprocessing.Process(target=background_imports, args=['localhost', port])\n# process.start()\n\n#print('sleeping')\n#time.sleep(5)\n\n# multi_ros_gz.start()\n\n\ntime.sleep(1)\n\nraw_input('send_action')\n\nimport numpy as np\n\nfor ii in range(200):\n print('get observation...')\n print(multi_ros_gz.rosgazebos[0].get_observation())\n print('get state...')\n print(multi_ros_gz.rosgazebos[0].get_state())\n print('sending action...')\n multi_ros_gz.rosgazebos[0].send_action(np.random.randn(3))\n #multi_ros_gz.rosgazebos[0].send_action(np.array([-0.2, 0.1, 0.4]))\n time.sleep(0.1)\n\nraw_input('reset')\nmulti_ros_gz.rosgazebos[0].reset()\n\nraw_input('stop')\n#time.sleep(5)\n\nmulti_ros_gz.stop()\n\ntime.sleep(1)\n\n#raw_input('restart')\n#multi_ros_gz.restart()\n\n#raw_input('stop')\n#multi_ros_gz.stop()\n\nraw_input('finish_script')\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 33, "blob_id": "8152187c44fb73b2dacbbb8d6fe45c0a0cff2276", "content_id": "116f0709bc4687fa67d32422eb48d7598495d4e3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "permissive", "max_line_length": 33, "num_lines": 2, "path": "/robolearn/torch/utils/nn/modules/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .huber_loss import HuberLoss\nfrom .layer_norm import LayerNorm\n" }, { "alpha_fraction": 0.8265306353569031, "alphanum_fraction": 0.8265306353569031, "avg_line_length": 38.20000076293945, "blob_id": "3fb8826ff18625173b986af29699e9e402c9a406", "content_id": "7749a15c35de3fc34b7214d542d78ee3da5d3e75", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "permissive", "max_line_length": 43, "num_lines": 5, "path": "/robolearn/utils/stdout/__init__.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "from .print_color import PrintColor\nfrom .print_color import change_print_color\nfrom .print_robotio import print_robotio\nfrom .print_skull import print_skull\nfrom .progress_bar import ProgressBar\n" }, { "alpha_fraction": 0.684920608997345, "alphanum_fraction": 0.7055555582046509, "avg_line_length": 38.375, "blob_id": "8f3dca5316876fd1efc7638ddeddbe0739f84354", 
"content_id": "c555c25bbd00d90c060b701db7d57768837de3e9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1260, "license_type": "permissive", "max_line_length": 116, "num_lines": 32, "path": "/scenarios/tests/load_plot_sample.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom robolearn.old_utils.plot_utils import plot_sample_list, plot_sample_list_distribution, plot_sample\n\ngps_directory_name = 'LOG_2017-07-20_08:24:36'\n\nsample_number = 0 # If None, plot all the samples and show their mean, min and max\ncond = 0 # Condition number\n\nplot_states = True\nplot_actions = True\nplot_obs = False\n\ngps_path = '/home/desteban/workspace/robolearn/scenarios/' + gps_directory_name\n\nsample = pickle.load(open(gps_path+'/cond_'+str('%02d' % cond)+'_sample_'+str('%02d' % sample_number)+'.pkl', 'rb'))\n\n# for cond in range(total_conditions):\n# plot_sample_list(sample_list[cond], data_to_plot='actions', block=False, cols=3)\n# #plot_sample_list(sample_list[cond], data_to_plot='states', block=False, cols=3)\n# #plot_sample_list(sample_list[cond], data_to_plot='obs', block=False, cols=3)\n# raw_input('Showing plots')\n\nif plot_actions:\n plot_sample(sample, data_to_plot='actions', block=False, cols=3, color='black')\nif plot_states:\n plot_sample(sample, data_to_plot='states', block=False, cols=3, color='green')\nif plot_obs:\n plot_sample(sample, data_to_plot='obs', block=False, cols=3, color='blue')\n\nraw_input('Showing plots. Press a key to close...')\n" }, { "alpha_fraction": 0.5401399731636047, "alphanum_fraction": 0.5421984195709229, "avg_line_length": 30.545454025268555, "blob_id": "3408b3edc38dbf1b652d5b118819abd65bbffdf5", "content_id": "c4406b0a60fb6203afcff25ec3fe946f9c7a9313", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2429, "license_type": "permissive", "max_line_length": 70, "num_lines": 77, "path": "/robolearn/torch/utils/nn/networks/mlp.py", "repo_name": "domingoesteban/robolearn", "src_encoding": "UTF-8", "text": "import torch.nn as nn\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.nn import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\n\n\nclass Mlp(PyTorchModule):\n def __init__(\n self,\n hidden_sizes,\n input_size,\n output_size,\n hidden_activation='relu',\n output_activation='linear',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0.1,\n output_w_init='xavier_normal',\n output_b_init_val=0.1,\n layer_norm=False,\n layer_norm_kwargs=None,\n ):\n self.save_init_params(locals())\n super(Mlp, self).__init__()\n\n if layer_norm_kwargs is None:\n layer_norm_kwargs = dict()\n\n self.input_size = input_size\n self.output_size = output_size\n\n self.hidden_activation = ptu.get_activation(hidden_activation)\n self.output_activation = ptu.get_activation(output_activation)\n\n self.layer_norm = layer_norm\n self.fcs = []\n self.layer_norms = []\n in_size = input_size\n\n for i, next_size in enumerate(hidden_sizes):\n fc = nn.Linear(in_size, next_size)\n in_size = next_size\n ptu.layer_init(\n layer=fc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"fc{}\".format(i), fc)\n self.fcs.append(fc)\n\n if self.layer_norm:\n ln = LayerNorm(next_size)\n self.__setattr__(\"shared_layer_norm{}\".format(i), ln)\n 
self.layer_norms.append(ln)\n\n self.last_fc = nn.Linear(in_size, output_size)\n ptu.layer_init(\n layer=self.last_fc,\n option=output_w_init,\n activation=output_activation,\n b=output_b_init_val\n )\n\n def forward(self, nn_input, return_preactivations=False):\n h = nn_input\n for i, fc in enumerate(self.fcs):\n h = fc(h)\n if self.layer_norm and i < len(self.fcs) - 1:\n h = self.layer_norms[i](h)\n h = self.hidden_activation(h)\n preactivation = self.last_fc(h)\n output = self.output_activation(preactivation)\n\n if return_preactivations:\n return output, preactivation\n else:\n return output\n" } ]
"num_files": 209
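Each row in this dump follows the same layout: scalar repository metadata, a `files` array of per-file JSON entries, and a closing `num_files` count. Below is a minimal sketch of reading one file entry, under the assumption that `raw_entry` holds a single entry from a `files` array re-joined into one valid JSON object string (the dump wraps long rows across physical lines, so entries must be stitched back together first); the path and contents used here are hypothetical, but the keys match those visible in the entries themselves.

import json

# Assumption: `raw_entry` is one `files` entry, re-joined into valid JSON.
# The path and text below are made-up placeholders, not from the dataset.
raw_entry = ('{"path": "/example/demo.py", "language": "Python", '
             '"num_lines": 2, "text": "import cv2\\nprint(cv2.__version__)\\n"}')

entry = json.loads(raw_entry)
code = entry["text"]  # file contents; newlines are stored escaped in the dump
print(entry["path"], entry["language"], entry["num_lines"], "lines")
print(code.splitlines()[0])  # first source line of the archived file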
"repo_name": "oscarteeninga/Metody-Rozpoznawania-Obrazow"
"repo_url": "https://github.com/oscarteeninga/Metody-Rozpoznawania-Obrazow"
"snapshot_id": "e4fb4a42901119d91e5564d35f754d47443fcb3b"
"revision_id": "862737c0742e694b17ce89e3d44d98cd69e8205f"
"directory_id": "fa8febbb0cf7bddd052f5d71a9a7b304d002f57c"
"branch_name": "refs/heads/master"
"visit_date": "2023-01-21T13:02:41.099606"
"revision_date": "2020-11-24T23:52:20"
"committer_date": "2020-11-24T23:52:20"
"github_id": 303652766
"star_events_count": 0
"fork_events_count": 0
"gha_license_id": null
"gha_created_at": null
"gha_updated_at": null
"gha_pushed_at": null
"gha_language": null
[ { "alpha_fraction": 0.5853837728500366, "alphanum_fraction": 0.6121924519538879, "avg_line_length": 27.082473754882812, "blob_id": "1d4e56a2b01d3db68dcc47cb211de97a3d78d443", "content_id": "772097325e9b73ac5272e3906adc50ed774179cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 84, "num_lines": 97, "path": "/lab3/cs.py", "repo_name": "oscarteeninga/Metody-Rozpoznawania-Obrazow", "src_encoding": "UTF-8", "text": "from pylbfgs import owlqn\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.optimize as spopt\nimport scipy.fftpack as spfft\nimport scipy.ndimage as spimg\nimport imageio\nimport cvxpy as cvx\n\ndef dct2(x):\n return spfft.dct(spfft.dct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)\n\ndef idct2(x):\n return spfft.idct(spfft.idct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)\n\ndef evaluate(x, g, step):\n \"\"\"An in-memory evaluation callback.\"\"\"\n\n # we want to return two things: \n # (1) the norm squared of the residuals, sum((Ax-b).^2), and\n # (2) the gradient 2*A'(Ax-b)\n\n # expand x columns-first\n x2 = x.reshape((nx, ny)).T\n\n # Ax is just the inverse 2D dct of x2\n Ax2 = idct2(x2)\n\n # stack columns and extract samples\n Ax = Ax2.T.flat[ri].reshape(b.shape)\n\n # calculate the residual Ax-b and its 2-norm squared\n Axb = Ax - b\n fx = np.sum(np.power(Axb, 2))\n\n # project residual vector (k x 1) onto blank image (ny x nx)\n Axb2 = np.zeros(x2.shape)\n Axb2.T.flat[ri] = Axb # fill columns-first\n\n # A'(Ax-b) is just the 2D dct of Axb2\n AtAxb2 = 2 * dct2(Axb2)\n AtAxb = AtAxb2.T.reshape(x.shape) # stack columns\n\n # copy over the gradient vector\n np.copyto(g, AtAxb)\n\n return fx\n\n# fractions of the scaled image to randomly sample at\nsample_sizes = (0.5, 0.25, 0.1, 0.05)\n\n# read original image\nXorig = imageio.imread('catalina2.png')\nny,nx,nchan = Xorig.shape\n\n# for each sample size\nmasks = [np.zeros(Xorig.shape, dtype='uint8') for s in sample_sizes]\nfor i,s in enumerate(sample_sizes):\n\n # create random sampling index vector\n k = round(nx * ny * s)\n ri = np.random.choice(nx * ny, k, replace=False) # random sample of indices\n print(ri[0:200])\n # def filtr(x):\n # if x > nx*(ny-450) and x < nx*(ny-350):\n # if x % ny < 175 and x % ny > 75:\n # print(\"dupa\")\n # return False\n # else:\n # return True\n # else:\n # return True\n\n # ri = list(filter(filtr, ri))\n n = 2\n ri = [x for x in range(0, nx*ny, 5)]\n print(ri[0:200])\n # for each color channel\n\n X = Xorig[:,].squeeze()\n\n # create images of mask (for visualization)\n Xm = 255 * np.ones(X.shape)\n Xm.T.flat[ri] = X.T.flat[ri]\n masks[i][:,] = Xm\n\n # take random samples of image, store them in a vector b\n b = X.T.flat[ri].astype(float)\n\n # perform the L1 minimization in memory\n Xat2 = owlqn(nx*ny, evaluate, None, 5)\n\n # transform the output back into the spatial domain\n Xat = Xat2.reshape(nx, ny).T # stack columns\n Xa = idct2(Xat)\n imageio.imwrite('catalina2-' + str(i) + str(s) + '.png', Xa)" }, { "alpha_fraction": 0.610691249370575, "alphanum_fraction": 0.6385253667831421, "avg_line_length": 35.66216278076172, "blob_id": "1718c8684ae5ab867a993ca5262ee18b77635f96", "content_id": "be35fb8eccf13541ec07d08312bc515bd1f86356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5425, "license_type": "no_license", "max_line_length": 116, "num_lines": 148, "path": 
"/lab2.2/ght.py", "repo_name": "oscarteeninga/Metody-Rozpoznawania-Obrazow", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport sys\n\n#some basic functions for ease\ndef load_img(name: str,mode: int) -> np.ndarray:\n return cv2.imread(name, mode)\n\ndef show_img(title: str,img: np.ndarray,wait: int) -> int:\n cv2.imshow(title, img)\n k = cv2.waitKey(wait)\n cv2.destroyWindow(title)\n return k\n\ndef save_img(name,img):\n if type(name) == str:\n cv2.imwrite(name, img)\n else:\n [cv2.imwrite(\"out/\"+n+\".jpg\",i) for n,i in zip(name,img)]\n\n\n#finding edge positions only\ndef getEdgePositions(img):\n positions = []\n m, n = img.shape\n for a in range(m):\n for b in range(n):\n if (img[a, b]!=0):\n positions.append((a, b))\n return positions\n\n#reference point is taken as avg of all edge positions pixels\ndef getReferencePoint(edgePositions):\n a = 0\n b = 0\n for i in range(len(edgePositions)):\n a = a + edgePositions[i][0]\n b = b + edgePositions[i][1]\n a = a/len(edgePositions)\n b = b/len(edgePositions)\n return (int(a), int(b))\n\n#constructing the R-table to get structure of template\ndef rTable(img, refPoint, edgePositions):\n sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) #possibly tweaked\n abs_sobel64f = np.absolute(sobelx64f)\n rTable = {}\n for i, point in enumerate(edgePositions):\n rx = refPoint[0] - point[0]\n ry = refPoint[1] - point[1]\n r = (rx, ry)\n phi = abs_sobel64f[point[0], point[1]]\n if(phi not in list(rTable.keys())):\n rTable[phi] = [r]\n else:\n rTable[phi].append(r)\n rTable['refPoint'] = refPoint\n return rTable\n\n#finding rTable for all theta with a jump of angle degrees. The tables are all stored in a list of length 360/angle.\ndef rTableWithRotation(templateCanny,angle=2):\n rTableWithRotation = []\n rows, columns = templateCanny.shape\n for i in range(int(360/angle)):\n theta = angle*i\n M = cv2.getRotationMatrix2D((columns/angle,rows/angle),theta,1)\n templateCannyRotated = cv2.warpAffine(templateCanny,M,(columns,rows))\n templateEdgePositions = getEdgePositions(templateCannyRotated)\n templateRefPoint = getReferencePoint(templateEdgePositions)\n rTableTheta = rTable(templateCannyRotated, templateRefPoint, templateEdgePositions)\n rTableWithRotation.append(rTableTheta)\n return rTableWithRotation\n\n#finding the accumulatorTable for the main picture\ndef accumulatorTable(pictureCanny, template_rTable, angle=2):\n rows = pictureCanny.shape[0]\n columns = pictureCanny.shape[1]\n pictureEdgePositions = getEdgePositions(pictureCanny)\n accumulatorTable = np.ndarray((int(360/angle), rows, columns))\n sobelx64f = cv2.Sobel(pictureCanny,cv2.CV_64F,1,0,ksize=5)\n abs_sobel64f = np.absolute(sobelx64f)\n\n for theta, rTableTheta in enumerate(template_rTable):\n for i, edgePoint in enumerate(pictureEdgePositions):\n phi = abs_sobel64f[edgePoint[0], edgePoint[1]]\n if (phi in list(rTableTheta.keys())):\n temp = rTableTheta[phi]\n for r, vector in enumerate(temp):\n x = edgePoint[0] + vector[0]\n y = edgePoint[1] + vector[1]\n if (x>=0 and x<rows) and (y>=0 and y<columns):\n accumulatorTable[theta, x, y]+=1\n else:\n continue\n\n return accumulatorTable\n\n#getting back the original image\ndef reconstruction(rTable, theta, a, b, pictureCanny, angle=2):\n rTable = rTable[int(theta)]\n pictureEdgePositions = getEdgePositions(pictureCanny)\n draw = np.ones_like(pictureCanny)*255\n mask = np.zeros_like(pictureCanny)*255\n maskingPoints = []\n rows = pictureCanny.shape[0]\n columns = pictureCanny.shape[1]\n 
sobelx64f = cv2.Sobel(pictureCanny,cv2.CV_64F,1,0,ksize=5)\n abs_sobel64f = np.absolute(sobelx64f)\n\n for i, edgePoint in enumerate(pictureEdgePositions):\n phi = abs_sobel64f[edgePoint[0], edgePoint[1]]\n if (phi in list(rTable.keys())):\n temp = rTable[phi]\n for r, vector in enumerate(temp):\n x = a - vector[0]\n y = b - vector[1]\n if (x>=0 and x<rows) and (y>=0 and y<columns):\n cv2.circle(draw,(y, x), 1, (0,0,255), -1)\n maskingPoints.append((y, x))\n else:\n continue\n cv2.fillConvexPoly(mask, np.int32(maskingPoints), (1.0, 1.0, 1.0), 16, 0)\n\n M = cv2.getRotationMatrix2D((columns/angle,rows/angle),theta*angle,1)\n print(theta*angle)\n mask = cv2.warpAffine(mask,M,(columns,rows))\n return draw, mask\n\ndef hough(picture,template,angle=2):\n pictureCanny = cv2.Canny(picture, 100, 200)\n templateCanny = cv2.Canny(template, 100, 200)\n template_rTable = rTableWithRotation(templateCanny,angle)\n picture_accumulatorTable = accumulatorTable(pictureCanny, template_rTable,angle)\n theta, a, b = np.unravel_index(picture_accumulatorTable.argmax(),picture_accumulatorTable.shape)\n draw, mask = reconstruction(template_rTable, theta, a, b, pictureCanny,angle)\n return mask, draw\n\nif __name__ == \"__main__\":\n \n img_path = sys.argv[1]\n temp_path = sys.argv[2]\n img = load_img(img_path,1)\n template = load_img(temp_path,1)\n angle = 2\n mask, draw = hough(img,template, angle)\n cv2.imwrite(\"xd.png\", mask)\n cv2.imwrite(\"xd2.png\", draw)" }, { "alpha_fraction": 0.5706666707992554, "alphanum_fraction": 0.6053333282470703, "avg_line_length": 24.066667556762695, "blob_id": "7a1349fd23ee0e524fc1dc11c81bdeb54af58f9f", "content_id": "0c635d434e770087aa732fea1de027708d6932c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/lab4/scale.py", "repo_name": "oscarteeninga/Metody-Rozpoznawania-Obrazow", "src_encoding": "UTF-8", "text": "import cv2\nimport os\n\ndef scale_image(file, size=(400, 400)):\n if len(file.split(\".\")) == 1 or file.split(\".\")[1] != \"jpg\":\n return\n\n img = cv2.imread(file)\n img_res = cv2.resize(img, dsize=size, interpolation=cv2.INTER_CUBIC)\n cv2.imwrite('scaled/' + file, img_res)\n print(file + \" scaled...\")\n\nfor file in os.listdir():\n print\n scale_image(file)" } ]
"num_files": 3
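The `/lab3/cs.py` entry in the row above recovers an image from a random subset of pixels by minimizing the L1 norm of its 2D DCT coefficients with OWL-QN. A small self-contained check of the `dct2`/`idct2` pair that entry defines (the two functions are copied unchanged from it): with `norm='ortho'` the transforms are exact inverses, so the final reconstruction step `Xa = idct2(Xat)` is lossless given the recovered coefficients. The round-trip test itself is an addition, not part of the archived file.

import numpy as np
import scipy.fftpack as spfft

# dct2/idct2 exactly as defined in the /lab3/cs.py entry above.
def dct2(x):
    return spfft.dct(spfft.dct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)

def idct2(x):
    return spfft.idct(spfft.idct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)

x = np.random.rand(8, 8)
print(np.allclose(idct2(dct2(x)), x))  # True: orthonormal DCT round-trips losslessly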
"repo_name": "deepBrainWH/MLAlgorithm"
"repo_url": "https://github.com/deepBrainWH/MLAlgorithm"
"snapshot_id": "ad71a474967eb74a5a37868726ef5eb7b00e6e06"
"revision_id": "58b09062a8b9f2b550f5168c0e033cf04f7d9a0f"
"directory_id": "9cc3aa74dcd99764f95c599d3d1eb3178d1b83dc"
"branch_name": "refs/heads/master"
"visit_date": "2021-06-09T17:50:30.403792"
"revision_date": "2021-04-20T08:50:05"
"committer_date": "2021-04-20T08:50:05"
"github_id": 158798947
"star_events_count": 3
"fork_events_count": 5
"gha_license_id": null
"gha_created_at": null
"gha_updated_at": null
"gha_pushed_at": null
"gha_language": null
[ { "alpha_fraction": 0.45794767141342163, "alphanum_fraction": 0.4694164991378784, "avg_line_length": 29.672840118408203, "blob_id": "94af14c7006e304bb3aa9ebb6e43b4261c2eb3ea", "content_id": "fd75499cf4838e422b41674544702d6d57cb7c06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4970, "license_type": "no_license", "max_line_length": 92, "num_lines": 162, "path": "/sort/sort_algorithm.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from __future__ import print_function, absolute_import, division\n\nclass Sort:\n def __init__(self):\n self.heap = [-99999]\n \n def quick_sort(self, array, left, right):\n \"\"\"\n :param array: An array that need to sort.\n :param left: left index.\n :param right: right index.\n :return: none return.\n \"\"\"\n i = left + 1\n j = right\n tmp = array[left]\n while i<=j:\n while i<=j and array[i] < tmp:\n i += 1\n while i<=j and array[j] > tmp:\n j -= 1\n if i <= j:\n self.__swap(array, i, j)\n i += 1\n j -= 1\n self.__swap(array, left, j)\n if left < j:\n self.quick_sort(array, left, j-1)\n if i < right:\n self.quick_sort(array, j+1, right)\n\n def select_sort(self, array):\n \"\"\"\n select sort\n :param array:\n :return: none\n \"\"\"\n for i in range(len(array)):\n min = 9999\n min_index = 0\n for j in range(i, len(array)):\n if array[j] < min:\n min = array[j]\n min_index = j\n if min_index != i:\n self.__swap(array, i, min_index)\n\n def insert_sort(self, array):\n \"\"\"\n Insert sort.\n :param array:\n :return:\n \"\"\"\n for i in range(1, len(array)):\n j = i-1\n x = array[i]\n while x < array[j] and j>=0:\n array[j+1] = array[j]\n j -= 1\n array[j+1] = x\n\n def count_sort(self, array):\n min, max = self.__get_min_max(array)\n count = list([0] * (max - min + 1))\n\n\n def __swap(self, arr, a, b):\n tmp = arr[a]\n arr[a] = arr[b]\n arr[b] = tmp\n\n def __get_min_max(self, array):\n min = array[0]\n max = array[0]\n for i in array:\n if i < min:\n min = i\n if i > max:\n max = i\n return min, max\n\n def heap_append_value(self, value):\n length = len(self.heap) - 1\n self.heap.append(value)\n son = length+1\n father = son // 2\n while son != 1 and self.heap[father]>self.heap[son]:\n self.__swap(self.heap, son, father)\n son = father\n father = father // 2\n\n def heap_get_min(self):\n if len(self.heap)<=1:\n return None\n min = self.heap[1]\n self.heap[1] = self.heap.pop(len(self.heap)-1)\n self.heap_extract_sequence(1)\n return min\n\n def heap_extract_sequence(self, father):\n left = father * 2\n right = father * 2 + 1\n if left < len(self.heap) and right < len(self.heap):\n if self.heap[father] > self.heap[left] and self.heap[father] > self.heap[right]:\n if self.heap[left] < self.heap[right]:\n self.__swap(self.heap, father, left)\n father = left\n self.heap_extract_sequence(father)\n else:\n self.__swap(self.heap, father, right)\n father = right\n self.heap_extract_sequence(father)\n elif self.heap[father] > self.heap[left]:\n self.__swap(self.heap, father, left)\n father = left\n self.heap_extract_sequence(father)\n elif self.heap[father] > self.heap[right]:\n self.__swap(self.heap, father, right)\n father = right\n self.heap_extract_sequence(father)\n elif left < len(self.heap):\n if self.heap[father] > self.heap[left]:\n self.__swap(self.heap, father, left)\n father = left\n self.heap_extract_sequence(father)\n elif right < len(self.heap):\n self.__swap(self.heap, father, right)\n father = right\n self.heap_extract_sequence(father)\n\n def 
update_heap(self, index, value):\n try:\n self.heap[index] = value\n self.__update(index)\n except:\n raise IndexError(\"Array index out of bound!\")\n\n def __update(self, index):\n father = index // 2\n left = father * 2\n right = father * 2 + 1\n if self.heap[index] < self.heap[father]:\n self.__swap(self.heap, index, father)\n self.__update(father)\n else:\n self.heap_extract_sequence(index)\n\nif __name__ == \"__main__\":\n arr = [3, 4, 5, 1, 2, 4, 5]\n sort = Sort()\n # sort.quick_sort(array=arr, left=0, right=len(arr)-1)\n # sort.insert_sort(arr)\n # print(arr)\n for i in range(len(arr)):\n sort.heap_append_value(arr[i])\n print(sort.heap)\n # for i in range(5):\n # min = sort.heap_get_min()\n # print(min, sort.heap)\n print(\"update heap=========\")\n sort.update_heap(1, 10)\n print(sort.heap)\n\n" }, { "alpha_fraction": 0.5433130860328674, "alphanum_fraction": 0.5721884369850159, "avg_line_length": 28.93181800842285, "blob_id": "f77fdb5725c0ba462b2158d273d35ef00a041a52", "content_id": "6891e3f862584ebcf31704b1b68800e2ad8113ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1316, "license_type": "no_license", "max_line_length": 80, "num_lines": 44, "path": "/softmax/load_cifar10.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pickle as p\nimport numpy as np\nimport os\n\n\ndef load_CIFAR_BATCH(filename):\n \"\"\"load one batch file\"\"\"\n with open(filename, 'rb') as f:\n datadict = p.load(f, encoding='latin1')\n x = datadict['value']\n y = datadict['labels']\n x = x.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(np.float32)\n y = np.asarray(y, dtype=np.float32)\n return x, y\n\n\ndef load_cifar10(path):\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(path, 'data_batch_%d' % (b))\n x, y = load_CIFAR_BATCH(f)\n xs.append(x)\n ys.append(y)\n xs = np.asarray(xs)\n xt = np.concatenate(xs)\n yt = np.concatenate(ys)\n del xs, ys\n xte, yte = load_CIFAR_BATCH(os.path.join(path, 'test_batch'))\n return xt, yt, xte, yte\n\n\ndef get_normalization_cifar10_data(path):\n x_train, y_train, x_test, y_test = load_cifar10(path)\n X_train = x_train.reshape([x_train.shape[0], -1])\n Y_train = y_train.reshape([y_train.shape[0], -1])\n X_test = x_test.reshape([x_test.shape[0], -1])\n Y_test = y_test.reshape([y_test.shape[0], -1])\n mean = np.mean(X_train, axis=0)\n std = np.std(X_train, axis=0)\n X_train = (X_train - mean)/std\n X_test = (X_test - mean)/std\n return X_train, Y_train, X_test, Y_test" }, { "alpha_fraction": 0.5892857313156128, "alphanum_fraction": 0.6227678656578064, "avg_line_length": 27.03125, "blob_id": "2608a6ce2abafcad58065e1a644414cf404387ec", "content_id": "2d39340993876dc3562cc67463bd0f0e45b8d95f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 64, "num_lines": 32, "path": "/operate_sys/short_task_priority.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 先来先服务调度算法实现\nfrom __future__ import print_function, absolute_import, division\n\nfrom operate_sys.priority_algorithm import PCB\nfrom collections import deque\nimport time\n\nclass ShortTaskPriority:\n def __init__(self):\n self.progresses = deque()\n\n def __progresses_in(self):\n self.progresses.append(PCB(1, 10, 0))\n self.progresses.append(PCB(3, 3, 0.2))\n self.progresses.append(PCB(2, 2, 0.4))\n self.progresses.append(PCB(5, 5, 0.5))\n 
self.progresses.append(PCB(4, 1, 0.8))\n self.progresses.append(PCB(7, 9, 1.2))\n self.progresses.append(PCB(6, 6, 1.5))\n\n def progress_in(self):\n self.__progresses_in()\n\n\nif __name__ == '__main__':\n short = ShortTaskPriority()\n short.progress_in()\n for progress in short.progresses:\n print(progress)\n progress.status=1\n print(progress, '\\n')\n time.sleep(1)" }, { "alpha_fraction": 0.49132177233695984, "alphanum_fraction": 0.5120159983634949, "avg_line_length": 29.59183692932129, "blob_id": "4853bf6b92f785abd5dddc7b2dc1088892637ff2", "content_id": "c967e1d987c53361d0c64edee2cd293e75186302", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1768, "license_type": "no_license", "max_line_length": 77, "num_lines": 49, "path": "/reinforcement_learning/DQN/exp1/run_this.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from maze_env import Maze\nfrom RL_brain import DeepQNetwork\n\n\ndef run_maze():\n step = 0\n for episode in range(300):\n # 初始化游戏环境\n observation = env.reset()\n\n while True:\n # 刷新游戏环境\n env.render()\n # RL根据当前状态选择一个action\n action = RL.choose_action(observation)\n # RL 选择一个action后, agent有一个新的观测值, 同时获得一个奖励, 和是否结束游戏的标识位: done\n observation_, reward, done = env.step(action)\n # 存储到replay memory中, 用于后面随机选择(s, a, r, s_)进行训练\n RL.store_transition(observation, action, reward, observation_)\n # 探索次数大于200时开始学习是为了防止replay memory中没得训练数据; 然后每探索5步训练一次\n if (step > 200) and (step % 5 == 0):\n RL.learn()\n # 更改环境当前状态\n observation = observation_\n # 找到宝藏, 游戏结束, 退出这一次玩耍, 进入下一次的训练\n if done:\n break\n step += 1\n\n # end of game\n print('game over')\n env.destroy()\n\n\nif __name__ == \"__main__\":\n # maze game\n env = Maze()\n print(\"actions num: \", env.action_size, \"\\nfeatures: \", env.feature_size)\n RL = DeepQNetwork(env.action_size, env.feature_size,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=200,\n memory_size=2000,\n # output_graph=True\n )\n env.after(100, run_maze)\n env.mainloop()\n RL.plot_cost()" }, { "alpha_fraction": 0.601190447807312, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 14.363636016845703, "blob_id": "02ae4cbb56847889e19d6d2974cc1df5942dc396", "content_id": "80a404d4bf5287fe9bc02a52498ede0031f1e382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 202, "license_type": "no_license", "max_line_length": 59, "num_lines": 11, "path": "/recomend/cf/README.md", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "## 协同过滤\n\n### 计算相关系数\n<p>\n <img src=\"./user-based-collaborative-filtering.png\"/>\n</p>\n\n### 计算估计打分值\n<p>\n <img src=\"./user-based-collaborative-filtering-1.png\"/>\n</p>" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.6095238327980042, "avg_line_length": 13.785714149475098, "blob_id": "cfe39cf377ab52c62798a0ae3a0c478aefb09206", "content_id": "1ba3de9711777191a91a49b74cf5544f5fff36d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 30, "num_lines": 14, "path": "/KNN/mynumpy.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding utf-8 -*-\nimport numpy as np\ndata = np.random.random([1,8])\nprint(data)\ndata.shape = (2,4)\nprint(data)\n\na= np.arange(6,10)\nb= np.arange(5,9)\nd= np.arange(11,15)\ne = a*b\nc= a+b\nprint(c)\nprint(e)\n\n\n\n" }, { "alpha_fraction": 
0.4737562835216522, "alphanum_fraction": 0.5038794875144958, "avg_line_length": 37.438594818115234, "blob_id": "bfaf534188fe9c5f8e96e56d4cb705dfdeb74b20", "content_id": "0286efc5b890ee0cf24dd3179b9767718657f86f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2191, "license_type": "no_license", "max_line_length": 125, "num_lines": 57, "path": "/nn/conv.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\n\ndef conv_forward_naivel(x, w, b, conv_param):\n \"\"\"\n Input:\n - x: 4-dim image data: (N, H, W, C), which represent: image's number, image's width, image's height, image's channel.\n - w: 4-dim convolution kernel: (F, C, HH, WW), which represent: input channel, output channel, kernel height,\n kernel width.\n - b: bias.\n - conv_param: dictionary type. key is :\n - stride: the number of spans of the data to be convolved.\n - pad: the number of zero padding of input data.\n Returns: tuple\n - out: output data(N, F, H1, W1)\n H1 = 1 + (H - HH + 2P)/stride\n W1 = 1 + (W - WW + 2P)/stride\n - cache: (x, w, b, conv_param)\n \"\"\"\n N, H, W, C = x.shape[0], x.shape[1], x.shape[2], x.shape[3]\n CC, HH, WW = w.shape[1], w.shape[2], w.shape[3]\n\n pad = conv_param['pad']\n stride = conv_param['stride']\n\n x_pad = np.pad(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant',\n constant_values=[[0, 0], [0, 0], [0, 0], [0, 0]])\n\n Hhat = 1 + (H - HH + 2 * pad) // stride\n What = 1 + (W - WW + 2 * pad) // stride\n\n out = np.zeros((N, Hhat, What, CC))\n for n in range(N):\n for f in range(CC):\n for i in range(Hhat):\n for j in range(What):\n kernel = w[:, f, :, :]\n kernel_reshape = kernel.reshape(kernel.shape[1], kernel.shape[2], kernel.shape[0])\n out[n, i, j, f] = np.sum(x_pad[n, i * stride: i * stride + HH, j * stride:j * stride + WW, :] \\\n * kernel_reshape) + b[f]\n cache = (x, w, b, conv_param)\n return out, cache\n\n\nif __name__ == '__main__':\n image = np.ones([1, 5, 5, 3]) * 2\n cv2.imshow(\"image\", np.asarray(image.reshape([5, 5, 3]), np.uint8))\n w = np.ones([3, 3, 3, 3]) * 3\n b = np.ones([3])\n conv_param = {'pad': 2, 'stride': 1}\n out, cache = conv_forward_naivel(image, w, b, conv_param)\n print(out.shape)\n cv2.imshow(\"conv\", np.asarray(out.reshape([7, 7, 3]), np.uint8))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n print(out.reshape([7, 7, 3]))\n" }, { "alpha_fraction": 0.7351077198982239, "alphanum_fraction": 0.7389100193977356, "avg_line_length": 28.259260177612305, "blob_id": "f9c9e93923adf5173f60f899a103d5ceed55f6f2", "content_id": "02e747995f4022c637f48b5c23da7d87d3622cd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "no_license", "max_line_length": 65, "num_lines": 27, "path": "/nlp/nlp_tokenizer.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nsentences = [\n 'I love my dog.',\n 'I love my cat',\n 'Do you love my pig?',\n 'Do you think my dog is amazing?'\n]\ntokenizer = Tokenizer(num_words=100, oov_token='<OOV>')\ntokenizer.fit_on_texts(sentences)\n\nword_index = tokenizer.word_index\n\ntest_data = [\n 'I really love my dog',\n 'my dog very loves my manatee'\n]\nsequences = tokenizer.texts_to_sequences(sentences)\ntest_sequences = tokenizer.texts_to_sequences(test_data)\n# matrix = 
tokenizer.texts_to_matrix(sentences)\npadded = pad_sequences(sequences, padding='post')\nprint(padded)\nprint(word_index)\nprint(sequences)\n# print(matrix)\nprint('test sequences:', test_sequences)" }, { "alpha_fraction": 0.4898032248020172, "alphanum_fraction": 0.5109123587608337, "avg_line_length": 28.11458396911621, "blob_id": "415edfa4a76e094c2a6fdca09408b272a3c1d855", "content_id": "2f5a8dea78c4008f628daf8832610ff561aaecdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2799, "license_type": "no_license", "max_line_length": 105, "num_lines": 96, "path": "/reinforcement_learning/rl.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport time\n\nnp.random.seed(2)\n\nN_STATE = 6 # the length of the 1-dimensional world.\nACTION = ['left', 'right'] # available actions\nEPSILON = 0.9 # greedy policy\nALPHA = 0.1 # learning rate\nGAMMA = 0.9 # discount factor\nMAX_EPISODES = 13 # maximum number of episodes.\nFRESH_TIME = 0.3 # refresh interval for one move step.\n\ndef build_q_table(n_state, actions):\n table = pd.DataFrame(np.zeros([n_state, len(actions)]), #q_table initial values.\n columns=actions# action's name\n )\n return table\n\ndef choose_action(state, q_table):\n # This is how to choose an action.\n state_actions = q_table.iloc[state, :]\n if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()):\n action_name = np.random.choice(ACTION)\n else:\n action_name = state_actions.idxmax()\n return action_name\n\ndef get_env_feedback(S, A):\n # This is how the agent interacts with the environment\n if A == 'right':\n if S == N_STATE - 2:\n S_ = 'terminal'\n R = 1\n else:\n S_ = S+1\n R = 0\n else:\n R = 0\n if S == 0:\n S_ = S\n else:\n S_ = S-1\n return S_, R\n\ndef update_env(S, episode, step_counter):\n # This is how the environment is updated.\n env_list = ['-']*(N_STATE-1) + ['T'] # our environment -----T\n if S == 'terminal':\n interaction = 'Episode %s: total_steps = %s' % (episode + 1, step_counter)\n print('\\r{}'.format(interaction))\n time.sleep(2)\n print('\\r', end='')\n else:\n env_list[S] = 'o'\n interaction = ''.join(env_list)\n print('\\r{}'.format(interaction), end='')\n time.sleep(FRESH_TIME)\n\ndef rl():\n q_table = build_q_table(N_STATE, ACTION)\n for episode in range(MAX_EPISODES):\n step_counter = 0\n S = 3\n is_terminated = False\n update_env(S, episode, step_counter)\n while not is_terminated:\n A = choose_action(S, q_table)\n S_, R = get_env_feedback(S, A)\n q_predict = q_table.loc[S, A]\n if S_ != 'terminal':\n q_target = R + GAMMA * q_table.iloc[S_, :].max()\n else:\n q_target = R\n is_terminated = True\n q_table.loc[S, A] += ALPHA * (q_target - q_predict) # update\n S = S_\n\n update_env(S, episode, step_counter+1)\n step_counter += 1\n return q_table\n\nif __name__ == \"__main__\":\n q_table = rl()\n print('\\r\\nQ-table:\\n')\n print(q_table)\n# if __name__ == '__main__':\n# i = 0\n# len_bar = 20\n# a = 0\n# while i<999999:\n# a = 999999 // 20\n# dot = i // a\n# print('\\r' + dot * '=' + '>' + '.'*(20-dot) + '|' + '%.2f'% ((i / 999999) * 100) + '%', end='')\n# i += 1\n" }, { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6747967600822449, "avg_line_length": 17.714284896850586, "blob_id": "05eee8e01d4b4f72800aeb3a31d5802766c73be6", "content_id": "39923453a3e912819d2bf64a446e334853197cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", 
"max_line_length": 31, "num_lines": 7, "path": "/image_process/frame_sub.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.array([1,3,5,7])\ny = np.multiply(x, 3)\nplt.plot(x, y)\nplt.show()" }, { "alpha_fraction": 0.542176365852356, "alphanum_fraction": 0.5624050498008728, "avg_line_length": 47.289573669433594, "blob_id": "67ed075628709752fdd2f8c8c5f0f52e7369d527", "content_id": "85f27cff2a4d83b455f8b6299b51a8c306265e99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12557, "license_type": "no_license", "max_line_length": 169, "num_lines": 259, "path": "/reinforcement_learning/DSR-Maze/maze_agent.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport os\nimport pickle\n\ntf.disable_v2_behavior()\ntf.disable_eager_execution()\n\n\nclass AgentDSR(object):\n def __init__(self, env, n_steps=3000, output_graph=False):\n self.env = env # environment\n self.n_steps = n_steps # Training steps.\n self.output_graph = output_graph\n self.n_actions = self.env.action_space.n\n self.feature_shape = self.env.observation_space.shape\n self.n_features = np.prod(np.array(self.feature_shape))\n self.save_every_n_episode = 100\n self.start_learning_after_n_step = 3000\n self.memory_size = 256\n self.memory = np.zeros((self.memory_size, 3 + self.n_features * 2))\n self.memory_counter = 0\n self.batch_size = 32\n self.gamma = 0.9\n self.epsilon = 1.0\n self.epsilon_min = 0.07\n self.epsilon_decrement = (self.epsilon - self.epsilon_min) / 8000.\n self.reward_his = []\n\n self.fi_size = 64\n self.sess = tf.Session()\n self.__get_model_save_path()\n self.__build_model()\n\n self.loss1_p = tf.placeholder(tf.float32, name='autoencoder_loss')\n self.loss2_p = tf.placeholder(tf.float32, name='reward_loss')\n self.loss3_p = tf.placeholder(tf.float32, name='mu_loss')\n self.loss_p = tf.placeholder(tf.float32, name='all_loss')\n if self.output_graph:\n self.summary = [tf.summary.scalar('loss1_autoencoder_loss', self.loss1_p),\n tf.summary.scalar('loss2_reward_loss', self.loss2_p),\n tf.summary.scalar('loss3_mu_loss', self.loss3_p),\n tf.summary.scalar('loss', self.loss_p)]\n self.merged = tf.summary.merge_all()\n self.__ouput_graph()\n\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver(max_to_keep=10)\n\n def __build_model(self):\n from tensorflow.python.ops.init_ops import VarianceScaling\n\n def lecun_normal(seed=None):\n return VarianceScaling(seed=seed)\n\n w_initializer = lecun_normal()\n\n self.s_p = tf.placeholder(tf.float32, [None, self.n_features], 'state')\n self.decoded_s_p = tf.placeholder(tf.float32, [None, self.n_features], \"decoded_state\")\n self.r_p = tf.placeholder(tf.float32, [None, 1], 'r')\n self.fi_eval_p = tf.placeholder(tf.float32, [None, self.fi_size], name='fi')\n self.mu_next_p = tf.placeholder(tf.float32, [None, self.fi_size], name='mu_next')\n self.action_index_p = tf.placeholder(tf.int32, name='action_index')\n\n self.mus_eval = []\n self.mus_target = []\n\n with tf.variable_scope('encoder'):\n en_1 = tf.layers.dense(self.s_p, 32, tf.nn.relu6, kernel_initializer=w_initializer, name='en_1')\n en_2 = tf.layers.dense(en_1, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='en_2')\n en_3 = tf.layers.dense(en_2, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='en_3')\n self.encoded = tf.layers.dense(en_3, self.fi_size, 
tf.nn.softmax, kernel_initializer=w_initializer, name='encoded')\n\n with tf.variable_scope('decoder'):\n de_1 = tf.layers.dense(self.encoded, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='de_1')\n de_2 = tf.layers.dense(de_1, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='de_2')\n de_3 = tf.layers.dense(de_2, 32, tf.nn.relu6, kernel_initializer=w_initializer, name='de_3')\n self.decoded = tf.layers.dense(de_3, self.n_features, kernel_initializer=w_initializer, name='decoded')\n\n with tf.variable_scope('reward'):\n self.R = tf.layers.dense(self.fi_eval_p, 1, tf.nn.relu6, kernel_initializer=w_initializer, name='R')\n\n # for i in range(self.n_actions):\n # with tf.variable_scope('mu_%d' % i):\n # mu_1 = tf.layers.dense(self.fi_eval_p, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='action_%d_mu1' % i)\n # mu_2 = tf.layers.dense(mu_1, 64, tf.nn.relu6, kernel_initializer=w_initializer, name='action_%d_mu2' % i)\n # mu_3 = tf.layers.dense(mu_2, self.fi_size, tf.nn.relu6, kernel_initializer=w_initializer, name='action_%d_mu3' % i)\n # self.mus_eval.append(mu_3)\n\n\n with tf.variable_scope('encoder_loss'):\n self.loss1 = tf.reduce_mean(tf.squared_difference(self.decoded, self.decoded_s_p, name='loss1'))\n\n with tf.variable_scope('reward_loss'):\n self.loss2 = tf.reduce_mean(tf.squared_difference(self.R, self.r_p, name='loss2'))\n\n with tf.variable_scope('mu_loss'):\n self.loss3 = tf.reduce_mean(tf.squared_difference(self.fi_eval_p + self.gamma * self.mu_next_p, tf.gather(self.mus_eval, self.action_index_p), name='loss3'))\n\n self.loss = self.loss1 + self.loss2 + self.loss3\n\n with tf.variable_scope('train'):\n self.train_op1 = tf.train.RMSPropOptimizer(0.0001, 0.9).minimize(self.loss1)\n self.train_op2 = tf.train.RMSPropOptimizer(0.0001, 0.9).minimize(self.loss2)\n self.train_op3 = tf.train.RMSPropOptimizer(0.0001, 0.9).minimize(self.loss3)\n\n def learn(self, step):\n \"\"\"\n :return: Learning network's parameters.\n \"\"\"\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size, replace=False)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n batch_memory = self.memory[sample_index, :]\n n_features_tensor = [self.batch_size] + [dim for dim in self.feature_shape]\n # ------------------------------------------------------------------- #\n s = np.reshape(batch_memory[:, :self.n_features], n_features_tensor)\n actions = batch_memory[:, self.n_features].astype(int)\n rewards = np.reshape(batch_memory[:, self.n_features + 1], (self.batch_size, 1))\n s_ = batch_memory[:, -self.n_features:]\n w = self.sess.run(self.get_w())\n # ------------------------------------------------------------------- #\n # autoencoder loss.\n loss1, _ = self.sess.run([self.loss1, self.train_op1], feed_dict={self.s_p: s, self.decoded_s_p: s})\n\n fi = self.sess.run(self.encoded, feed_dict={self.s_p: s})\n fi_ = self.sess.run(self.encoded, feed_dict={self.s_p: s_})\n\n # reward loss.\n loss2, _ = self.sess.run([self.loss2, self.train_op2], feed_dict={self.fi_eval_p: fi, self.r_p: rewards})\n\n # mu loss.\n sum_loss3 = 0.\n for i in range(actions.shape[0]):\n # batch iteration.\n mus_ = self.sess.run(self.mus_eval, feed_dict={self.fi_eval_p: np.expand_dims(fi_[i], axis=0)})\n max_action_index_s_ = np.argmax(np.matmul(mus_, w))\n loss3, _ = self.sess.run([self.loss3, self.train_op3], feed_dict={self.fi_eval_p: np.expand_dims(fi[i],axis=0),\n self.mu_next_p: mus_[max_action_index_s_],\n 
self.action_index_p: actions[i]})\n sum_loss3 += loss3\n mean_loss3 = sum_loss3 / actions.shape[0]\n if self.output_graph:\n [s_loss1, s_loss2, s_loss3, s_loss] = self.sess.run(self.summary, feed_dict={self.loss1_p: loss1, self.loss2_p: loss2,\n self.loss3_p: mean_loss3, self.loss_p: loss1 + loss2 + mean_loss3})\n self.train_writer.add_summary(s_loss1, step)\n self.train_writer.add_summary(s_loss2, step)\n self.train_writer.add_summary(s_loss3, step)\n self.train_writer.add_summary(s_loss, step)\n self.train_writer.flush()\n\n return loss1, loss2, mean_loss3\n\n def train(self):\n global action\n episode = 0 # number of episodes trained so far\n step = 0\n reward_avg_max = 0\n loss3 = 9.999\n\n while step < self.n_steps:\n observation = self.env.reset()\n done = False\n episode_reward = 0.0\n while not done:\n self.env.render()\n action = self.choose_action(observation, test=False)\n observation_, reward, done, info = self.env.step(action)\n episode_reward += reward\n self.store_transition(observation, action, reward, done, observation_)\n if step > self.start_learning_after_n_step:\n loss1, loss2, loss3 = self.learn(step)\n print('Step: %i, Episode: %i, Action:%i, Reward:%.2f, Epsilon: %.5f, Loss1:%.5f, Loss2:%.5f, Loss3:%.5f'\n % (step, episode, action, reward, self.epsilon, loss1, loss2, loss3), end='\\r')\n self.epsilon = self.epsilon - self.epsilon_decrement if self.epsilon > self.epsilon_min else self.epsilon_min # decreasing epsilon\n observation = observation_\n step += 1\n episode += 1\n self.reward_his.append(episode_reward)\n if step < 2500:\n # during the early steps, save the model every save_every_n_episode episodes\n if episode % self.save_every_n_episode == 0:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n self.saver.save(self.sess, os.path.join(self.model_path, 'model-dqn'), global_step=episode)\n else:\n rwd_avg = np.mean(self.reward_his[-20:])\n if rwd_avg > reward_avg_max:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n self.saver.save(self.sess, os.path.join(self.model_path, 'model-dqn'), global_step=episode)\n reward_avg_max = rwd_avg\n print(\"Saving best model with avg reward: \", reward_avg_max)\n if episode % self.save_every_n_episode == 0:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n print('Step: %i/%i, Episode: %i, Action: %i, Episode Reward: %.0f, Epsilon: %.2f, Loss: %.5f' % (\n step, self.n_steps, episode, action, episode_reward, self.epsilon, loss3))\n self.env.close()\n\n def get_w(self):\n \"\"\"\n :return: the W tensor (weights of the reward layer).\n \"\"\"\n return self.sess.graph.get_tensor_by_name('reward/R/kernel:0')\n\n def __output_graph(self):\n \"\"\"\n Write the computation graph for TensorBoard.\n :return:\n \"\"\"\n directory = \"./log\"\n try:\n if not os.path.exists(directory):\n os.mkdir(directory)\n except:\n print(\"Failed to create result directory: %s\" % directory)\n directory = \"./\"\n self.train_writer = tf.summary.FileWriter(directory, self.sess.graph)\n\n def store_transition(self, s, a, r, d, s_):\n \"\"\"\n :param s: current state.\n :param a: chosen action.\n :param r: reward.\n :param d: whether the episode is done.\n :param s_: next state.\n \"\"\"\n transition = np.hstack((np.reshape(s, [-1]), [a, r, int(d)], np.reshape(s_, [-1])))\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def choose_action(self, observation, test=False):\n \"\"\"\n :param observation: current state.\n :param test: if True, act almost greedily (epsilon = 0.01).\n :return: action index.\n \"\"\"\n if test: self.epsilon = 0.01\n if np.random.uniform() > self.epsilon:\n observation = np.expand_dims(observation, 0)\n encoded 
= self.sess.run(self.encoded, feed_dict={self.s_p: observation})\n mus_eval = self.sess.run(self.mus_eval, feed_dict={self.fi_eval_p: encoded})\n mus = tf.constant(np.squeeze(np.stack(mus_eval), axis=1))\n Q_tensor = tf.matmul(mus, self.get_w())\n action_index = int(self.sess.run(tf.argmax(Q_tensor))[0])\n else:\n action_index = np.random.randint(0, self.n_actions)\n return action_index\n\n def __get_model_save_path(self):\n directory = \"./model\"\n try:\n if not os.path.exists(directory):\n os.mkdir(directory)\n except:\n print(\"Failed to create result directory.\")\n directory = \"./\"\n self.reward_his_path = os.path.join(directory, \"reward_his_dqn.pkl\")\n self.model_path = directory\n" }, { "alpha_fraction": 0.484197735786438, "alphanum_fraction": 0.48824959993362427, "avg_line_length": 37.53125, "blob_id": "ff6181000a6a8d89fd9180e9c21094bb821120af", "content_id": "3a055bc7e9b25b058d4b4b90330dc212f2307001", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2468, "license_type": "no_license", "max_line_length": 116, "num_lines": 64, "path": "/tree/binary_search_tree.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "class TreeNode:\n def __init__(self, value=None):\n self.parent = None\n self.left = None\n self.right = None\n self.value = value\n\nclass BSTree(object):\n def __init__(self):\n self.root = None\n\n def build_tree(self):\n while True:\n data = input(\"please input the value that you want to insert to the tree:\")\n if data == \"000\":\n print(\"Finishing building a binary search tree!\")\n break\n else:\n node = TreeNode(data)\n if self.root is None:\n self.root = node\n else:\n tmp = self.root\n while True:\n if data > tmp.value and tmp.right is None:\n tmp.right = node\n node.parent = tmp\n break\n elif data > tmp.value and tmp.right is not None:\n tmp = tmp.right\n continue\n elif data < tmp.value and tmp.left is None:\n tmp.left = node\n node.parent = tmp\n break\n elif data < tmp.value and tmp.left is not None:\n tmp = tmp.left\n continue\n else:\n print(\"Data is already in tree!\")\n\n def mid_order(self, root):\n if root is None:\n return\n self.mid_order(root.left)\n print(root.value, end=\" \")\n self.mid_order(root.right)\n\n def get_maximum_depth(self, root):\n return 0 if root is None else max(self.get_maximum_depth(root.left), self.get_maximum_depth(root.right)) + 1\n\n def get_minimum_depth(self, root):\n if root is None:return 0\n if root.left is None and root.right is None:return 1\n if root.left is not None and root.right is None: return self.get_minimum_depth(root.left) + 1\n if root.left is None and root.right is not None: return self.get_minimum_depth(root.right) + 1\n return min(self.get_minimum_depth(root.left), self.get_minimum_depth(root.right)) + 1\n\n# if __name__ == '__main__':\n# tree = BSTree()\n# tree.build_tree()\n# tree.mid_order(tree.root)\n# print('\\n',tree.get_maximum_depth(tree.root))\n# print('\\n',tree.get_minimum_depth(tree.root))\n\n\n" }, { "alpha_fraction": 0.5904669165611267, "alphanum_fraction": 0.5924124717712402, "avg_line_length": 45.20408248901367, "blob_id": "7929f858cc25e19804dc3e755dafedf14fdfb2c1", "content_id": "45df77370a41d0fc7ca55acbd4d314803b504d1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9242, "license_type": "no_license", "max_line_length": 153, "num_lines": 196, "path": "/reinforcement_learning/double_dqn.py", "repo_name": "deepBrainWH/MLAlgorithm", 
"src_encoding": "UTF-8", "text": "import numpy as np\n# import tensorflow.compat.v1 as tf\nimport tensorflow._api.v2.compat.v1 as tf\nimport gym\nimport matplotlib.pyplot as plt\n\ntf.disable_eager_execution()\ntf.disable_v2_behavior()\n\n\nclass DoubleDQN:\n def __init__(self,\n n_actions, n_features, learning_rate=0.005, reward_decay=0.9, replace_decay=0.9, e_greedy=0.9,\n replace_target_iter=200, memory_size=3000, batch_size=32, e_greedy_increment=None,\n output_graph=False, double_q=True, sess: tf.Session = None):\n self.n_actions = n_actions\n self.n_features = n_features\n self.learning_rate = learning_rate\n self.gamma = reward_decay\n self.replace_decay = replace_decay\n self.replace_target_iter = replace_target_iter\n self.epsilon_max = e_greedy\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.e_greedy_increment = e_greedy_increment\n self.output_graph = output_graph\n self.double_q = double_q\n self.memory_counter = 0\n self.learn_step_counter = 0\n self.memory = np.zeros((self.memory_size, self.n_features * 2 + 2))\n self.epsilon = 0 if self.e_greedy_increment is not None else self.epsilon_max\n self._build_net()\n\n e_params = tf.get_collection('eval_net_params')\n t_params = tf.get_collection('target_net_params')\n with tf.variable_scope(\"assign_op\"):\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n if sess is None:\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n else:\n self.sess = sess\n\n if output_graph:\n tf.summary.FileWriter(\"./logs/\", self.sess.graph)\n self.cost_his = [] # 损失函数历史记录\n\n def _build_net(self):\n # Building the structure of neural network.\n def build_layer(s, c_names, n_l1, n_l2, w_initializer, b_initializer):\n with tf.variable_scope('l1'):\n w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)\n b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(s, w1) + b1)\n with tf.variable_scope('l2'):\n w2 = tf.get_variable('w2', [n_l1, n_l2], initializer=w_initializer, collections=c_names)\n b2 = tf.get_variable('b2', [1, n_l2], initializer=b_initializer, collections=c_names)\n l2 = tf.nn.relu(tf.matmul(l1, w2) + b2)\n with tf.variable_scope('l3'):\n w3 = tf.get_variable('w3', [n_l2, self.n_actions], initializer=w_initializer, collections=c_names)\n b3 = tf.get_variable('b3', [1, self.n_actions], initializer=b_initializer, collections=c_names)\n l3 = tf.nn.relu(tf.matmul(l2, w3) + b3)\n return l3\n\n # Building the evaluate net\n\n self.state = tf.placeholder(tf.float32, [None, self.n_features], name='state')\n self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='q_target') # expect output\n\n with tf.variable_scope('eval_net'):\n c_names, n_l1, n_l2, w_initializer, b_initializer = ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 64, 64, tf.random_normal_initializer(\n 0.0, 0.3), tf.random_normal_initializer(0., 0.3)\n self.q_eval = build_layer(self.state, c_names, n_l1, n_l2, w_initializer, b_initializer)\n\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))\n with tf.variable_scope('train'):\n self._train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)\n # Building the target net.\n self.state_ = tf.placeholder(tf.float32, [None, self.n_features], name='state_')\n with tf.variable_scope('target_net'):\n c_names = ['target_net_params', 
tf.GraphKeys.GLOBAL_VARIABLES]\n            self.q_next = build_layer(self.state_, c_names, n_l1, n_l2, w_initializer, b_initializer)\n\n    def store_transition(self, s, a, r, s_):\n        transition = np.hstack((s, [a, r], s_))\n        index = self.memory_counter % self.memory_size\n        self.memory[index, :] = transition\n        self.memory_counter += 1\n\n    def choose_action(self, current_state):\n        current_state = current_state[np.newaxis, :]\n        action_value = self.sess.run(self.q_eval, feed_dict={self.state: current_state})\n        action = np.argmax(action_value)\n        if not hasattr(self, 'q'):\n            self.q = []\n            self.running_q = 0.\n        self.running_q = self.running_q * 0.99 + 0.01 * np.max(action_value)\n        self.q.append(self.running_q)\n        if np.random.uniform() > self.epsilon:\n            action = np.random.randint(0, self.n_actions)\n        return action\n\n    def learn(self):\n        if self.learn_step_counter % self.replace_target_iter == 0:\n            self.sess.run(self.replace_target_op)\n            print(\"target params replaced!\")\n\n        if self.memory_counter > self.memory_size:\n            # once the replay buffer holds more than memory_size samples, sample indices from memory_size to stay in bounds\n            sample_index = np.random.choice(self.memory_size, self.batch_size)\n        else:\n            # while the buffer holds fewer than memory_size samples, sample from memory_counter to avoid invalid entries\n            sample_index = np.random.choice(self.memory_counter, self.batch_size)\n        batch_memory = self.memory[sample_index, :]\n        q_eval = self.sess.run(self.q_eval, feed_dict={self.state: batch_memory[:, :self.n_features]})  # feed in the current states\n        q_next = self.sess.run(self.q_next, feed_dict={self.state_: batch_memory[:, -self.n_features:]})  # feed in the next states\n\n        batch_index = np.arange(self.batch_size, dtype=np.int32)\n\n        q_target = q_eval.copy()  # Q value of every action\n        eval_act_index = batch_memory[:, self.n_features].astype(int)\n        reward = batch_memory[:, self.n_features + 1]\n\n        if self.double_q:\n            # in double Q-learning the greedy action is selected from q_eval, not from q_target.\n            max_act = np.argmax(q_eval, axis=1)\n            selected_q_next = q_next[batch_index, max_act]\n        else:\n            max_act = np.argmax(q_next, axis=1)\n            selected_q_next = q_next[batch_index, max_act]\n        q_target[batch_index, eval_act_index] = reward + self.gamma * selected_q_next\n\n        _, loss_value = self.sess.run([self._train_op, self.loss], feed_dict={self.q_target: q_target,\n                                                                              self.state: batch_memory[:,\n                                                                                          :self.n_features]})\n        self.epsilon += self.e_greedy_increment if self.epsilon < self.epsilon_max else 0\n        self.cost_his.append(loss_value)\n        self.learn_step_counter += 1\n\n\nclass RunClass:\n    def __init__(self):\n        self.sess = tf.Session()\n        self.action_space = 11\n        self.memory_size = 3000\n        self.n_features = 3\n        self.env = gym.make('Pendulum-v0')\n        self.env = self.env.unwrapped\n        self.env.seed(1)\n        self.build_learning_model()\n\n    def build_learning_model(self):\n        # with tf.variable_scope(\"Natural-DQN\"):\n        #     self.natural_dqn = DoubleDQN(self.action_space, self.n_features, memory_size=self.memory_size,\n        #                                  e_greedy_increment=0.001, double_q=False, sess=self.sess, output_graph=True)\n\n        with tf.variable_scope(\"Double-DQN\"):\n            self.double_dqn = DoubleDQN(self.action_space, self.n_features, memory_size=self.memory_size,\n                                        double_q=True, sess=self.sess, e_greedy_increment=0.001, output_graph=True)\n        self.sess.run(tf.global_variables_initializer())\n\n    def train(self, RL: DoubleDQN):\n        total_step = 0\n        observation = self.env.reset()\n        while True:\n            action = RL.choose_action(observation)\n            # map the discrete action index to a float in [-2, 2]\n            f_action = (action - (self.action_space - 1) / 2) / ((self.action_space - 1) / 4)\n            observation_, reward, done, info = self.env.step(np.array([f_action]))\n            reward /= 10\n            
RL.store_transition(observation, action, reward, observation_)\n if total_step > self.memory_size:\n RL.learn()\n if total_step - self.memory_size > 20000:\n break\n observation = observation_\n total_step += 1\n return RL.q\n\n def run(self):\n # q_natural = self.train(self.natural_dqn)\n q_double = self.train(self.double_dqn)\n # plt.plot(np.array(q_natural), c='r', label='natural')\n plt.plot(np.array(q_double), c='b', label='double')\n plt.legend(loc='best')\n plt.ylabel('Q eval')\n plt.xlabel('training steps')\n plt.grid()\n plt.show()\n\n\nif __name__ == '__main__':\n run_class = RunClass()\n run_class.run()\n" }, { "alpha_fraction": 0.5048293471336365, "alphanum_fraction": 0.5106245875358582, "avg_line_length": 30.383838653564453, "blob_id": "7cb957402f5e56f02cc0c7566d710c861aa68db4", "content_id": "7df0c64745aafeeaae93c2c88da3aaba35e96f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3106, "license_type": "no_license", "max_line_length": 92, "num_lines": 99, "path": "/tree/huffman.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from collections import deque\n\nclass Node:\n def __init__(self, data=None, pre=None, left=None, right=None, isLeaf=False, name=None):\n self.data = data\n self.pre = pre\n self.left = left\n self.right = right\n self.isLeaf = isLeaf\n self.name = name\n self.code = ''\n\n def __str__(self):\n return str(self.name) + \":\" + str(self.data) + \"\\tcode:\" + self.code\n\nclass Huffman:\n def __init__(self, string):\n self.root = None\n self.string = string\n self.frequency = {}\n self.tmp_dict = []\n\n for ch in self.string:\n if ch in self.frequency.keys():\n self.frequency[ch] += 1\n else:\n self.frequency[ch] = 0\n min = float('inf')\n min_key = None\n print(self.frequency)\n while len(self.frequency) > 0:\n for key, value in self.frequency.items():\n if value < min:\n min_key = key\n min = value\n self.tmp_dict.append(Node(min, name=min_key, isLeaf=True))\n del self.frequency[min_key]\n min = float('inf')\n del self.frequency\n for i in self.tmp_dict:\n print(i)\n\n def encoded(self):\n while len(self.tmp_dict)>0:\n value1 = self.get_min_value()\n value2 = self.get_min_value()\n left = value1\n right = value2\n root = Node(value2.data + value1.data, None, left, right, False, None)\n left.pre = root\n right.pre = root\n self.root = root\n if len(self.tmp_dict) > 0:\n self.append_root_node(root)\n else:\n break\n self.post_order(self.root)\n tree = self.leverorder(self.root)\n for i in tree:\n print(i)\n\n def get_min_value(self):\n return self.tmp_dict.pop(0)\n\n def append_root_node(self, node):\n for i in range(len(self.tmp_dict)-1):\n if self.tmp_dict[i].data <= node.data <= self.tmp_dict[i + 1].data:\n self.tmp_dict.insert(i+1, node)\n return\n self.tmp_dict.append(node)\n\n def post_order(self, node):\n if node is None:\n return\n elif node.pre is not None and node.pre.left == node:\n node.code = node.pre.code + '0'\n elif node.pre is not None and node.pre.right == node:\n node.code = node.pre.code + '1'\n self.post_order(node.left)\n self.post_order(node.right)\n\n def leverorder(self, node):\n q = deque()\n q.append(node)\n tree_value = []\n while len(q) > 0:\n tmp_node = q.popleft()\n if tmp_node.isLeaf is True:\n tree_value.append(str(tmp_node))\n if tmp_node.left is not None:\n q.append(tmp_node.left)\n if tmp_node.right is not None:\n q.append(tmp_node.right)\n return tree_value\n\nif __name__ == '__main__':\n tree = Huffman(\"abcabcabcaaabbdddeeeee\")\n 
print(len(tree.tmp_dict))\n    tree.encoded()" }, { "alpha_fraction": 0.7711864113807678, "alphanum_fraction": 0.7881355881690979, "avg_line_length": 38.66666793823242, "blob_id": "81d09181a79b064fa4ff5427feb6107dac15f89a", "content_id": "be0cc03feaa48139cdd07e9dba21f5d81d0251ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/softmax/__init__.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, print_function, division\nfrom . import softmax_loss\nfrom . import load_cifar10" }, { "alpha_fraction": 0.5904669165611267, "alphanum_fraction": 0.5924124717712402, "avg_line_length": 32.19355010986328, "blob_id": "f6a8faa6942d8f7f4ac705eebc75b4073dff1ee8", "content_id": "50f90553148463cebfede0c167aea057d9120f5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 81, "num_lines": 31, "path": "/nn/droupout.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# The implementation of the dropout function.\nimport numpy as np\n\n\ndef dropout_forward(x, dropout_param):\n    \"\"\"\n    Inputs:\n    -x: input value\n    -dropout_param: a dictionary with these keys:\n        -p: dropout rate, the probability of dropping each neuron.\n        -mode: 'test' or 'train',\n            train: neurons are masked out using the keep probability (1 - p).\n            test: no masking is applied; the input is returned unchanged.\n        -seed: seed for the random number generator\n    \"\"\"\n    p, mode = dropout_param['p'], dropout_param['mode']\n    retain_p = 1 - p\n    if 'seed' in dropout_param:\n        np.random.seed(dropout_param['seed'])\n    mask = None\n    out = None\n    if mode == 'train':\n        mask = np.random.binomial(1, retain_p, x.shape)\n        out = x*mask\n    elif mode == 'test':\n        out = x\n    cache = (dropout_param, mask)\n    out = out.astype(x.dtype, copy=False)\n    return out, cache" }, { "alpha_fraction": 0.5136484503746033, "alphanum_fraction": 0.5322902798652649, "avg_line_length": 36.02469253540039, "blob_id": "8639650fb36308ce05b26d4ac94d3d3359d793ca", "content_id": "e50db957af4c39bb48b6ac341193043ba2ddf9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3194, "license_type": "no_license", "max_line_length": 95, "num_lines": 81, "path": "/operate_sys/priority_algorithm.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# priority algorithm: a Python implementation of priority scheduling plus round-robin time slicing\nfrom collections import deque\nimport time\nimport random\n\nclass PCB:\n\n    def __init__(self, priority, hold_time, come_time, status = 0):\n        \"\"\"\n        :param status: PCB (process control block) state: 0 = ready, 1 = running\n        :param priority: priority level: 0, 1, 2, 3, 4, ...\n        :param hold_time: CPU time required by the process\n        :param come_time: arrival time in the process queue\n        \"\"\"\n        self.status = status\n        self.priority = priority\n        self.hold_time = hold_time\n        self.come_time = come_time\n        self.progress_name = random.randint(1000, 9999)\n\n    def __str__(self):\n        return str(self.progress_name) + \"\\t\" + str(self.priority) + \"\\t\\t\\t\" + \\\n               str(self.come_time) + \"\\t\\t\\t\" + str(self.hold_time) + \"\\t\\t\\t\\t\" + \\\n               str('ready' if self.status == 0 else 'running')\n\nclass PriorityAlgorithm:\n\n    def __init__(self):\n        self.progresses = []\n        self.__progresses_in()\n        self.max_index = -1\n\n    def __progresses_in(self):\n        self.progresses.append(PCB(1, 10, 0))\n        self.progresses.append(PCB(3, 3, 0.2))\n        self.progresses.append(PCB(2, 2, 0.4))\n        self.progresses.append(PCB(5, 5, 0.5))\n        self.progresses.append(PCB(4, 1, 0.8))\n        self.progresses.append(PCB(7, 9, 1.2))\n        self.progresses.append(PCB(6, 6, 1.5))\n\n    def print_all_progresses(self):\n        print(\"Processes currently ready in the queue:\")\n        print(str('name') + \"\\t\" + str('priority') + \"\\t\" + \\\n              str('come_time') + \"\\t\" + str('execute_time') + \"\\t\" + \\\n              str('status'))\n        for i in range(len(self.progresses)):\n            print(self.progresses[i])\n\n    def execute_progress(self):\n        while len(self.progresses) > 0:\n            for i in range(len(self.progresses)):\n                if len(self.progresses) == 0:  # guard: the queue may empty out mid-cycle\n                    break\n                self.max_index = self.get_max_priority_index()\n                self.progresses[self.max_index].hold_time -= 1\n                if self.progresses[self.max_index].hold_time <= 0:\n                    self.progresses.remove(self.progresses[self.max_index])\n                self.print_all_progresses()\n                time.sleep(1)\n            self.__change_priority()\n            print(\"=============================== Result after one round-robin cycle: ================================\")\n            self.print_all_progresses()\n            time.sleep(1)\n\n    def get_max_priority_index(self):\n        max_index = 0\n        for i in range(len(self.progresses)):\n            if self.progresses[i].priority > self.progresses[max_index].priority:\n                max_index = i\n        self.progresses[max_index].priority = self.progresses[max_index].priority - 10\n        return max_index\n\n    def __change_priority(self):\n        for i in range(len(self.progresses)):\n            self.progresses[i].priority += 10\n\nif __name__ == \"__main__\":\n    priority = PriorityAlgorithm()\n    print(\"Initial processes in the queue: ===================================\")\n    priority.print_all_progresses()\n    print(\"===================end============================\")\n    priority.execute_progress()\n\n\n\n\n\n" }, { "alpha_fraction": 0.5670401453971863, "alphanum_fraction": 0.5875195860862732, "avg_line_length": 40.89655303955078, "blob_id": "50916099377fff1e9b2d4ec99c8a1faf9a243fd9", "content_id": "437902e09227f9f145e375655951bab8a62c8cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9603, "license_type": "no_license", "max_line_length": 158, "num_lines": 203, "path": "/reinforcement_learning/DQN/exp1/tutorial_DQN.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "\"\"\"\nDeep Q-Network Q(a, s)\n-----------------------\nTD Learning, Off-Policy, e-Greedy Exploration (GLIE).\nQ(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A))\ndelta_w = R + lambda * Q(newS, newA)\nSee David Silver RL Tutorial Lecture 5 - Q-Learning for more details.\nReference\n----------\noriginal paper: https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf\nEN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw\nCN: https://zhuanlan.zhihu.com/p/25710327\nNote: Policy Network has been proved to be better than Q-Learning, see tutorial_atari_pong.py\nEnvironment\n-----------\n# The FrozenLake v0 environment\nhttps://gym.openai.com/envs/FrozenLake-v0\nThe agent controls the movement of a character in a grid world. Some tiles of\nthe grid are walkable, and others lead to the agent falling into the water.\nAdditionally, the movement direction of the agent is uncertain and only partially\ndepends on the chosen direction. 
The agent is rewarded for finding a walkable\npath to a goal tile.\nSFFF (S: starting point, safe)\nFHFH (F: frozen surface, safe)\nFFFH (H: hole, fall to your doom)\nHFFG (G: goal, where the frisbee is located)\nThe episode ends when you reach the goal or fall in a hole. You receive a reward\nof 1 if you reach the goal, and zero otherwise.\nPrerequisites\n--------------\ntensorflow>=2.0.0a0\ntensorlayer>=2.0.0\nTo run\n-------\npython tutorial_DQN.py --train/test\n\"\"\"\nimport argparse\nimport time\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\n\n# add arguments in command --train/test\n# For an introduction to argparse, see my Zhihu column:\n# \"A short note on basic argparse usage\"\n# https://zhuanlan.zhihu.com/p/111010774\n# Note: the original code defaulted to test; I changed the default to train.\nparser = argparse.ArgumentParser(description='Train or test neural net motor controller.')\nparser.add_argument('--train', dest='train', action='store_true', default=True)\nparser.add_argument('--test', dest='test', action='store_true', default=False)\nargs = parser.parse_args()\n\ntl.logging.set_verbosity(tl.logging.DEBUG)\n\n##################### hyper parameters ####################\nlambd = .99  # discount rate (decay factor)\ne = 0.1  # epsilon-greedy parameter; larger values mean more randomness and more exploration.\nnum_episodes = 10000  # number of episodes\nrender = False  # whether to render the game\nrunning_reward = None\n\n\n##################### DQN ##########################\n\n## Convert an integer class label into a one-hot representation.\n# For example, with 4 classes, class 3 becomes [0,0,1,0].\ndef to_one_hot(i, n_classes=None):\n    a = np.zeros(n_classes, 'uint8')  # first build an all-zero vector of length n_classes\n    a[i] = 1  # then set the bit that one-hot encodes the class.\n    return a\n\n\n## Define Q-network q(a,s) that outputs the rewards of 4 actions by given state, i.e. Action-Value Function.\n# encoding for state: 4x4 grid can be represented by one-hot vector with 16 integers.\ndef get_model(inputs_shape):\n    '''\n    Define the Q-network model:\n    1. Mind the input shape and the output shape.\n    2. W_init and b_init control the random initialization of the model parameters; here the weights are initialized uniformly in [0, 0.01].\n    '''\n    ni = tl.layers.Input(inputs_shape, name='observation')\n    nn = tl.layers.Dense(4, act=None, W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')(ni)\n    return tl.models.Model(inputs=ni, outputs=nn, name=\"Q-Network\")\n\n\ndef save_ckpt(model):  # save trained weights\n    '''\n    Save the model weights.\n    '''\n    tl.files.save_npz(model.trainable_weights, name='dqn_model.npz')\n\n\ndef load_ckpt(model):  # load trained weights\n    '''\n    Load the model weights.\n    '''\n    tl.files.load_and_assign_npz(name='dqn_model.npz', network=model)\n\n\nif __name__ == '__main__':\n\n    qnetwork = get_model([None, 16])  # input shape [None, 16]; 16 is the number of states\n    qnetwork.train()  # TensorLayer requires marking whether the model is in training mode.\n    train_weights = qnetwork.trainable_weights  # the model parameters\n    optimizer = tf.optimizers.Adam(learning_rate=0.01)  # define the optimizer (TF2-style API)\n    env = gym.make('FrozenLake-v0')  # create the environment\n\n    # ====== training ======\n    if args.train:\n        t0 = time.time()\n        for i in range(num_episodes):\n            ## reset the environment to its initial state\n            s = env.reset()\n            rAll = 0\n            for j in range(99):  # explore at most 99 steps; the state space is small, so this is usually enough to reach a terminal state.\n                if render: env.render()\n\n                ## Feed the state into the network to compute the Q values.\n                ## Note that the state is one-hot encoded first.\n                ## The output is the Q value of every action in this state, i.e. a [None, 4] matrix.\n                allQ = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32)).numpy()\n\n                # pick the action with the largest Q value in the matrix\n                a = np.argmax(allQ, 1)\n\n                # e-greedy: with probability epsilon the agent explores randomly; otherwise it takes the greedy (max-Q) action.\n                if np.random.rand(1) < e:\n                    a[0] = env.action_space.sample()\n\n                # apply the action to the environment to get the next state, reward and done flag\n                s1, r, d, _ = env.step(a[0])\n\n                # feed in the new state to predict the Q values of **all actions** in the next state.\n                Q1 = qnetwork(np.asarray([to_one_hot(s1, 16)], dtype=np.float32)).numpy()\n\n                ## ======= compute the target =======\n                ## Build the update target:\n                # Q'(s,a) <- Q(s,a) + alpha(r + lambd * maxQ(s',a') - Q(s, a))\n                maxQ1 = np.max(Q1)  # largest Q value in the next state.\n                targetQ = allQ  # build the target from allQ (Q values of the current state); only the chosen action gets updated.\n                targetQ[0, a[0]] = r + lambd * maxQ1\n\n                ## update via automatic differentiation.\n                with tf.GradientTape() as tape:\n                    _qvalues = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32))  # feed s into the Q-network to compute _qvalues.\n                    # the gap between _qvalues and targetQ is the loss, measured with MSE\n                    _loss = tl.cost.mean_squared_error(targetQ, _qvalues, is_mean=False)\n                # differentiate the loss w.r.t. the network parameters with the gradient tape\n                grad = tape.gradient(_loss, train_weights)\n                # apply the gradients to the network parameters\n                optimizer.apply_gradients(zip(grad, train_weights))\n\n                # accumulate the reward and set s to the new state\n                rAll += r\n                s = s1\n\n                # decay epsilon as the number of episodes grows,\n                # so the agent explores less and less over time\n                if d == True:\n                    e = 1. / ((i / 50) + 10)\n                    break\n\n            ## running_reward keeps an exponential moving average of episode rewards; most of the weight is on history and only a small part on the new episode, which makes the trend easier to see.\n            running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01\n            # print(\"Episode [%d/%d] sum reward: %f running reward: %f took: %.5fs \" % \\\n            #     (i, num_episodes, rAll, running_reward, time.time() - episode_time))\n            print('Episode: {}/{}  | Episode Reward: {:.4f} | Running Average Reward: {:.4f} | Running Time: {:.4f}' \\\n                .format(i, num_episodes, rAll, running_reward, time.time() - t0))\n        save_ckpt(qnetwork)  # save model\n\n    ## ============ this part is the actual play ============\n    # Not explained in detail; it is the same as training, just without epsilon-greedy exploration.\n    if args.test:\n        t0 = time.time()\n        load_ckpt(qnetwork)  # load model\n        for i in range(num_episodes):\n            ## Reset environment and get first new observation\n            episode_time = time.time()\n            s = env.reset()  # observation is state, integer 0 ~ 15\n            rAll = 0\n            for j in range(99):  # step index, maximum step is 99\n                if render: env.render()\n\n                ## Choose an action by greedily (with e chance of random action) from the Q-network\n                allQ = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32)).numpy()\n                a = np.argmax(allQ, 1)  # no epsilon, only greedy for testing\n\n                ## Get new state and reward from environment\n                s1, r, d, _ = env.step(a[0])\n                rAll += r\n                s = s1\n                ## Reduce chance of random action if an episode is done.\n                if d == True:\n                    # e = 1. / ((i / 50) + 10)  # reduce e, GLIE: Greedy in the limit with infinite Exploration\n                    break\n\n            ## Note that, the rewards here with random action\n            running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01\n            # print(\"Episode [%d/%d] sum reward: %f running reward: %f took: %.5fs \" % \\\n            #     (i, num_episodes, rAll, running_reward, time.time() - episode_time))\n            print('Episode: {}/{}  | Episode Reward: {:.4f} | Running Average Reward: {:.4f} | Running Time: {:.4f}' \\\n                .format(i, num_episodes, rAll, running_reward, time.time() - t0))" }, { "alpha_fraction": 0.5727762579917908, "alphanum_fraction": 0.6010781526565552, "avg_line_length": 27.01886749267578, "blob_id": "16ddb76d4bc4ca9ec9010ee1eea38d8a319c9ed7", "content_id": "22da5a7b8cd6cff4288c215b2346e212243cb624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1538, "license_type": "no_license", "max_line_length": 81, "num_lines": 53, "path": "/KNN/KNN.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom os import listdir\n# convert an image file into a vector\ndef img2vector(filename):\n    with open(filename) as fobj:\n        arr = fobj.readlines()\n        vec, dimension = [], len(arr)\n        for i in range(dimension):\n            line = arr[i].strip()\n            for j in range(dimension):\n                vec.append(int(line[j]))\n    return vec\n# load the training data\ndef createDataset(dir):\n    dataset, labels = [], []\n    files = listdir(dir)\n    for filename in files:\n        label = int(filename[0])\n        labels.append(label)\n        dataset.append(img2vector(dir + '/' + filename))\n\n    return dataset, labels\n\n# compute the Tanimoto coefficient\ndef tanimoto(vec1, vec2):\n    c1, c2, c3 = 0, 0, 0\n    for i in range(len(vec1)):\n        if vec1[i] == 1: c1 += 1\n        if vec2[i] == 1: c2 += 1\n        if vec1[i] == 1 and vec2[i] == 1: c3 += 1\n\n    return c3 / (c1 + c2 - c3)\n\ndef classify(dataset, labels, testData, k=20):\n    distances = []\n\n    for i in range(len(labels)):\n        d = tanimoto(dataset[i], testData)\n        distances.append((d, labels[i]))\n\n    distances.sort(reverse=True)\n    # key: label, value: count of the label\n    klabelDict = {}\n    for i in range(k):\n        klabelDict.setdefault(distances[i][1], 0)\n        
klabelDict[distances[i][1]] += 1 / k\n\n    # sort by value in descending order\n    predDict = sorted(klabelDict.items(), key=lambda item: item[1], reverse=True)\n    return predDict\ndataset, labels = createDataset('trainingDigits')\ntestData = img2vector('testDigits/8_19.txt')\nprint(classify(dataset, labels, testData))" }, { "alpha_fraction": 0.3049386739730835, "alphanum_fraction": 0.32383161783218384, "avg_line_length": 33.28409194946289, "blob_id": "e940065bf6ba4d6b5e5153695d8a2901be1acbd7", "content_id": "37e1b159b4bb4b556a3339cf456a25fb90040554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3301, "license_type": "no_license", "max_line_length": 105, "num_lines": 88, "path": "/algorithm/myenum.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# Brute-force solving: 1. simple enumeration\n\ndef division():\n    '''\n    Read an integer n and print, in increasing order, every expression of the form abcde/fghij = n,\n    where the letters a~j are exactly a permutation of the digits 0~9.\n    :return:\n    '''\n    n = input(\"Enter a positive integer n, 2<=n<=79\\n\")\n    for a in range(1, 10):\n        for b in range(10):\n            if a == b:\n                continue\n            else:\n                for c in range(10):\n                    if c==a or c==b:\n                        continue\n                    else:\n                        for d in range(10):\n                            if d==c or d==b or d==a:\n                                continue\n                            else:\n                                for e in range(10):\n                                    if e==d or e==c or e==b or e==a:\n                                        continue\n                                    else:\n                                        value = a*10000 + b*1000 + c*100 + d*10 + e\n                                        if value % int(n) == 0:\n                                            div = value // int(n)\n                                            str_value = str(value)\n                                            str_div = str(div)\n\n                                            set_str_div = set(str_div)\n                                            set_str_value = set(str_value)\n                                            if '0' in set_str_value or '0' in set_str_div:\n                                                sets = set_str_div | set_str_value\n                                            else:\n                                                set_str_div = set('0') | set_str_div\n                                                sets = set_str_div | set_str_value\n\n                                            if len(sets) == 10:\n                                                print(str(value) + \"\\t/\\t0\" + str_div + \"\\t=\\t\" + str(n))\n                                            else:\n                                                continue\n\ndef maximum_product():\n    '''\n    Maximum product. Given a sequence S of n elements, find the contiguous subsequence with the\n    largest product; if that maximum product is not positive, output 0 (no solution).\n    :return:\n    '''\n    n = input(\"Enter the sequence length:\\n\")\n    si = []\n    for i in range(int(n)):\n        sii = input(\"Element value:\")\n        si.append(int(sii))\n    max = 0\n    tmp_max = 1\n    for i in range(int(n)-1, -1, -1):\n        for j in range(i+1):\n            tmp_max *= si[j]\n        if tmp_max > max:\n            max = tmp_max\n        tmp_max = 1\n    for i in range(int(n)):\n        for j in range(i, int(n)):\n            tmp_max *= si[j]\n        if tmp_max > max:\n            max = tmp_max\n        tmp_max = 1\n\n    print(max)\n\ndef fraction_again():\n    '''\n    Fraction decomposition: given a positive integer k, find all x>=y such that 1/k = 1/x + 1/y\n    '''\n    k = int(input(\"Enter k:\"))\n    for y in range(1, 2*k + 1):\n        x = y\n        while x <= k * (k + 1):  # bound x so the loop terminates when y has no solution\n            if x*y % (x+y) == 0 and x*y // (x+y) == k:\n                print(\"1/\" + str(k) + \" = 1/\" + str(x) + \" + 1/\" + str(y))\n                break\n            x += 1\nif __name__ == '__main__':\n    # division()\n    # maximum_product()\n    fraction_again()\n" }, { "alpha_fraction": 0.5171740651130676, "alphanum_fraction": 0.522053062915802, "avg_line_length": 26.11640167236328, "blob_id": "79f7cba59c36053a46046091336cc39c73c6ec4e", "content_id": "f5464fa1927cd1a01b839e2ab9c0f68c12a3cdda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5374, "license_type": "no_license", "max_line_length": 92, "num_lines": 189, "path": "/tree/Tree.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "\"\"\"\nData structure --- Tree\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nfrom collections import deque\n\nclass Solution:\n\n    def __init__(self):\n        self.values = []\n        self.array = None\n\n    def preorderTraversal(self, node):\n        if node is None:\n            return\n        else:\n            self.values.extend(node.value)\n            self.preorderTraversal(node.left)\n            self.preorderTraversal(node.right)\n    
\n    def midorderTreacersal(self, node):\n        if node is None:\n            return\n        else:\n            self.midorderTreacersal(node.left)\n            self.values.extend(node.value)\n            self.midorderTreacersal(node.right)\n\n    def postorderTreacersal(self, node):\n        if node is None:\n            return\n        else:\n            self.postorderTreacersal(node.left)\n            self.postorderTreacersal(node.right)\n            self.values.extend(node.value)\n    \n    def leverorder(self, node):\n        '''\n        Level-order traversal of a binary tree, using a queue; popleft() gives the FIFO behavior\n        '''\n        q = deque()\n        q.append(node)\n        tree_value = []\n        while len(q) > 0:\n            tmp_node = q.popleft()\n            tree_value.append(tmp_node.value)\n            if tmp_node.left is not None:\n                q.append(tmp_node.left)\n            if tmp_node.right is not None:\n                q.append(tmp_node.right)\n        return tree_value\n\n    def sawtooth_order(self, node):\n        flag = 1\n        q = deque()\n        tree_value = []\n        if node is not None:\n            q.append(node)\n        else:\n            raise Exception('node is None!')\n        while len(q) > 0:\n            tmp_value = q.popleft()\n            tree_value.append(tmp_value.value)\n            if flag == 0:\n                if tmp_value.right is not None:\n                    q.append(tmp_value.right)\n                if tmp_value.left is not None:\n                    q.append(tmp_value.left)\n                flag = 1\n            else:\n                if tmp_value.left is not None:\n                    q.append(tmp_value.left)\n                if tmp_value.right is not None:\n                    q.append(tmp_value.right)\n                flag = 0\n        return tree_value\n\n    '''\n    Array (sequential) storage of a binary tree\n    Root position: n, left child: 2*n+1, right child: 2*n+2\n    '''\n    def tree2array(self, root, len):\n        self.array = [None] * len\n        self.__toArrar(root, 0)\n\n    def __toArrar(self, node, pos):\n        if node is None:\n            return\n        self.array[pos] = node.value\n        self.__toArrar(node.left, 2*pos + 1)\n        self.__toArrar(node.right, 2*pos + 2)\n\n    '''\n    Minimum depth of a binary tree\n    '''\n    def min_depth(self, node):\n        if node is None:\n            return 0\n        if node.left is not None:\n            if node.right is not None:\n                return self.__min(self.min_depth(node.left), self.min_depth(node.right)) + 1\n            else:\n                return self.min_depth(node.left) + 1\n        elif node.right is not None:\n            return self.min_depth(node.right) + 1\n        else:\n            return 1\n\n    def __min(self, a, b):\n        if a>=b:\n            return b\n        else:\n            return a\n\n    '''\n    Maximum depth of a binary tree\n    '''\n    def max_depth(self, node):\n        if node is None:\n            return 0\n        else:\n            return self.__max(self.max_depth(node.left), self.max_depth(node.right)) + 1\n\n    def __max(self, a, b):\n        if a >= b:\n            return a\n        else:\n            return b\n    \n    '''\n    Check whether two binary trees are identical\n    '''\n    def isSampleTree(self, treeA, treeB):\n        if treeA is None:\n            return treeB is None\n        if treeB is None:\n            return False\n        return (treeA.value == treeB.value) and self.isSampleTree(treeA.left, treeB.left) \\\n               and self.isSampleTree(treeA.right, treeB.right)\n\n\nclass TreeNode:\n\n    def __init__(self, val=None, left=None, right=None):\n        self.value = val\n        self.left = left\n        self.right = right\n\nif __name__ == '__main__':\n    '''\n             D\n        B        E\n      A   C        G\n                  F\n    '''\n    # build the tree\n    tree = TreeNode('D', \n        TreeNode('B', TreeNode('A'), TreeNode('C')), \\\n        TreeNode('E', TreeNode('G', right=TreeNode('F'))))\n    solution = Solution()\n    print('Preorder traversal:')\n    solution.preorderTraversal(tree)\n    print(solution.values)\n    print('Inorder traversal:')\n    solution.values = []\n    solution.midorderTreacersal(tree)\n    print(solution.values)\n    print('Postorder traversal:')\n    solution.values = []\n    solution.postorderTreacersal(tree)\n    print(solution.values)\n    print('Level-order traversal:')\n    values = solution.leverorder(tree)\n    print(values)\n    print('Array storage of the binary tree:')\n    solution.tree2array(tree, 16)\n    print(solution.array)\n    print(\"Minimum depth of the binary tree:\")\n    print(solution.min_depth(tree))\n    print(\"Maximum depth of the binary tree:\")\n    print(solution.max_depth(tree))\n    print(\"Are the two binary trees identical?\")\n    treeB = TreeNode('D', \\\n        TreeNode('B', TreeNode('A'), TreeNode('C')), \\\n        TreeNode('E', TreeNode('G', right=TreeNode('F'))))\n    print(solution.isSampleTree(tree, treeB))\n    print(\"sawtooth order:\")\n    print(solution.sawtooth_order(tree))" }, { "alpha_fraction": 0.44952794909477234, "alphanum_fraction": 0.4727668762207031, "avg_line_length": 26, "blob_id": "20e94d2ecbc581a1e34d2605a39a8824c103a2fb", "content_id": "0aa3b22c4717e81e409198fad25e0d34bc9292e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 106, "num_lines": 51, "path": "/recomend/cf/user_based_cf.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n\n\ndef prepare_data():\n    # user based collaborative filtering\n    data = pd.read_csv('./data.txt', ',', index_col=0, header=None, names=['I1', 'I2', 'I3', 'I4']).values\n    means = []\n    for line in data:\n        sum = 0\n        count = 0\n        for num in line:\n            if not np.isnan(num):\n                sum += num\n                count += 1\n        means.append(sum / count)\n    var = []\n    i = 0\n    for line in data:\n        sum = 0\n        for num in line:\n            if not np.isnan(num):\n                sum += (num - means[i])**2\n        var.append(sum**0.5)\n        i += 1\n    corr = np.zeros([data.shape[0], data.shape[0]],dtype=np.float32)\n\n    for i in range(data.shape[0]):\n        for j in range(data.shape[0]):\n            sum = 0\n            for x,y in zip(data[i], data[j]):\n                if not np.isnan(x) and not np.isnan(y):\n                    sum += (x-means[i])*(y-means[j])\n            corr[i,j] = (sum / (var[i] * var[j])) if (var[i] * var[j]) != 0 else 0\n\n    # estimate the rating user u1 would give item i2\n    sum = 0\n    for u in range(data.shape[0]):\n        if not np.isnan(data[u, 1]):\n            sum += (data[u, 1] - means[u]) * corr[0, u]\n    result = means[0] + sum / np.sum(np.abs(corr[0]))\n    #\n    print(result)\n\n    # print(means)\n    # print(var)\n    # print(corr)\n\n\nif __name__ == '__main__':\n    prepare_data()\n" }, { "alpha_fraction": 0.6298472285270691, "alphanum_fraction": 0.6850763559341431, "avg_line_length": 35.91304397583008, "blob_id": "10c05afa191ee1e0961c38829c910e1fe160c699", "content_id": "70e187cf02a0e0c6c1c2aa8bf0cc356cecaba82c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/reinforcement_learning/Dyna-q/world_model.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from tensorflow.keras import Model\nfrom tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D\nfrom tensorflow.keras import layers\n\ninput_image = Input((64, 64, 3), name='input_state')\n\nx = Conv2D(32, (3, 3), activation='relu', padding='same')(input_image)\nx = MaxPooling2D((2, 2), padding='same')(x)\nx = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n\nencoded = MaxPooling2D((2, 2))(x)\n\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)\nx = layers.UpSampling2D((2, 2))(x)\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)\nx = layers.UpSampling2D((2, 2))(x)\ndecoded = Conv2D(8, (3, 3), activation='relu', padding='same')(x)\n\nautoencoder = Model(input_image, decoded)\nencoder = Model(input_image, encoded)\n\nencoded_input = Input(shape=(32, 32, 32))\ndecoder = Model(encoded_input, decoded)\n\n\n" }, { "alpha_fraction": 0.5580966472625732, "alphanum_fraction": 0.5894503593444824, "avg_line_length": 47.20408248901367, "blob_id": "0704d6e81b9719a78b87bc03b3d4a7d2b43d47ac", "content_id": "ac3f96099ceee72280b8b10cccd16c3b45d2293e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2711, "license_type": "no_license", "max_line_length": 119, "num_lines": 70, "path": "/ml/car_classification/data_process.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# vehicle classification.\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\npd.set_option('display.max_columns', None)\n\ndef load_data():\n col_names = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'class']\n data = pd.read_csv('./car.csv', names=col_names)\n return data\n\ndef convert2onehot(data):\n return pd.get_dummies(data, prefix=data.columns).values.astype(np.float32)\n\ndef model():\n data = load_data()\n np_data = convert2onehot(data)\n np.random.shuffle(np_data)\n sep = int(0.7 * np_data.shape[0])\n train_data = np_data[:sep, :]\n test_data = np_data[sep:, :]\n\n tf_input = tf.placeholder(tf.float32, shape=[None, train_data.shape[1]], name='input')\n tfx = tf_input[:, :21]\n tfy = tf_input[:, 21:]\n\n l1 = tf.layers.dense(tfx, 28, activation=tf.nn.relu, name='l1')\n l2 = tf.layers.dense(l1, 128, activation=tf.nn.sigmoid, name='l2')\n l3 = tf.layers.dense(l2, 4, name='l3')\n prediction = tf.nn.softmax(l3, name='pred')\n\n loss = tf.losses.softmax_cross_entropy(onehot_labels=tfy, logits=l3)\n accuracy = tf.metrics.accuracy(labels=tf.arg_max(tfy, 1), predictions=tf.arg_max(prediction, 1))[1]\n opt = tf.train.GradientDescentOptimizer(0.1)\n train_op = opt.minimize(loss)\n sess= tf.Session()\n sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n\n plt.ion()\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))\n accuracies, steps = [], []\n\n for t in range(40000):\n batch_index = np.random.randint(len(train_data), size=32)\n sess.run(train_op, feed_dict={tf_input: train_data[batch_index]})\n if t % 50 == 0:\n acc_, pred_, loss_ = sess.run([accuracy, prediction, loss], feed_dict={tf_input: test_data})\n print('step: %i' % t, '|Accuracy is %.2f'%acc_, '|loss is: %.2f'%loss_)\n accuracies.append(acc_)\n steps.append(t)\n\n # visualize testing\n ax1.cla()\n for c in range(4):\n bp = ax1.bar(c + 0.1, height=sum((np.argmax(pred_, axis=1) == c)), width=0.2, color='red')\n bt = ax1.bar(c - 0.1, height=sum((np.argmax(test_data[:, 21:], axis=1) == c)), width=0.2, color='blue')\n ax1.set_xticks(range(4), [\"accepted\", \"good\", \"unaccepted\", \"very good\"])\n ax1.legend(handles=[bp, bt], labels=[\"prediction\", \"target\"])\n ax1.set_ylim((0, 400))\n ax2.cla()\n ax2.plot(steps, accuracies, label=\"accuracy\")\n ax2.set_ylim(ymax=1)\n ax2.set_ylabel(\"accuracy\")\n plt.pause(0.01)\n plt.ioff()\n plt.show()\n\nif __name__ == '__main__':\n model()" }, { "alpha_fraction": 0.4559352397918701, "alphanum_fraction": 0.45773380994796753, "avg_line_length": 22.617021560668945, "blob_id": "a10411711f8b9a9a6e8675fbbf0cd1d189ace1ab", "content_id": "0e2e4a57780393700bb42a9762556d867c04901d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1118, "license_type": "no_license", "max_line_length": 56, "num_lines": 47, "path": "/tree/Trie.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 字典树\nclass TrieNode:\n def __init__(self):\n self.map = {}\n self.isLeaf = False\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, word):\n root = self.root\n for ch in word:\n if ch not in root.map:\n root.map[ch] = TrieNode()\n root = root.map[ch]\n 
root.isLeaf = True\n\n def search(self, word):\n root = self.root\n i = 0\n for ch in word:\n if ch not in root.map:\n return False\n else:\n i += 1\n if i==len(word) and root.map[ch].isLeaf:\n return True\n root = root.map[ch]\n return False\n\n def start_with(self, prefix):\n root = self.root\n for ch in prefix:\n if ch not in root.map:\n return False\n else:\n root = root.map[ch]\n return True \n\n\nif __name__ == '__main__':\n trie = Trie()\n trie.insert(\"hello\")\n result = trie.search(\"hello\")\n print(result)\n print(trie.start_with('hel'))\n\n\n" }, { "alpha_fraction": 0.5193057060241699, "alphanum_fraction": 0.5278072953224182, "avg_line_length": 41.13432693481445, "blob_id": "587b4f4edee4e81468f4b30f1de39787d645eee6", "content_id": "4523200eca7d017473da23d030fc263a43671b30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5652, "license_type": "no_license", "max_line_length": 127, "num_lines": 134, "path": "/reinforcement_learning/Dyna-q/dyna_q_brain.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow._api.v2.compat.v1 as tf\nfrom maze_env import DynaQMaze\n\ntf.disable_v2_behavior()\ntf.disable_eager_execution()\n\n\nclass DynaAgent:\n def __init__(self, exp_rate=0.3, lr=0.1, n_steps=5, episodes=1000, sess: tf.Session = None):\n self.maze = DynaQMaze()\n self.actions = self.maze.action_space\n self.n_actions = len(self.actions)\n self.state_actions = [] # state & action transition\n self.exp_rate = exp_rate\n self.lr = lr\n self.steps = n_steps\n self.episodes = episodes # number of episodes going to play\n self.steps_per_episode = []\n self.state = self.maze.get_current_state()\n self.Q_values = {}\n # model function\n self.model = {}\n self.maze.render()\n if sess is None:\n self.sess = tf.Session()\n else:\n self.sess = sess\n self.writer1 = tf.summary.FileWriter('./log/r-1', self.sess.graph)\n self.writer2 = tf.summary.FileWriter('./log/r-2', self.sess.graph)\n self.tmp_tensor = tf.placeholder(tf.float32)\n self.all_reward_summary = tf.summary.scalar('all_reward', self.tmp_tensor)\n self.all_cnt_summary = tf.summary.scalar('all_cnt', self.tmp_tensor)\n\n self.write_op = tf.summary.merge_all()\n\n def choose_action(self):\n # epsilon-greedy\n action_index = 0\n mx_nxt_reward = -999\n if np.random.uniform(0, 1) <= self.exp_rate:\n action_index = np.random.choice(self.n_actions)\n else:\n # greedy action\n # if all actions have same value, then select randomly\n if self.get_key(self.state) not in self.Q_values.keys():\n self.Q_values[self.get_key(self.state)] = np.zeros(self.n_actions)\n if len(set(self.Q_values[self.get_key(self.state)])) == 1:\n action_index = np.random.choice(self.n_actions)\n else:\n for a_i in range(self.n_actions):\n nxt_reward = self.Q_values[self.get_key(self.state)][a_i]\n if nxt_reward >= mx_nxt_reward:\n action_index = a_i\n mx_nxt_reward = nxt_reward\n return action_index\n\n def reset(self):\n self.maze.reset()\n self.state = self.maze.get_current_state()\n self.state_actions = []\n\n def train(self):\n self.learn(True)\n self.Q_values = {}\n self.model = {}\n self.steps = 0\n self.learn(False)\n\n def learn(self, type=True):\n self.steps_per_episode = []\n for ep in range(self.episodes):\n cnt = 0.\n while not self.maze.end:\n action_index = self.choose_action()\n self.state_actions.append((self.state, action_index))\n nxtState, reward = self.maze.step(action_index)\n # 当前state的index.\n i_state = self.get_key(self.state)\n # 
index of next state.\n i_state_next = self.get_key(nxtState)\n # update Q-value\n if i_state_next not in self.Q_values.keys():\n self.Q_values[i_state_next] = np.zeros(self.n_actions)\n if i_state not in self.Q_values.keys():\n self.Q_values[i_state] = np.zeros(self.n_actions)\n self.Q_values[i_state][action_index] += self.lr * (reward + np.max(list(self.Q_values[i_state_next]))\n - self.Q_values[i_state][action_index])\n # update model\n if i_state not in self.model.keys():\n self.model[i_state] = {}\n self.model[i_state][action_index] = (reward, nxtState)\n self.state = nxtState\n # loop n times to randomly update Q-value\n for _ in range(self.steps):\n # randomly choose an state\n rand_idx = np.random.choice(range(len(self.model.keys())))\n _state = list(self.model)[rand_idx]\n # randomly choose an action\n rand_idx = np.random.choice(range(len(self.model[_state].keys())))\n _action = list(self.model[_state])[rand_idx]\n _reward, _nxtState = self.model[_state][_action]\n self.Q_values[_state][_action] += self.lr * (_reward + np.max(list(self.Q_values[self.get_key(_nxtState)]))\n - self.Q_values[_state][_action])\n # end of game\n cnt += 1.\n self.steps_per_episode.append(len(self.state_actions))\n self.reset()\n np_sum = np.sum(np.hstack(self.Q_values.values())) / cnt\n summary1 = self.sess.run(self.all_reward_summary, feed_dict={self.tmp_tensor: np_sum})\n summary2 = self.sess.run(self.all_cnt_summary, feed_dict={self.tmp_tensor: cnt})\n if type:\n self.writer1.add_summary(summary1, ep)\n self.writer1.add_summary(summary2, ep)\n self.writer1.flush()\n else:\n self.writer2.add_summary(summary1, ep)\n self.writer2.add_summary(summary2, ep)\n self.writer2.flush()\n\n def get_key(self, state):\n s = ''\n for v in state:\n s += str(v)\n return s\n\n\nif __name__ == \"__main__\":\n N_EPISODES = 50\n # comparison\n sess = tf.Session()\n agent1 = DynaAgent(n_steps=5, episodes=100, sess=sess)\n # for i in range(N_EPISODES):\n agent1.train()\n" }, { "alpha_fraction": 0.7566878795623779, "alphanum_fraction": 0.7605095505714417, "avg_line_length": 31.66666603088379, "blob_id": "e31c1ce9a926ebf98832ac517c253f0c1e11a48a", "content_id": "94f7985615c7499a4903953774fdb3eac05a89df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 81, "num_lines": 24, "path": "/ml/creditcard/creditcard_upsample.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.metrics import confusion_matrix, recall_score, classification_report\nfrom sklearn.preprocessing import StandardScaler\n\n\ndata = pd.read_csv('./creditcard.csv')\ncount_class = pd.value_counts(data['Class'], sort=True)\ncount_class.plot(kind='bar')\nplt.title(\"Fraud class histogram\")\nplt.xlabel(\"Class\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nscaler = StandardScaler()\ndata['normAmount'] = scaler.fit_transform(data['Amount'].values.reshape(-1, 1))\ndata = data.drop(['Time', 'Amount'], axis=1)\nprint(data.head())\n\nX = data.iloc[:, data.columns != 'Class']\n\n" }, { "alpha_fraction": 0.5351550579071045, "alphanum_fraction": 0.5655511021614075, "avg_line_length": 40.18987274169922, "blob_id": "3f703276cb247e0552667140bcb01408c612b84c", "content_id": 
"eefac1d18ce4dbc797fcf99b87abdc1fe3634935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3257, "license_type": "no_license", "max_line_length": 123, "num_lines": 79, "path": "/ml/ensemble_learning/tf_model.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\n\ndef prepare_data():\n original_data = pd.read_csv(\"./heart.csv\")\n data = original_data.values\n data[:, 0] = data_normalization(data[:, 0])\n data[:, 3] = data_normalization(data[:, 3])\n data[:, 4] = data_normalization(data[:, 4])\n data[:, 7] = data_normalization(data[:, 7])\n\n X_ = data[:, :-1]\n y_ = data[:, -1]\n x_train, x_test, y_train, y_test = train_test_split(X_, y_, test_size=0.2, shuffle=True, random_state=0)\n return x_train, x_test, y_train, y_test\n\n\ndef data_normalization(data, method='max-min'):\n if method == 'max-min':\n max_value, min_value = max(data), min(data)\n data = (data - np.repeat(min_value, data.shape[0])) / (max_value - min_value)\n return data\n\n elif method == 'z-zero':\n mean = np.mean(data, axis=0)\n std = np.std(data, axis=0)\n return (data - np.repeat(mean, data.shape[0])) / std\n\n\ndef build_model(x_train, x_test, y_train, y_test):\n\n graph = tf.Graph()\n with graph.as_default():\n with graph.name_scope(\"input\"):\n X_ = tf.placeholder(dtype=tf.float32, shape=[None, 13], name=\"X\")\n y = tf.placeholder(dtype=tf.float32, shape=[None], name=\"Y\")\n\n with graph.name_scope(\"layer-1\"):\n test = tf.get_variable(\"test_variable\", initializer=tf.random_normal([19, 64]))\n w1 = tf.Variable(tf.random_normal([13, 64]), name=\"w1\")\n b1 = tf.Variable(tf.random_normal([64]), name=\"b1\")\n layer1 = tf.matmul(X_, w1) + b1\n layer1 = tf.nn.sigmoid(layer1, \"SigmoidAct\")\n res1 = tf.nn.dropout(layer1, 0.3)\n\n with graph.name_scope(\"layer-2\"):\n w2 = tf.Variable(tf.random_normal([64, 64]), name=\"w2\")\n b2 = tf.Variable(tf.random_normal([64]), name=\"b2\")\n layer2 = tf.matmul(res1, w2) + b2\n layer2 = tf.nn.sigmoid(layer2, \"SigmoidAct\")\n res2 = tf.nn.dropout(layer2, 0.3)\n\n with graph.name_scope(\"layer-3\"):\n w3 = tf.Variable(tf.random_normal([64, 1]), name=\"w3\")\n b3 = tf.Variable(tf.random_normal([1]), name=\"b3\")\n layer3 = tf.matmul(res2, w3) + b3\n res3 = tf.nn.sigmoid(layer3)\n with graph.name_scope(\"loss\"):\n loss = tf.reduce_mean(tf.square(res3 - y),name=\"loss-value\")\n tf.summary.scalar(\"loss\", loss)\n merged = tf.summary.merge_all()\n train = tf.train.GradientDescentOptimizer(0.001).minimize(loss)\n with tf.Session(graph=graph) as sess:\n train_writer = tf.summary.FileWriter(\"./train\", sess.graph)\n tf.global_variables_initializer().run(session=sess)\n\n for i in range(100000):\n _, merged_ = sess.run([train, merged], feed_dict={X_: x_train, y: y_train})\n if i % 100 == 0:\n train_writer.add_summary(merged_, i)\n print(\"setp: {}, loss value is {:.5f}:\".format(i, sess.run(loss, feed_dict={X_: x_train, y: y_train})))\n train_writer.close()\n\nif __name__ == '__main__':\n x_train, x_test, y_train, y_test = prepare_data()\n build_model(x_train, x_test, y_train, y_test)\n\n\n\n" }, { "alpha_fraction": 0.5460060834884644, "alphanum_fraction": 0.6076845526695251, "avg_line_length": 30.913978576660156, "blob_id": "02c5dd4f0cd6795c58c83bf8121b218b1c5a5000", "content_id": "2c083e5e1f41d2a5796094139fdeb4778ee9c05a", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 3067, "license_type": "no_license", "max_line_length": 82, "num_lines": 93, "path": "/image_process/hist.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 基于直方图均衡化的图像增强\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimage_path = \"./data/9.jpg\"\n# 直方图均衡化增强图像\ndef hist():\n image1 = cv2.imread(image_path)\n image_channel = cv2.split(image1)\n for i in range(3):\n # plt.hist(image_channel[i].ravel(), 256, [0, 256])\n # plt.show()\n cv2.equalizeHist(image_channel[i], image_channel[i])\n\n cv2.merge(image_channel, image1)\n return image1\n\n# 拉普拉斯算法增强\ndef laplus():\n image1 = cv2.imread(image_path)\n kernel = np.array([[0,-1,0],\n [-1,7,-1],\n [0,-1,0]])\n dist= cv2.filter2D(image1, cv2.CV_8UC3, kernel)\n return dist\n\n# 对数变换增强图像\ndef log_image():\n image1 = cv2.imread(image_path)\n image_log = np.uint8(15*np.log(np.array(image1)+1))\n cv2.normalize(image_log, image_log, 0, 255, cv2.NORM_MINMAX)\n cv2.convertScaleAbs(image_log, image_log)\n return image_log\n\n# gamma变换图像增强\ndef gamma():\n image = cv2.imread(image_path)\n fgamma = 2.5\n image_gamma = np.uint8(np.power((np.array(image) / 255.0), fgamma) * 255.0)\n cv2.normalize(image_gamma, image_gamma, 0, 255, cv2.NORM_MINMAX)\n cv2.convertScaleAbs(image_gamma, image_gamma)\n return image_gamma\n\ndef lin():\n \"\"\"双线性插值\"\"\"\n img = cv2.imread(\"./data/7.jpg\", cv2.IMREAD_GRAYSCALE) # load the gray image\n cv2.imwrite(\"img.jpg\", img)\n h, w = img.shape[:2]\n\n # shrink to half of the original\n a1 = np.array([[0.5, 0, 0], [0, 0.5, 0]], np.float32)\n d1 = cv2.warpAffine(img, a1, (w, h), borderValue=125)\n\n # shrink to half of the original and move\n a2 = np.array([[0.5, 0, w / 4], [0, 0.5, h / 4]], np.float32)\n d2 = cv2.warpAffine(img, a2, (w, h), flags=cv2.INTER_NEAREST, borderValue=125)\n # rotate based on d2\n a3 = cv2.getRotationMatrix2D((w / 2, h / 2), 90, 1)\n d3 = cv2.warpAffine(d2, a3, (w, h), flags=cv2.INTER_LINEAR, borderValue=125)\n\n cv2.imshow(\"img\", img)\n cv2.imshow(\"d1\", d1)\n cv2.imshow(\"d2\", d2)\n cv2.imshow(\"d3\", d3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\nif __name__ == '__main__':\n image = cv2.imread(\"/Users/wangheng/Desktop/Xnip2020-10-27_17-14-17.jpg\")\n resize = cv2.resize(image, (108, 108))\n cv2.imwrite(\"/Users/wangheng/Desktop/ressize.jpg\", resize)\n # shape = image.shape\n # height = shape[1] // 2\n # width = shape[0] // 2\n # image = cv2.resize(image, (height, width))\n # cv2.imwrite(\"data/7.jpg\", image)\n # result = hist()\n # cv2.namedWindow(\"image\", cv2.WINDOW_FREERATIO)\n # result = log_image()\n # cv2.imshow(\"image\", result)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n lin()\n # cv2.namedWindow(\"image\", cv2.WINDOW_FREERATIO)\n # original_image = cv2.imread(image_path, cv2.IMREAD_ANYCOLOR)\n # image = laplus()\n # image = hist()\n # image = log_image()\n # image = gamma()\n # cv2.imshow(\"original_image\", original_image)\n # cv2.imshow(\"image\", image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()" }, { "alpha_fraction": 0.46939387917518616, "alphanum_fraction": 0.48550236225128174, "avg_line_length": 49.556819915771484, "blob_id": "e41876d07c747f4dce8e7ae9a86c87bce6b48f4b", "content_id": "adaf41079f7c6993c316f53f0efee47f2f2ca419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13543, "license_type": "no_license", "max_line_length": 160, "num_lines": 264, "path": 
"/reinforcement_learning/DQN-Breakout-v4/atari_agent.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport gym\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\ntf.compat.v1.disable_eager_execution()\ntf.compat.v1.disable_v2_behavior()\n\nenv = gym.make('Breakout-v4')\n\n\nclass AgentDQN():\n def __init__(self, env, args):\n tf.reset_default_graph()\n self.env = env\n self.get_path() # 创建模型以及训练历史数据保存的路径\n self.double_q = True\n self.n_actions = self.env.action_space.n\n self.feature_shape = self.env.observation_space.shape\n self.n_features = np.prod(np.array(self.feature_shape)) # 内积, 获取到特征数量\n self.memory_size = 10000\n self.gamma = 0.99\n self.batch_size = 32\n self.n_steps = 7e6\n self.epsilon = 1.0\n self.epsilon_min = 0.07\n self.epsilon_decrement = (self.epsilon - self.epsilon_min) / 100000.\n # 每4步探索学习一次, 防止帧率过大,导致很多接近重复的frame\n self.learn_every_n_step = 4\n self.save_every_n_episode = 100\n # 前10000次采用随机探索,不学习\n self.start_learning_after_n_step = 10000\n # 每1000次更新一次target_net的参数\n self.update_network_after_n_step = 1000\n self.reward_his = []\n self.memory = np.zeros((self.memory_size, 3 + self.n_features * 2))\n # -----------------------------model-----------------------------#\n self.build_model()\n t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')\n e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')\n with tf.variable_scope('soft_replacement'):\n self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver(max_to_keep=10)\n\n load = bool(0)\n if args.test_dqn or load:\n print(\"Loading trained model: \" + self.model_path)\n if load: self.reward_his = pickle.load(open(self.reward_his_path, 'rb'))\n try:\n self.saver.restore(self.sess, save_path=tf.train.latest_checkpoint(self.model_path))\n except:\n self.saver.restore(self.sess, save_path=os.path.join(self.model_path, 'model_dqn-25581'))\n\n def build_model(self):\n n_features_tensor = [None] + [dim for dim in self.feature_shape]\n self.s = tf.placeholder(tf.float32, n_features_tensor, name='s')\n self.s_ = tf.placeholder(tf.float32, n_features_tensor, name='s_')\n self.a = tf.placeholder(tf.float32, [None, ], name='a')\n self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')\n\n # ------------------ initializers ------------------ #\n from tensorflow.python.ops.init_ops import VarianceScaling\n\n def lecun_normal(seed=None):\n return VarianceScaling(scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)\n\n def lecun_uniform(seed=None):\n return VarianceScaling(scale=1., mode='fan_in', distribution='uniform', seed=seed)\n\n w_initializer = lecun_normal()\n b_initializer = tf.zeros_initializer()\n\n # ------------------ build evaluate_net ------------------ #\n with tf.variable_scope('eval_net'):\n e_conv1 = tf.layers.conv2d(self.s, filters=32,\n kernel_size=[5, 5],\n strides=[4, 4],\n padding='same',\n activation=tf.nn.relu6,\n kernel_initializer=w_initializer,\n name='e_conv1')\n e_conv2 = tf.layers.conv2d(e_conv1, filters=64,\n kernel_size=[5, 5],\n strides=[2, 2],\n padding='same',\n activation=tf.nn.relu6,\n kernel_initializer=w_initializer,\n name='e_conv2')\n e_conv3 = tf.layers.conv2d(e_conv2, filters=64,\n kernel_size=[3, 3],\n strides=[1, 1],\n padding='same',\n activation=tf.nn.relu6,\n 
kernel_initializer=w_initializer,\n name='e_conv3')\n e_flat = tf.contrib.layers.flatten(e_conv3)\n e_dense1 = tf.layers.dense(inputs=e_flat,\n units=512,\n activation=tf.nn.relu,\n kernel_initializer=w_initializer,\n bias_initializer=b_initializer,\n name='e_dense1')\n self.q_eval = tf.layers.dense(inputs=e_dense1,\n units=self.n_actions,\n activation=None,\n kernel_initializer=w_initializer,\n bias_initializer=b_initializer,\n name='q_eval') # q_new shape: (batch_size, n_actions)\n # ------------------ build target net ------------------ #\n with tf.variable_scope('target_net'):\n t_conv1 = tf.layers.conv2d(inputs=self.s_,\n filters=32,\n kernel_size=[5, 5],\n strides=(4, 4),\n padding=\"same\",\n activation=tf.nn.relu6,\n kernel_initializer=w_initializer,\n name='t_conv1')\n t_conv2 = tf.layers.conv2d(inputs=t_conv1,\n filters=64,\n kernel_size=[5, 5],\n strides=(2, 2),\n padding=\"same\",\n activation=tf.nn.relu6,\n kernel_initializer=w_initializer,\n name='t_conv2')\n t_conv3 = tf.layers.conv2d(inputs=t_conv2,\n filters=64,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu6,\n kernel_initializer=w_initializer,\n name='t_conv3')\n t_flat = tf.contrib.layers.flatten(t_conv3)\n t_dense1 = tf.layers.dense(inputs=t_flat,\n units=512,\n activation=tf.nn.relu,\n kernel_initializer=w_initializer,\n bias_initializer=b_initializer,\n name='t_dense1')\n self.q_old = tf.layers.dense(inputs=t_dense1,\n units=self.n_actions,\n activation=None,\n kernel_initializer=w_initializer,\n bias_initializer=b_initializer,\n name='q_target')\n\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval, name='TD_error'))\n\n with tf.variable_scope(\"train\"):\n self.train_op = tf.train.RMSPropOptimizer(learning_rate=0.0001, decay=0.99).minimize(self.loss)\n\n def store_transition(self, s, a, r, d, s_):\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n transition = np.hstack((np.reshape(s, [-1]), [a, r, int(d)], np.reshape(s_, [-1])))\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def learn(self):\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size, replace=False) # sample batch memory from all memory\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n batch_memory = self.memory[sample_index, :]\n n_features_tensor = [self.batch_size] + [dim for dim in self.feature_shape]\n # -----------------------------------------------------------------------------------#\n s = np.reshape(batch_memory[:, :self.n_features], n_features_tensor)\n actions = batch_memory[:, self.n_features].astype(int)\n rewards = batch_memory[:, self.n_features + 1]\n done = batch_memory[:, self.n_features + 2]\n s_ = np.reshape(batch_memory[:, -self.n_features:], n_features_tensor)\n # -----------------------------------------------------------------------------------#\n\n q_eval, q_old = self.sess.run([self.q_eval, self.q_old], feed_dict={self.s: s, self.s_: s_})\n q_target_ = q_old.copy()\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n if self.double_q:\n q_new4next = self.sess.run(self.q_eval, feed_dict={self.s: s_})\n maxactionnext = np.argmax(q_new4next, axis=1)\n selected_q_target = q_old[batch_index, maxactionnext]\n else:\n selected_q_target = np.max(q_old, axis=1)\n q_target_[batch_index, actions] = rewards + (1 - done) * self.gamma * 
selected_q_target # change q_target w.r.t q_new's action\n\n _, loss = self.sess.run([self.train_op, self.loss], feed_dict={self.s: s, self.q_target: q_target_})\n return loss\n\n def train(self):\n global action\n episode = 0\n step = 0\n loss = 9.999\n rwd_avg_max = 0\n\n while step < self.n_steps:\n\n observation = self.env.reset()\n done = False\n episode_reward = 0.0\n\n while not done:\n action = self.make_action(observation, test=True)\n observation_, reward, done, info = self.env.step(action)\n episode_reward += reward\n\n self.store_transition(observation, action, reward, done, observation_)\n if (step > self.start_learning_after_n_step) and (step % self.learn_every_n_step == 0):\n loss = self.learn()\n if (step > self.start_learning_after_n_step) and (step % self.update_network_after_n_step == 0):\n self.sess.run(self.target_replace_op)\n\n print('Step: %i, Episode: %i, Action:%i, Reward:%.2f, Epsilon: %.5f, Loss:%.5f' % (step, episode, action, reward, self.epsilon, loss), end='\\r')\n self.epsilon = self.epsilon - self.epsilon_decrement if self.epsilon > self.epsilon_min else self.epsilon_min # decreasing epsilon\n observation = observation_\n step += 1\n\n episode += 1\n self.reward_his.append(episode_reward)\n if step < 1000000:\n # 前1000,000步每100步保存一次模型数据\n if episode % self.save_every_n_episode == 0:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n self.saver.save(self.sess, os.path.join(self.model_path, 'model-dqn'), global_step=episode)\n else:\n rwd_avg = np.mean(self.reward_his[-20:])\n if rwd_avg > rwd_avg_max:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n self.saver.save(self.sess, os.path.join(self.model_path, 'model-dqn'), global_step=episode)\n rwd_avg_max = rwd_avg\n print(\"Saving best model with avg reward: \", rwd_avg_max)\n if episode % self.save_every_n_episode == 0:\n pickle.dump(self.reward_his, open(self.reward_his_path, 'wb'), True)\n print('Step: %i/%i, Episode: %i, Action: %i, Episode Reward: %.0f, Epsilon: %.2f, Loss: %.5f' % (\n step, self.n_steps, episode, action, episode_reward, self.epsilon, loss))\n\n def make_action(self, observation, test):\n observation = np.expand_dims(observation, axis=0)\n if test: self.epsilon = 0.01 # 如果是测试环境, 探索概率设置为1%\n if np.random.uniform() > self.epsilon:\n action_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})\n action = np.argmax(action_value)\n else:\n action = np.random.randint(0, self.n_actions)\n return action # self.env.get_random_action()\n\n def get_path(self):\n directory = \"./model\"\n try:\n if not os.path.exists(directory):\n os.mkdir(directory)\n except:\n print(\"Filed to create result directory.\")\n directory = \"./\"\n self.reward_his_path = os.path.join(directory, \"reward_his_dqn.pkl\")\n self.model_path = directory\n" }, { "alpha_fraction": 0.606082558631897, "alphanum_fraction": 0.6241853833198547, "avg_line_length": 35.342105865478516, "blob_id": "7aeaa1a95e7956d84e99402cf33bcbadcee73516", "content_id": "eb74a05b72e0f781a455193d8fb0a0decc2d7da7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1433, "license_type": "no_license", "max_line_length": 121, "num_lines": 38, "path": "/DataAnalyse/chapter01.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding utf-8 -*-\nimport numpy as np\nfrom collections import defaultdict\ndataset_filename = \"affinity_dataset.txt\"\nx = np.loadtxt(dataset_filename)\nfeatures = [\"bread\", \"milk\", 
\"cheese\", \"apples\", \"bananas\"]\nvalid_rules=defaultdict(int)\ninvalid_rules=defaultdict(int)\nnum_occurances=defaultdict(int)\ndef demo2():\n for sample in x:\n for premist in range(5):\n if sample[premist]==0:continue\n num_occurances[premist]+=1\n for conclusion in range(5):\n if conclusion==premist:continue\n if sample[conclusion] == 1:\n valid_rules[(premist,conclusion)] +=1\n else:\n invalid_rules[(premist,conclusion)] +=1\n support = valid_rules\n confident = defaultdict(float)\n for premist,conclusion in support.keys():\n confident[(premist,conclusion)] = support[(premist,conclusion)]/num_occurances[premist]\n for premist,conclusion in confident:\n premist_name=features[premist]\n conclusion_name=features[conclusion]\n print(\"规则:如果一个人购买了{0},他还会购买{1}的概率为:{2:.4f}\".format(premist_name,conclusion_name,confident[(premist,conclusion)]))\n print(\"支持度为:{0}\".format(support[(premist,conclusion)]))\n print((\"-\"*30))\n\ndef test():\n dd=defaultdict(int)\n dd[(1,3)]+=12\n dd[(2,4)]+=11\n for k,v in dd:\n print(v)\ndemo2()\n" }, { "alpha_fraction": 0.559066116809845, "alphanum_fraction": 0.5825527906417847, "avg_line_length": 41.325443267822266, "blob_id": "c51d4409c719d0cef6b06c4f5bb22bf5ae2b7887", "content_id": "1212b9ebcf96d3c14a2271d726fd8d9b518351e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7193, "license_type": "no_license", "max_line_length": 129, "num_lines": 169, "path": "/reinforcement_learning/SR/dsr_brain_keras.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "'''\ntensorflow: 2.3.0\n'''\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import load_model\nfrom tensorflow import keras\nfrom dsr_maze_env import DSRMaze\nimport numpy as np\n\n\nclass RL_Brain():\n def __init__(self, n_features, n_action, memory_size=10, batch_size=32, gamma=0.9, fi_size=8):\n self.n_features = n_features\n self.n_actions = n_action\n self.whole_model = self.build_model()\n self.memory_size = memory_size\n self.replay_buffer = np.zeros((self.memory_size, n_features * 2 + 2), np.float)\n self.count = 0\n self.batch_size = batch_size\n self.gamma = gamma\n self.fi_size = fi_size\n\n self.input_states = Input((self.n_features,), name='input_states')\n self.input_fi = Input((self.fi_size,), name='input_fi')\n\n self.branch_1_model = self._build_branch1(self.input_states)\n self.branch_2_model = self._build_branch2(self.input_fi)\n\n def _build_branch1(self, input_):\n encode_layer1 = Dense(10, 'relu', name='encode/layer1')(input_)\n encode_layer2 = Dense(10, 'relu', name='encode/layer2')(encode_layer1)\n fi = Dense(self.fi_size, 'relu', name='fi')(encode_layer2)\n\n decode_layer1 = Dense(10, 'relu', name='decode/layer1')(fi)\n decode_layer2 = Dense(10, 'relu', name='decode/layer2')(decode_layer1)\n decode_layer3 = Dense(2, name='decode/layer3')(decode_layer2)\n\n R = Dense(1, None, False, name='R')(fi)\n model = Model(inputs=input_, outputs=[fi, decode_layer3, R])\n\n loss = {'R': 'mse', 'decode/layer3': 'mse'}\n model.compile('adam', loss, metrics=['mse'])\n return model\n\n def _build_branch2(self, input_):\n mus = []\n loss = {}\n for i in range(self.n_actions):\n mu = Dense(5, 'relu', name='mu/m%s/layer1' % i, kernel_initializer='zero')(input_)\n mu = Dense(5, 'relu', name='mu/m%s/layer2' % i, kernel_initializer='zero')(mu)\n mu = Dense(8, 'relu', name='mu/m%s/layer3' % i, 
kernel_initializer='zero')(mu)\n mus.append(mu)\n loss['mu/m%s/layer3' % i] = 'mae'\n\n model = Model(inputs=input_, outputs=mus)\n model.compile(loss=loss, metrics=['mse'])\n return model\n\n def build_model(self):\n input_ = Input(shape=(self.n_features,), name='input')\n layer1 = Dense(64, 'relu', name='encode/layer1')(input_)\n layer2 = Dense(64, 'relu', name='encode/layer2')(layer1)\n layer3 = Dense(10, 'relu', name='encode/layer3')(layer2)\n fai = Dense(5, 'relu', name='fai')(layer3)\n decoder1 = Dense(10, 'relu', name='decode/layer1')(fai)\n decoder2 = Dense(64, 'relu', name='decode/layer2')(decoder1)\n decoder3 = Dense(64, 'relu', name='decode/layer3')(decoder2)\n s_hat = Dense(self.n_features, name='output_s_hat')(decoder3)\n R = Dense(1, name='R', use_bias=False)(fai)\n mus = []\n for i in range(self.n_actions):\n mu = Dense(10, 'relu', name='mu/m%s/layer1' % i, kernel_initializer='zero')(fai)\n mu = Dense(28, 'relu', name='mu/m%s/layer2' % i, kernel_initializer='zero')(mu)\n mu = Dense(5, 'relu', name='mu/m%s/layer3' % i, kernel_initializer='zero')(mu)\n mus.append(mu)\n outputs = [fai, R, s_hat]\n outputs = outputs + mus\n model = Model(inputs=input_, outputs=outputs)\n loss = {'R': 'mse', 'output_s_hat': 'mse'}\n for i in range(self.n_actions):\n loss['mu/m%s/layer3' % i] = 'mse'\n model.compile(keras.optimizers.RMSprop(), loss=loss, metrics=['mse'])\n return model\n\n def learn_theta_and_w(self, states, r):\n self.branch_1_model.fit({'input_states': states}, {'decode/layer3': states, 'R': r},batch_size=100, epochs=10, verbose=0)\n self.branch_1_model.save('./model/branch_1_model.h5')\n\n def learn_mu(self, state, state_, action_index):\n fi = self.branch_1_model.predict(state)[0]\n fi_ = self.branch_1_model.predict(state_)[0]\n\n w = self.branch_1_model.get_layer('R').get_weights()[0]\n mus_ = np.squeeze(self.branch_2_model.predict(fi_))\n max_index = np.argmax(np.squeeze(np.matmul(mus_, w)), axis=0)\n\n label = fi + self.gamma * mus_[max_index]\n outputs = {}\n for i in range(self.n_actions):\n outputs['mu/m%s/layer3' % i] = np.zeros((1, self.fi_size))\n outputs['mu/m%s/layer3' % action_index] = label\n self.branch_2_model.fit({'fi': fi}, outputs, epochs=10, verbose=1)\n\n def choose_action(self, state, is_random=False):\n if is_random:\n return np.random.choice(self.n_actions)\n w = self.branch_1_model.get_layer('R').get_weights()[0]\n fi = self.branch_1_model.predict(state)[0]\n mus = np.squeeze(self.branch_2_model.predict(fi))\n rs = np.squeeze(np.matmul(mus, w))\n if len(set(rs)) == 1:\n action_index = np.random.choice(self.n_actions)\n else:\n action_index = np.argmax(rs)\n return action_index\n\n def append_to_replay_buffer(self, s, a, r, s_):\n transition = np.hstack([s, a, r, s_])\n self.replay_buffer[self.count % self.memory_size] = transition\n self.count += 1\n\n @tf.function\n def traceme(self, input_1, input_2):\n return self.whole_model(input_1), self.branch_1_model(input_1), self.branch_2_model(input_2)\n\n def visualize_model(self):\n log_dir = './log'\n writer = tf.summary.create_file_writer(log_dir)\n tf.summary.trace_on(graph=True)\n self.traceme(tf.zeros(shape=(5, 2)), tf.zeros(shape=(1, self.fi_size)))\n with writer.as_default():\n tf.summary.trace_export('model_trace', step=0)\n # keras.utils.plot_model(self.whole_model, './dsr_model.png', True, True)\n\n\ndef load_trained_model():\n model: Model = load_model('./model/branch_1_model.h5')\n print(model.predict(np.array([[-0.875, -0.125]])))\n\n\nif __name__ == '__main__':\n eps = 100\n env = 
DSRMaze('dsr-maze')\n    brain = RL_Brain(2, 4, memory_size=10000)\n\n    for i in range(eps):\n        state = env.get_current_state()\n        done = False\n        c = 0\n        while not done:\n            action_index = brain.choose_action(state)\n            s_next, reward, done = env.step(action_index)\n            brain.append_to_replay_buffer(np.squeeze(state), action_index, reward, np.squeeze(s_next))\n            choices = np.random.choice(brain.count if brain.count < brain.memory_size else brain.memory_size, 100, replace=True)\n            states = brain.replay_buffer[choices, :brain.n_features]\n            r = brain.replay_buffer[choices, brain.n_features + 1]\n            if brain.count > 100:\n                # First, train part 1: the encoder and the reward weights w\n                if c < 20:\n                    brain.learn_theta_and_w(states, r)\n                    c += 1\n                # Then train part 2: m_alpha\n                brain.learn_mu(state, s_next, action_index)\n            state = s_next\n            if done:\n                env.reset()\n# print(brain.model.get_layer('R').get_weights())\n" }, { "alpha_fraction": 0.5464481115341187, "alphanum_fraction": 0.5464481115341187, "avg_line_length": 18.210525512695312, "blob_id": "e892553c771d2e67fe117339eebad180c7648e3d", "content_id": "e3fd099a6950d5ad96fe73f8a839dbbe840a4bdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/graph/shortest_path.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# Shortest path in a directed acyclic graph\nfrom __future__ import absolute_import, division, print_function\n\nclass Node:\n    def __init__(self, data, left=None, right=None):\n        self.data = data\n        self.left = left\n        self.right = right\n\nclass ShortestPath:\n\n    def __init__(self):\n        pass\n\n    def heap_sort(self, array):\n        \"\"\"\n        Heap sort\n        :return:\n        \"\"\"\n\n" }, { "alpha_fraction": 0.7819548845291138, "alphanum_fraction": 0.7819548845291138, "avg_line_length": 65.5, "blob_id": "73d51ff7be97b6f0db07f4ab2abf4da0cf34993d", "content_id": "04497f2335c454fe39f960fd87985c1b9f02ff37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "no_license", "max_line_length": 102, "num_lines": 2, "path": "/README.md", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "\n## Machine learning algorithm\n**In this section, you will learn about each machine learning algorithm and how to implement it in Python.**" }, { "alpha_fraction": 0.6261904835700989, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 23.38888931274414, "blob_id": "76745c0ca3ac8f638e9747b453e2180898722a76", "content_id": "77f332f331a871bca08884603e89d9e93ab85de1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 44, "num_lines": 18, "path": "/tree/AVLTree.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# AVL tree\nfrom __future__ import absolute_import\n\nfrom tree.binary_search_tree import TreeNode\nfrom tree.binary_search_tree import BSTree\n\nclass AVLNode(TreeNode):\n    def __init__(self, value=None):\n        TreeNode.__init__(self, value)\n        self.height = 1\n\nclass AVLTree(BSTree):\n    def __init__(self):\n        super(AVLTree, self).__init__()\n\nif __name__ == '__main__':\n    node = AVLNode(10)\n    print(node.value)" }, { "alpha_fraction": 0.6048702001571655, "alphanum_fraction": 0.6133700609207153, "avg_line_length": 32.74418640136719, "blob_id": "c461240c99019a236653c273cad3909a652f67e7", "content_id": "306880923eeac4b64de874549bb40b673b18c3e2", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 4479, "license_type": "no_license", "max_line_length": 129, "num_lines": 129, "path": "/reinforcement_learning/policy_gradient/torch_model.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions import Categorical\nfrom torch.nn import functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nMAX_EPISODE = 30000\nRENDER = True\n\n\nclass DiscretePolicyGradient(nn.Module):\n def __init__(self, n_features, n_actions, num_hiddens=64):\n super(DiscretePolicyGradient, self).__init__()\n self.linear1 = nn.Linear(n_features, num_hiddens)\n self.linear2 = nn.Linear(num_hiddens, num_hiddens)\n self.output = nn.Linear(num_hiddens, n_actions)\n\n def forward(self, state):\n x = F.tanh(self.linear1(state))\n x = F.tanh(self.linear2(x))\n x = F.softmax(self.output(x), dim=1)\n return x\n\n\nclass ReplayBuffer:\n def __init__(self):\n self.replay_buffer = []\n self.count = 0\n\n def store_transition(self, s, a, r):\n self.replay_buffer.append(np.array([s, a, r]))\n self.count += 1\n\n def get_observations(self):\n return np.vstack(np.vstack(self.replay_buffer)[:, 0])\n\n def get_actions(self):\n return np.vstack(np.vstack(self.replay_buffer)[:, 1])\n\n def clear(self):\n self.replay_buffer = []\n self.count = 0\n\n def get_reward(self, i):\n return np.vstack(self.replay_buffer)[i, 2]\n\n\nclass Agent:\n def __init__(self, n_features, n_actions, num_hiddens=64):\n self.n_features = n_features\n self.n_actions = n_actions\n self.model = DiscretePolicyGradient(n_features, n_actions, num_hiddens)\n self.opt = optim.Adam(self.model.parameters())\n self.replay_buffer = ReplayBuffer()\n\n self.gamma = 0.9\n self.writer = SummaryWriter(\"./torch_cartpole_log\")\n\n # 根据当前观察选择一个action\n def choose_action(self, observation):\n if not isinstance(observation, torch.Tensor):\n observation = torch.FloatTensor(observation)\n if observation.dim() == 1:\n observation = observation.unsqueeze(0)\n # 关闭梯度计算\n with torch.no_grad():\n action_prob = self.model(observation)\n c = Categorical(action_prob)\n action_index = c.sample()[0]\n return action_index\n\n # 执行一次学习\n def learn(self):\n discounted_reward_norm: np.ndarray = self.__discount_and_norm_rewards()\n observations = torch.FloatTensor(self.replay_buffer.get_observations())\n actions = torch.LongTensor(self.replay_buffer.get_actions())\n self.model.train()\n acts_prob = self.model(observations)\n # 这里需要注意: torch求梯度只能对标量求,不能对向量求解\n loss = F.binary_cross_entropy(acts_prob,\n torch.autograd.Variable(F.one_hot(actions.squeeze(1), self.n_actions).type(torch.float32)),\n torch.from_numpy(discounted_reward_norm).unsqueeze(1))\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n # 一次学习过后, 清空replay_buffer中的全部数据\n self.replay_buffer.clear()\n return np.sum(discounted_reward_norm)\n\n # 将reward进行折扣\n def __discount_and_norm_rewards(self):\n size = self.replay_buffer.count\n discounted_rewards = np.zeros((size))\n running_add = 0\n for i in range(self.replay_buffer.count - 1, -1, -1):\n running_add = running_add * self.gamma + self.replay_buffer.get_reward(i)\n discounted_rewards[i] = running_add\n discounted_rewards -= np.mean(discounted_rewards)\n discounted_rewards /= np.std(discounted_rewards)\n return discounted_rewards\n\n\ndef run():\n env = gym.make('CartPole-v0')\n env.seed(1)\n n_actions = env.action_space.n\n n_features = 
env.observation_space.shape[0]\n agent = Agent(n_features, n_actions)\n for episode in range(MAX_EPISODE):\n observation = env.reset()\n while True:\n if RENDER:\n env.render()\n obs_tensor = torch.FloatTensor(observation).unsqueeze(0)\n action = agent.choose_action(obs_tensor).cpu().numpy()\n next_observation, reward, done, info = env.step(action)\n agent.replay_buffer.store_transition(observation, action, reward)\n observation = next_observation\n if done:\n agent.learn()\n break\n env.close()\n\n\nif __name__ == '__main__':\n run()\n" }, { "alpha_fraction": 0.6386191844940186, "alphanum_fraction": 0.646170437335968, "avg_line_length": 32.10714340209961, "blob_id": "790fd2bcd71663d81b4aedeec631635d2b731c19", "content_id": "4877e34e3ee76a03719707f7beb59c134a52eab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 96, "num_lines": 56, "path": "/reinforcement_learning/DQN-Breakout-v4/runner.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import argparse\nfrom atari_agent import AgentDQN\nfrom environment import Environment\nimport numpy as np\n\nseed = 11037\n\n\ndef parse():\n parser = argparse.ArgumentParser(description=\"runner\")\n parser.add_argument('--env_name', default=None, help='environment name')\n parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')\n parser.add_argument('--train_dqn', action='store_true', help='whether train DQN')\n parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')\n parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')\n parser.add_argument('--video_dir', default=None, help='output video directory')\n parser.add_argument('--do_render', action='store_true', help='whether render environment')\n args = parser.parse_args()\n return args\n\n\ndef run(args):\n if args.train_dqn:\n env_name = args.env_name or 'BreakoutNoFrameskip-v4'\n env = Environment(env_name, args, atari_wrapper=True)\n agent = AgentDQN(env, args)\n agent.train()\n\n if args.test_dqn:\n env = Environment('BreakoutNoFrameskip-v4', args, atari_wrapper=True, test=True)\n agent = AgentDQN(env, args)\n test(agent, env, total_episodes=100)\n\n\ndef test(agent, env, total_episodes=30):\n rewards = []\n env.seed(seed)\n for i in range(total_episodes):\n state = env.reset()\n done = False\n episode_reward = 0.0\n\n # playing one game\n while not done:\n action = agent.make_action(state, test=True)\n state, reward, done, info = env.step(action)\n episode_reward += reward\n\n rewards.append(episode_reward)\n print('Run %d episodes' % (total_episodes))\n print('Mean:', np.mean(rewards))\n\n\nif __name__ == '__main__':\n args = parse()\n run(args)\n" }, { "alpha_fraction": 0.5275590419769287, "alphanum_fraction": 0.6535432934761047, "avg_line_length": 18.615385055541992, "blob_id": "5f0e1e423f39771fc79c5a5aeefee9654c287a52", "content_id": "7e93710f7becf8a8bd8b79c464cd5f7b8a7cf8b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 31, "num_lines": 13, "path": "/graph/graph_01.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as 
pt\nw=[1,2,5,2,7]\nx=[12,23,11,23,11]\ny=[1,3,8,4,0]\nz=[17,11,12,19,10]\n\npt.plot(x,y,lw=2,label='Marry')\npt.plot(w,z,lw=2,label='Tom')\npt.xlabel('month')\npt.ylabel('dollars(million)')\npt.legend()\npt.title('Proper test')\npt.show()" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 11, "blob_id": "732e65288f9631c96dd90d369aba1038db05d531", "content_id": "4ae76a758b4e849e237c89b1f0a4a49507555b59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 11, "num_lines": 1, "path": "/DataAnalyse/credit_card/README.md", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "### Credit-card fraud detection" }, { "alpha_fraction": 0.631436288356781, "alphanum_fraction": 0.6395664215087891, "avg_line_length": 36, "blob_id": "55042d4174a48296a5a089ba2222e5f9adf1b1a6", "content_id": "29ac9b567f12e4defad283d7b23c56f6a28ca5b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/recomend/movielens_tensorflow.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\ndef model(user_batch, item_batch, user_num, item_num, dim=5, device='/cpu:0'):\n    with tf.device(device):\n        with tf.variable_scope('lsi', reuse=True):\n            bias_global = tf.get_variable('bias_global', shape=[])\n\n            w_bias_user = tf.get_variable('embd_bias_user', shape=[user_num])" }, { "alpha_fraction": 0.6476739048957825, "alphanum_fraction": 0.6547161936759949, "avg_line_length": 44.47572708129883, "blob_id": "c301368c98e485fc31336acfccef8b1124594f92", "content_id": "71e2a4a364fccd02458b99abd76e82f0d629061d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4686, "license_type": "no_license", "max_line_length": 121, "num_lines": 103, "path": "/ml/creditcard/creditcard_tf.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.metrics import confusion_matrix, recall_score, classification_report\nfrom sklearn.preprocessing import StandardScaler\n\n\ndata = pd.read_csv('./creditcard.csv')\ncount_class = pd.value_counts(data['Class'], sort=True)\ncount_class.plot(kind='bar')\nplt.title(\"Fraud class histogram\")\nplt.xlabel(\"Class\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nscaler = StandardScaler()\ndata['normAmount'] = scaler.fit_transform(data['Amount'].values.reshape(-1, 1))\ndata = data.drop(['Time', 'Amount'], axis=1)\nprint(data.head())\n\nX = data.iloc[:, data.columns != 'Class']\ny = data.iloc[:, data.columns == 'Class']\n\nnumber_records_fraud = len(data[data.Class == 1])\nfraud_indicies = np.array(data[data.Class == 1].index)\n\nnormal_indicies = data[data.Class == 0].index\n\nrandom_normal_indicies = np.random.choice(normal_indicies, number_records_fraud, replace=False)\nrandom_normal_indicies = np.array(random_normal_indicies)\n\nunder_sample_indicies = np.concatenate([fraud_indicies, random_normal_indicies])\nunder_sample_data = data.iloc[under_sample_indicies, :]\n\nX_under_sample_data = under_sample_data.iloc[:, under_sample_data.columns != 
'Class']\ny_under_sample_data = under_sample_data.iloc[:, under_sample_data.columns == 'Class']\n\n# Showing ratio\nprint(\"Percentage of normal transactions: \", len(under_sample_data[under_sample_data.Class == 0])/len(under_sample_data))\nprint(\"Percentage of fraud transactions: \", len(under_sample_data[under_sample_data.Class == 1])/len(under_sample_data))\nprint(\"Total number of transactions in resampled data: \", len(under_sample_data))\n\nfrom sklearn.model_selection import train_test_split\n# Whole dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\nprint(\"Number transactions train dataset: \", len(X_train))\nprint(\"Number transactions test dataset: \", len(X_test))\nprint(\"Total number of transactions: \", len(X_train)+len(X_test))\n\nX_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = \\\n train_test_split(X_under_sample_data, y_under_sample_data, test_size=0.3, random_state=0)\n\nprint(\"\")\nprint(\"Number transactions train dataset: \", len(X_train_undersample))\nprint(\"Number transactions test dataset: \", len(X_test_undersample))\nprint(\"Total number of transactions: \", len(X_train_undersample)+len(X_test_undersample))\n\n#Recall = TP/(TP+FN)\ndef print_Kfold_scores(x_train_data, y_train_data):\n fold = KFold(5, shuffle=False)\n c_param_range = [0.01, 0.1, 1, 10, 100]\n\n result_table = pd.DataFrame(index=range(len(c_param_range)), columns=['c_paramerter', 'Mean recall score'])\n result_table['c_parameter'] = c_param_range\n j = 0\n for c_param in c_param_range:\n print('-------------------------------------------')\n print('C parameter: ', c_param)\n print('-------------------------------------------')\n print('')\n recall_accs = []\n i = 0\n for train_index, validation_index in fold.split(y_train_data):\n lr = LogisticRegression(penalty='l2', C=c_param)\n lr.fit(x_train_data.iloc[train_index, :], y_train_data.iloc[train_index, :].values.ravel())\n y_pred_undersample = lr.predict(x_train_data.iloc[validation_index, :].values)\n\n recall_acc = recall_score(y_train_data.iloc[validation_index, :].values, y_pred_undersample)\n recall_accs.append(recall_acc)\n print('Iteration ', i, ': recall score = ', recall_acc)\n i += 1\n # The mean value of those recall scores is the metric we want to save and get hold of.\n result_table.ix[j, 'Mean recall score'] = np.mean(recall_accs)\n j += 1\n print('')\n print('Mean recall score ', np.mean(recall_accs))\n print('')\n\n result_table['Mean recall score'] = result_table['Mean recall score'].astype(\"float32\")\n print(result_table['Mean recall score'])\n print(type(result_table['Mean recall score']))\n print(result_table['Mean recall score'].idxmax())\n best_c = result_table.iloc[result_table['Mean recall score']].values\n # Finally, we can check which C parameter is the best amongst the chosen.\n print('*********************************************************************************')\n print('Best model to choose from cross validation is with C parameter = ', best_c)\n print('*********************************************************************************')\n return best_c\n\nbest_c = print_Kfold_scores(X_train_undersample, y_train_undersample)\n\n\n" }, { "alpha_fraction": 0.46127739548683167, "alphanum_fraction": 0.5763652920722961, "avg_line_length": 34.239131927490234, "blob_id": "c540d309b7d1b47b9dad2c7ab8a96a083d07b61b", "content_id": "b69906543c70bc852e9549987823d9cd55c9695b", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 3565, "license_type": "no_license", "max_line_length": 148, "num_lines": 92, "path": "/graph/matplotlib_01.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n#这一个程序主要是几个简单的绘图软件\nimport matplotlib.pyplot as plt\nimport csv\ndef drawLine():\n x=[1,3,5,7,9,11,13,15]\n y=[12.3,12.4,12.8,13.09,14.56,13.67,15.012,14.177]\n plt.plot(x,y,label=\"First Line\",color=\"g\")\n plt.plot([2, 4, 6, 8, 10, 12, 14, 16], [13.3, 13.4, 13.8, 14.09, 15.56, 14.67, 16.012, 15.177], label=\"Second Line\",\n color=\"r\")\n plt.bar(x,y,label=\"BarData\",color=\"g\")\n plt.bar([2,4,6,8,10,12,14,16],[13.3,13.4,13.8,14.09,15.56,14.67,16.012,15.177], label=\"BarDaRta\", color=\"r\")\n plt.legend()#legend函数生成默认图例\n plt.xlabel(\"次数\")\n print(\"次数\")\n plt.ylabel(\"数量\")\n plt.title(\"2017年10月21日\")\n plt.show()\ndef drawHist():\n population_ages = [22, 55, 62, 45, 21, 22, 34, 42, 42, 4, 99,14,15, 102, 110, 120, 121, 122, 130, 111, 115, 112, 80, 75, 65, 54, 44, 43, 42, 48]\n bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130]\n plt.hist(population_ages, bins, histtype='bar', rwidth=0.9)\n #hist()函数绘制直方图,要传递的参数为hist(总的数据列表,数据区间梯度,直方图histtype = \"bar\",直方图条宽度rwidth= )\n plt.xlabel('x')#绘制x轴所代表的变量\n plt.ylabel('y')#绘制y轴所代表的变量\n plt.title('Interesting Graph\\nCheck it out')\n plt.legend()\n plt.show()\ndef drawScatter():\n x=[1,2,3,4,5,6,7,8]\n y=[5,2,4,6,12,11,9,12]\n plt.scatter(x,y,color=\"r\",s=100,marker=\"+\")\n #x,y是坐标,color是set color,'s' is set the size of point,'marker' is used to set the style of the point\n plt.title(\"Scatter Photo\")\n plt.legend()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.xlim(0,10)\n plt.ylim(0,15)\n plt.show()\ndef drawStackPlot():\n days = [1.0,2.0,3.0,4.0,5.0,6.0,7.0]\n sleeping=[6.0,6.5,6.2,6.9,7.0,7.1,6.2]\n eating= [1.0,1.2,0.9,0.8,1.0,1.1,1.2]\n learing= [9.2,9.3,9.1,7.0,5.1,6.8,8.1]\n working= [2.0,2.4,4.0,3.2,1.9,0.8,0.5]\n playing= [5.8,4.6,3.8,6.1,9.0,8.2,8.0]\n plt.plot([],[],label=\"sleeping\",color='y',linewidth=\"5\")\n plt.plot([],[],label=\"eating\", color='r', linewidth=\"5\")\n plt.plot([],[],label=\"study\", color='m', linewidth=\"5\")\n plt.plot([],[],label=\"working\", color='k', linewidth=\"5\")\n plt.plot([],[],label=\"plaing\", color='c', linewidth=\"5\")\n plt.stackplot(days, sleeping,eating,learing,working,playing,colors=['y','r','m','k','c'])\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.legend()\n plt.show()\ndef drawPie():\n slices=[2,7,7,12]#数据\n activities=['sleeping','eating','working','playing']#设置标签属性\n colors=['c','m','r','b']\n\n plt.pie(slices,\n labels=activities,\n colors=colors,\n startangle=90,#开始绘制饼图的角度,按照逆时针绘制\n shadow=True,#阴影\n explode=(0.1, 0.0, 0.2, 0),#将第一个切片拉出0.1,\n autopct='%d%%')#将百分比放上\n plt.title('Interesting Graph\\nCheck it out')\n plt.show()\ndef drawbyfilefata():\n x=[]\n y=[]\n with open(\"value.csv\",\"r\") as csvfile:\n plots = csv.reader(csvfile,delimiter=\",\")\n for plot in plots:\n x.append(float(plot[0]))\n y.append(float(plot[1]))\n plt.plot(x,y,label=\"csv File value\",color=\"r\")\n plt.title(\"CSV FILE DATA\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.legend()\n plt.show()\n\n#drawLine() 绘制直线图&绘制条形图\n#drawHist() 绘制直方图\ndrawScatter()#绘制散点图\n#drawStackPlot()#绘制堆叠图\n#drawPie()\n#drawbyfilefata()" }, { "alpha_fraction": 0.5297029614448547, "alphanum_fraction": 0.5396039485931396, "avg_line_length": 20.263158798217773, "blob_id": "1e195098614e1caa3833c86e8fcde03581f451f6", "content_id": 
"5546aed72051cca8633957202707df509f827df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 42, "num_lines": 19, "path": "/reinforcement_learning/policy_iterate.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 策略迭代方法\n\nclass PolicyIterate(object):\n def __init__(self):\n self.v = []\n\n def policy_ietrate(self, grid_mdp):\n \"\"\"\n :type grid_mdp: GridEnv\n \"\"\"\n for i in range(1000):\n self.policy_evaluate(grid_mdp)\n self.policy_improve(grid_mdp)\n\n def policy_evaluate(self, grid_mdp):\n pass\n\n def policy_improve(self, grid_mdp):\n pass\n" }, { "alpha_fraction": 0.4308958053588867, "alphanum_fraction": 0.46636196970939636, "avg_line_length": 39.51852035522461, "blob_id": "4566cb3a429e3ea58d3d0bcee9539c41c1450a43", "content_id": "a86846298d71f2026f53c469e833ca5f49c864d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5470, "license_type": "no_license", "max_line_length": 149, "num_lines": 135, "path": "/reinforcement_learning/SR/dsr_maze_env.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport numpy as np\nimport time\n\nUNIT = 40 # pixels\nMAZE_H = 8 # grid height\nMAZE_W = 8 # grid width\n\n\nclass DSRMaze(tk.Tk, object):\n def __init__(self, title='maze'):\n super(DSRMaze, self).__init__()\n self.action_space = ['u', 'd', 'l', 'r']\n self.action_size = len(self.action_space)\n self.feature_size = 2\n self.title(title)\n self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))\n self._hells = []\n self._build_maze()\n\n self.state_size = 2\n self.action_size = 4\n self.done = False\n\n def _build_maze(self):\n self.canvas = tk.Canvas(self, bg='white', height=MAZE_H * UNIT, width=MAZE_W * UNIT)\n # draw line\n for r in range(0, UNIT * MAZE_H, UNIT):\n x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r\n self.canvas.create_line(x0, y0, x1, y1)\n for c in range(0, MAZE_W * UNIT, UNIT):\n x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT\n self.canvas.create_line(x0, y0, x1, y1)\n\n # create hell\n hell_origin = np.array([[20 + UNIT * 3, 20 + UNIT * 1],\n [20 + UNIT * 3, 20 + UNIT * 2],\n [20 + UNIT * 3, 20 + UNIT * 3],\n [20 + UNIT * 1, 20 + UNIT * 5],\n [20 + UNIT * 1, 20 + UNIT * 6],\n [20 + UNIT * 4, 20 + UNIT * 5],\n [20 + UNIT * 5, 20 + UNIT * 5],\n [20 + UNIT * 6, 20 + UNIT * 5],\n [20 + UNIT * 5, 20]])\n for i in range(hell_origin.shape[0]):\n hell_center = hell_origin[i]\n hell = self.canvas.create_rectangle(hell_center[0] - 15, hell_center[1] - 15,\n hell_center[0] + 15, hell_center[1] + 15,\n fill='black')\n self._hells.append(hell)\n\n goal_origin = np.array([20 + UNIT * 7, 20 + UNIT * 1])\n self.goal = self.canvas.create_oval(goal_origin[0] - 15, goal_origin[1] - 15,\n goal_origin[0] + 15, goal_origin[1] + 15,\n fill='yellow')\n agent_origin = np.array([20, 20])\n self.agent = self.canvas.create_rectangle(agent_origin[0] - 15, agent_origin[1] - 15,\n agent_origin[0] + 15, agent_origin[1] + 15, fill='red')\n self.canvas.pack()\n\n def reset(self):\n self.update()\n self.end = False\n self.canvas.delete(self.agent)\n origin = np.array([20, 20])\n self.agent = self.canvas.create_rectangle(origin[0] - 15, origin[1] - 15,\n origin[0] + 15, origin[1] + 15,\n fill='red')\n self.done = False\n # return state: the distance about agent position for goal position.\n return (np.array(self.canvas.coords(self.agent)[:2]) - 
np.array(self.canvas.coords(self.goal)[:2])) / (MAZE_H * UNIT)\n\n def step(self, action):\n # agent's position\n s = self.canvas.coords(self.agent)\n base_action = np.array([0, 0])\n reward = 0\n if action == 0 or action == 'u': # up\n if s[1] > UNIT and not self.is_hell(s[0], s[1] - UNIT):\n base_action[1] -= UNIT\n else:\n reward = -1\n elif action == 1 or action == 'd': # down\n if s[1] < (MAZE_H - 1) * UNIT and not self.is_hell(s[0], s[1] + UNIT):\n base_action[1] += UNIT\n else:\n reward = -1\n elif action == 2 or action == 'r': # right\n if s[0] < (MAZE_W - 1) * UNIT and not self.is_hell(s[0] + UNIT, s[1]):\n base_action[0] += UNIT\n else:\n reward = -1\n elif action == 3 or action == 'l': # left\n if s[0] > UNIT and not self.is_hell(s[0] - UNIT, s[1]):\n base_action[0] -= UNIT\n else:\n reward = -1\n self.canvas.move(self.agent, base_action[0], base_action[1]) # move agent\n self.update()\n next_coords = self.canvas.coords(self.agent) # next state\n if next_coords == self.canvas.coords(self.goal):\n reward = 1\n done = True\n else:\n if reward != -1:\n reward = 0\n done = False\n s_ = np.expand_dims((np.array(next_coords[:2]) - np.array(self.canvas.coords(self.goal)[:2]))/(MAZE_H*UNIT), axis=0)\n self.done = done\n return s_, reward, done\n\n def is_hell(self, x, y):\n for hell in self._hells:\n hell_coord = self.canvas.coords(hell)\n if hell_coord[0] == x and hell_coord[1] == y:\n return True\n return False\n\n def render(self):\n self.update()\n\n def get_current_state(self):\n return np.expand_dims((np.array(self.canvas.coords(self.agent)[:2]) - np.array(self.canvas.coords(self.goal)[:2])) / (MAZE_H * UNIT), axis=0)\n\n\n\nif __name__ == '__main__':\n q_maze = DSRMaze()\n hell = q_maze.is_hell(0, 0)\n while not hell:\n q_maze.step('r')\n hell = q_maze.is_hell(q_maze.canvas.coords(q_maze.agent)[0], q_maze.canvas.coords(q_maze.agent)[1])\n q_maze.update()\n time.sleep(1)\n q_maze.mainloop()\n" }, { "alpha_fraction": 0.48379889130592346, "alphanum_fraction": 0.4871508479118347, "avg_line_length": 34.4455451965332, "blob_id": "47bf4a8c29d137c32e00e8651486b61f5c0fb4d7", "content_id": "d9673b18b703ce1546932a37250be19b947d8cff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3936, "license_type": "no_license", "max_line_length": 94, "num_lines": 101, "path": "/operate_sys/page_algorithm.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 页面置换算法\nfrom collections import deque\n\nclass Node(object):\n def __init__(self, page):\n self.page = page\n self.count = 1 # 被访问次数\n\n def __str__(self):\n return str(self.page)\n\nclass PageAlgorithm(object):\n def __init__(self, phy_size=3):\n self.length = 0\n self.phy_size = phy_size\n self.queue = deque() # FIFO算法数据结构\n self.lru_structure = []\n\n def FIFO(self):\n while True:\n page_num = input(\"请输入要访问的页面号:\")\n if page_num == -1 or page_num == '-1':\n break\n else:\n if (not self._in_queue(page_num)) and (self.length < self.phy_size):\n self.queue.append(Node(page_num))\n self.length += 1\n print(str(page_num) + \" 页 缺页, 页面加入物理快,当前物理快中的页面有:\",end='')\n self._print_queue()\n elif(not self._in_queue(page_num)) and (self.length >= self.phy_size):\n self.queue.popleft()\n self.queue.append(Node(page_num))\n print(str(page_num) + \"页 缺页, 执行FIFO算法, 当前物理快中的页面有:\", end='')\n self._print_queue()\n else:\n print(str(page_num) + \"页 不缺页, 当前物理快中的页面有:\", end='')\n self._print_queue()\n print()\n def LRU(self):\n while True:\n page_num = input(\"请输入要访问的页面号:\")\n if 
page_num == -1 or page_num == '-1':\n                break\n            else:\n                if (not self._in_lru_structure(page_num)) and (self.length < self.phy_size):\n                    self.lru_structure.append(Node(page_num))\n                    self.length += 1\n                    print(str(page_num) + \" page fault: page loaded into a frame; pages now in physical frames:\\npage no.\\taccess count\")\n                    self._print_lru_structure()\n\n                elif (not self._in_lru_structure(page_num)) and (self.length >= self.phy_size):\n                    min_index = self._min_count_index()\n                    self.lru_structure.pop(min_index)\n                    self.lru_structure.append(Node(page_num))\n                    print(str(page_num) + \" page fault: LRU replacement performed; pages now in physical frames:\\npage no.\\taccess count\")\n                    self._print_lru_structure()\n\n                else:\n                    self._count_add_one(page_num)\n                    print(str(page_num) + \" no page fault; pages now in physical frames:\\npage no.\\taccess count\")\n                    self._print_lru_structure()\n            print()\n\n    def _in_queue(self, page_num):\n        for node in self.queue:\n            if page_num == node.page:\n                return True\n        return False\n    \n    def _in_lru_structure(self, page_num):\n        for node in self.lru_structure:\n            if page_num == node.page:\n                return True\n        return False\n    \n    def _min_count_index(self):\n        min_count = float('inf')\n        min_index = -1\n        for i in range(len(self.lru_structure)):\n            if self.lru_structure[i].count < min_count:\n                min_count = self.lru_structure[i].count\n                min_index = i\n        return min_index\n\n    def _count_add_one(self, page_num):\n        for node in self.lru_structure:\n            if page_num == node.page:\n                node.count += 1\n                break\n\n    def _print_queue(self):\n        for i in self.queue:\n            print(str(i.page) + \"\\t\" + str(i.count))\n\n    def _print_lru_structure(self):\n        for i in self.lru_structure:\n            print(str(i.page) + \"\\t\" + str(i.count))\n\nif __name__ == '__main__':\n    page = PageAlgorithm(3)\n    # page.FIFO()\n    page.LRU()\n" }, { "alpha_fraction": 0.6471251249313354, "alphanum_fraction": 0.6956031322479248, "avg_line_length": 25.84848403930664, "blob_id": "10a882b3f4b51751039616a4dd8332d143ee9e57", "content_id": "49c01a42a9c0eaedf6fb9336e2337eda83a3354d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 937, "license_type": "no_license", "max_line_length": 111, "num_lines": 33, "path": "/InterNET/request_test01.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import json\nimport requests\n# send a request with the GET method\ndef requestsTest1():\n\tr = requests.get(\"http://jwxt.hbuas.edu.cn/jsxsd/xsxk/xsxk_index?jx0502zbid=5FAF2F7FF31F43B3BFB896BFD1D9FF17\")\n\tcontent = r.text\n\tprint(content)\n\ndef requestTest2():\n\tmyparam = {'wd':'Linux'}\n\tr = requests.get('https://www.baidu.com/s',params = myparam)\n\tprint(r.url)\n\tprint (r.text,end=\"\")\n\n# unlimited-registration bot\ndef request_post():\n\tmydata = {'userAccount':'2016117119','userPassword':'13797763577wh'}\n\tr = requests.post('http://jwxt.hbuas.edu.cn/jsxsd/',data = mydata)\n\tprint(r.text)\n\n# send JSON-formatted data\ndef request_json():\n\tmydata = {'username':'683602165','password':'123142342432'}\n\tr = requests.post('http://jwxt.hbuas.edu.cn/jsxsd/xsxk',data = json.dumps(mydata))\n\tprint (r.text)\n\n# upload a file\ndef request_files():\n\tmyfile = {'file':open('106.jpg','rb')}\n\tr = requests.post('http://httpbin.org/post',files = myfile)\n\tprint (r.text)\n\nrequest_post()\n\n" }, { "alpha_fraction": 0.5680100917816162, "alphanum_fraction": 0.5820550322532654, "avg_line_length": 37.73170852661133, "blob_id": "bca1e6dc2e7880486a55e6cd68f74992818a3dba", "content_id": "59d13bbefe7c843eba8138d174b6f5a018f4f05a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1588, "license_type": "no_license", "max_line_length": 95, "num_lines": 41, 
"path": "/softmax/model.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, print_function, division\n\nimport numpy as np\nfrom .softmax_loss import Softmax\n\nclass Model:\n\n def __init__(self):\n self.input = None\n self.target = None\n self.w = None\n self.loss_history = None\n\n def train(self, x, y, learning_rate=1e-3, reg=1e-5, num_iter=100, batch_size=10, verbos=0):\n \"\"\"\n Training model.\n :param x: input value, shape:(N, D), N: the values of sample, D:the sample's dimension.\n :param y: target value, shape:(N,)\n :param learning_rate: learning rate, default: 1e-3\n :param reg: regularization coefficient.\n :param num_iter: train numbers.\n :param batch_size: train batch's size.\n :param verbos: if verbos=0, training process can not visualized.\n :return: loss history, type: list\n \"\"\"\n num_train, num_dim = np.asarray(x, dtype=np.int).shape\n num_class = int(np.max(y) + 1)\n if self.w is None:\n self.w = np.random.randn(num_dim, num_class)\n loss_history = []\n for i in range(num_iter):\n indices = np.random.choice(num_train, batch_size,False)\n x_batch = x[indices, :]\n y_batch = y[indices]\n loss, gradient = Softmax.softmax_loss_matrix(self.w, x_batch, y_batch, reg)\n loss_history.append(loss)\n self.w = self.w - learning_rate * gradient\n if verbos>0 and i % 100 == 0:\n print(\"iter: %d\\t loss:%f\"%(i, loss))\n self.loss_history = loss_history\n return loss_history\n" }, { "alpha_fraction": 0.5548705458641052, "alphanum_fraction": 0.5733662247657776, "avg_line_length": 25.96666717529297, "blob_id": "7a922e9846d905c40c874fe3f40d035bb8898977", "content_id": "df473ce5f54b36149cd932e4e59d3aa627f3fffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 71, "num_lines": 30, "path": "/ml/cluster/KMeans.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\nclass KMeans(object):\n def __init__(self):\n pass\n\n @staticmethod\n def cluster(k, data):\n clusters = np.zeros([k, data.shape[1]])\n result = np.zeros([data.shape[0]])\n for i in range(k):\n clusters[i] = data[i]\n for i in range(1000):\n for i in range(data.shape[0]):\n result[i] = KMeans.distance(data[i], clusters)\n\n\n @staticmethod\n def distance(data, cluster):\n res = []\n for point in cluster:\n res.append(np.sqrt(np.dot((data - point), (data - point))))\n return np.argmin(res)\n\nimage = cv2.imread(\"./k-means.png\", cv2.IMREAD_ANYCOLOR)\nrows, cols = image.shape[0], image.shape[1]\ndata = image.reshape(-1, 3)\nprint(data.shape, type(data))\nKMeans.cluster(4, data)\n\n\n" }, { "alpha_fraction": 0.5355821251869202, "alphanum_fraction": 0.5562567710876465, "avg_line_length": 40.03571319580078, "blob_id": "d60be0654380225b33a180671efb6372ef758779", "content_id": "e058c5825557982bae57ffd4e4364e1a446d6f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4621, "license_type": "no_license", "max_line_length": 121, "num_lines": 112, "path": "/nn/dense_network.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 堆叠自动编码机\n# Stacking automatic encoder\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n\nclass NeuralNetwork:\n TRAIN_MODEL_NEW=1\n TRAIN_MODEL_CONTINUE=2\n\n def 
__init__(self):\n        self.hidden_size=1024\n        self.n = 784\n        self.regcoef = 5e-4\n        self.tf_graph = tf.Graph()\n        self.num_epochs = 200\n        self.corr_frac = 0.1  # fraction of inputs corrupted with noise\n        self.batch_size = 128\n        self.X = None\n        self.y = None\n        self.y_ = None\n        self.keep_prob = None\n        self.W1 = None\n        self.b2 = None\n        self.b3 = None\n        self.a2 = None\n        self.J = None\n        self.train_op = None\n\n\n    def load_dataset(self):\n        mnist = input_data.read_data_sets('/home/wangheng/workspace/PycharmProjects/MNIST_data', one_hot=True)\n        X_train = mnist.train.images\n        y_train = mnist.train.labels\n        X_test = mnist.test.images\n        y_test = mnist.test.labels\n        X_validation = mnist.validation.images\n        y_validation = mnist.validation.labels\n        return X_train, y_train, X_test, y_test, X_validation, y_validation\n\n    def add_noise(self, sess, X, corr_frac):\n        X_prime = X.copy()\n        rand = tf.random_uniform(X.shape)\n        X_prime[sess.run(tf.nn.relu(tf.sign(corr_frac - rand))).astype(bool)] = 0\n        return X_prime\n\n    def get_mini_batches(self, X, batch_size):\n        X = np.array(X)\n        length = X.shape[0]\n        for i in range(0, length, batch_size):\n            if i+batch_size < length:\n                yield X[i: i+batch_size]\n            else:\n                yield X[i: ]\n\n    def build_model(self):\n        print(\"Building auto-denoising Autoencoder Model v 1.0\")\n        print(\"Beginning to build the model\")\n        self.X = tf.placeholder(dtype=tf.float32, shape=[None, self.n])\n        self.y = tf.placeholder(dtype=tf.float32, shape=[None, self.n])\n        self.keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')\n        self.W1 = tf.Variable(initial_value=tf.truncated_normal(shape=[self.n, self.hidden_size], mean=0.1, stddev=0.1),\\\n            name='w1')\n        self.b2 = tf.Variable(tf.constant(0.001, shape=[self.hidden_size]), name='b2')\n        self.b3 = tf.Variable(tf.constant(0.001, shape=[self.n]), name='b3')\n        with tf.name_scope('encoder'):\n            z2 = tf.matmul(self.X, self.W1) + self.b2\n            self.a2 = tf.nn.tanh(z2)\n        with tf.name_scope('decoder'):\n            z3 = tf.matmul(self.a2, tf.transpose(self.W1)) + self.b3\n            a3 = tf.nn.tanh(z3)\n        self.y_ = a3\n        r_y_ = tf.clip_by_value(self.y_, 1e-10, float('inf'))\n        r_1_y = tf.clip_by_value(1-self.y_, 1e-10, float('inf'))\n        # binary cross-entropy: the negative sign makes this a loss to minimize\n        cost = -tf.reduce_mean(tf.add(tf.multiply(self.y, tf.log(r_y_)),\\\n            tf.multiply(tf.subtract(1.0, self.y), tf.log(r_1_y))))\n        self.J = cost + self.regcoef * tf.nn.l2_loss(self.W1)\n        self.train_op = tf.train.AdamOptimizer(0.0001).minimize(self.J)\n\n    def train(self, model=TRAIN_MODEL_NEW, ckpt_file='work/dae.ckpt'):\n        X_train, y_train, X_test, y_test, X_validation, y_validation = self.load_dataset()\n        with self.tf_graph.as_default():\n            self.build_model()\n            saver = tf.train.Saver()\n            with tf.Session() as sess:\n                sess.run(tf.global_variables_initializer())\n                for epoch in range(self.num_epochs):\n                    X_train_prime = self.add_noise(sess, X_train, self.corr_frac)\n                    shuff = list(zip(X_train, X_train_prime))\n                    np.random.shuffle(shuff)\n                    batches = [_ for _ in self.get_mini_batches(shuff, self.batch_size)]\n                    batch_idx = 0\n                    for batch in batches:\n                        X_batch_raw, X_prime_batch_raw = zip(*batch)\n                        X_batch = np.array(X_batch_raw).astype(np.float32)\n                        X_prime_batch = np.array(X_prime_batch_raw).astype(np.float32)\n                        batch_idx += 1\n\n                        opv, loss = sess.run([self.train_op, self.J], feed_dict={self.X: X_prime_batch, self.y: X_batch})\n                        if batch_idx % 100 == 0:\n                            print('epoch{0}_batch{1}: {2}'.format(epoch, batch_idx, loss))\n                    saver.save(sess, ckpt_file)\n    \n    def run(self, ckpt_file='work/dae.ckpt'):\n        img_file = 'datasets/test'\n\nif __name__ == '__main__':\n    nn = NeuralNetwork()\n    nn.train()" }, { 
"alpha_fraction": 0.6306933760643005, "alphanum_fraction": 0.6553524732589722, "avg_line_length": 34.91666793823242, "blob_id": "fc33e7abc427940132310e8a49d3b82f4954a689", "content_id": "9334456d6d9670e09c27b7e400ab0d1d3c20c27e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3447, "license_type": "no_license", "max_line_length": 109, "num_lines": 96, "path": "/ml/ensemble_learning/ensambleLearning.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom joblib import dump, load\nfrom tensorflow.python.keras.callbacks import TensorBoard\nimport tensorflow as tf\nfrom tensorflow.python.keras.layers import Dense, Dropout\nfrom tensorflow.python.keras.models import Sequential\nimport pandas as pd\nimport numpy as np\n\n# dataset link: https://www.kaggle.com/ronitf/heart-disease-uci/downloads/heart-disease-uci.zip/1\n\ndef prepare_data():\n original_data = pd.read_csv(\"./heart.csv\")\n data = original_data.values\n data[:, 0] = data_normalization(data[:, 0])\n data[:, 3] = data_normalization(data[:, 3])\n data[:, 4] = data_normalization(data[:, 4])\n data[:, 7] = data_normalization(data[:, 7])\n\n X_ = data[:, :-1]\n y_ = data[:, -1]\n x_train, x_test, y_train, y_test = train_test_split(X_, y_, test_size=0.2, shuffle=False, random_state=0)\n return x_train, x_test, y_train, y_test\n\ndef data_normalization(data, method='max-min'):\n if method == 'max-min':\n max_value, min_value = max(data), min(data)\n data = (data - np.repeat(min_value, data.shape[0])) / (max_value - min_value)\n return data\n\n elif method == 'z-zero':\n mean = np.mean(data, axis=0)\n std = np.std(data, axis=0)\n return (data - np.repeat(mean, data.shape[0])) / std\n\ndef build_multi_model(x_train, x_test, y_train, y_test):\n model1 = DecisionTreeClassifier()\n model2 = KNeighborsClassifier(2)\n model3 = LogisticRegression(max_iter=10000,solver='liblinear', tol=1e-8)\n\n model1.fit(x_train, y_train)\n model2.fit(x_train, y_train)\n model3.fit(x_train, y_train)\n\n dump(model1, \"model1.joblib\")\n dump(model2, \"model2.joblib\")\n dump(model3, \"model3.joblib\")\n\n pre1 = model1.predict(x_test)\n pre2 = model2.predict(x_test)\n pre3 = model3.predict(x_test)\n\n pre = np.zeros(pre1.shape)\n for i in range(0, pre1.shape[0]):\n if pre1[i] + pre2[i] + pre3[i] >= 2:\n pre[i] = 1\n else:\n pre[i] = 0\n\n print(y_test[0:30], pre3[0:30])\n print(accuracy_score(y_test, pre1))\n print(accuracy_score(y_test, pre2))\n print(accuracy_score(y_test, pre3))\n print(accuracy_score(y_test, pre))\n\ndef build_nn_model(x_train, x_test, y_train, y_test):\n model = Sequential()\n model.add(Dense(input_shape=x_train[0].shape, units=13))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(optimizer=tf.train.AdamOptimizer(),loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n print(model.summary())\n model.fit(x_train, y_train, batch_size=32, epochs=1000,callbacks=[TensorBoard('./logs')])\n model.save('./nn_model.h5')\n test_loss, test_acc = model.evaluate(x_test, y_test)\n print(test_acc)\n\n\ndef load_model(x_test, y_test):\n model = 
load('./model3.joblib')\n    result = model.predict(x_test)\n    print(accuracy_score(y_test, result))\n\nif __name__ == \"__main__\":\n    x_train, x_test, y_train, y_test = prepare_data()\n    # build_multi_model(x_train, x_test, y_train, y_test)\n    # load_model(x_test, y_test)\n    build_nn_model(x_train, x_test, y_train, y_test)" }, { "alpha_fraction": 0.48122867941856384, "alphanum_fraction": 0.5336642861366272, "avg_line_length": 32.816326141357422, "blob_id": "03690ca13d3e4c5de6acc06e28d1f5e52cfffb04", "content_id": "b53ee6897f6cb4c560d2ba7a0fff56f4d11af9ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3885, "license_type": "no_license", "max_line_length": 108, "num_lines": 98, "path": "/KNN/mySeries.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\n\n\n\ndef dataframe1():\n    s = pd.Series([-12, -34, 90, 34], index=['e', 'f', 'g', 'h'])\n    print(s)\n    print(s.values)\n    print(s.index)\n    print(s[2])  # print the element at position 2\n    print(s['g'])  # look up the value at index label 'g'\n    print(s[['e', 'h']])  # print the entries for labels 'e' and 'h', with both index and values\n    s['f'] = 55  # assign a new value to an element\n    print(s)\n\n    data1 = np.arange(4, 10)\n    ss = pd.Series(data1)\n    print(ss)\n    data2 = np.arange(3, 7)  # create a 1-D array with the values 3 to 6\n    sss = pd.Series(data2)  # the array positions become the Series index\n    print(sss)\n    print(sss[3])\n    print(sss[sss > 4])  # filter elements\n    print(sss / 2)  # arithmetic on a Series\n    print(np.sin(sss))  # series\n    print(sss.unique())\n    # using a Series as a dict\n    mydict = {'red': 12, 'blue': 14, 'yello': 90, 'green': -78}\n    print(mydict)\n    print(pd.Series(mydict))\n    colors = ['red', 'blue', 'yello', 'green']\n    print(pd.Series(mydict, index=colors))  # fill the colors keys with values from the dict\n    print(\"=\" * 30)\n    print(pd.Series(mydict, index=colors) + pd.Series(mydict))\n\n    # ====== DataFrame objects ======\n    # a DataFrame is built by passing the DataFrame() constructor a dict that maps each column name to an array of values\n    mydict_1 = {'color': ['red', 'green', 'yellow', 'black'], 'object': ['blood', 'tree', 'video', 'pen'],\n                'value': [12, 34.8, 23.9, -11.98]}\n    frame = pd.DataFrame(mydict_1)  # build a DataFrame object\n    print(frame)\n    # select which columns to display\n    frame_1 = pd.DataFrame(mydict_1, columns=['color', 'value'])\n    print(frame_1)\n    # specify the DataFrame index\n    frame_2 = pd.DataFrame(mydict_1, index=['one', 'two', 'three', 'four'], columns=['color', 'value'])\n    print(frame_2)\n    # ====== selecting elements ======\n    print(frame_2.values)\n    # ====== selecting the index ======\n    print(frame_2.keys())\n    # two ways to select the values of one column\n    print(frame_2['value'])\n    print(frame_2.color)\n    # ====== fetch one row via the ix attribute and the row label\n    print(frame_2.ix['two'])\n    # fetch the values for several labels via ix; note: multiple labels must be wrapped in []\n    print(frame_2.ix[['one', 'three']])\n    # select a slice of the DataFrame:\n    print(frame[0:2])\n    # select a single element of the DataFrame; the first key is the column, the second the row\n    print(frame['color'][2])\n    # add a new column\n    frame['new'] = 13\n    print(frame)\n\n    # =================================================================\n    # to update a column, first build a Series object and then assign it to the DataFrame\n    ser = pd.Series(np.arange(4))\n    frame['new'] = ser\n    print(frame, '\\\\n')\n    # modify a single DataFrame element\n    frame['new'][1] = 12\n    print(frame)\n    # use isin() to test whether values are present in the Series\n    print(frame[frame.isin(['red', 12])])\n    # delete a whole column\n    del frame['new']\n    print(frame)\n    # transpose the DataFrame\n    print(frame.T, '\\\\n')\n    # reindex the Series\n    s.reindex([1, 2, 3, 4])\n    print(s)\n    # ====== compute the covariance and correlation of two series ======\n    s = pd.Series([-12, -34, 90, 34], index=['e', 'f', 'g', 'h'])\n    print(s)\n    print(np.arange(5, 15))\n    seq1 = pd.Series(np.array([3.45, 1.67, 5.56, 7.89, 9.01]), index=np.arange(1, 6))\n    seq2 = pd.Series(np.arange(5, 10), 
index=np.arange(1, 6))\n    print(seq1, '\\\\n', seq2, '\\\\n')\n    print(\"covariance:\", '\\\\t', seq1.cov(seq2))\n    print(\"correlation:\", seq1.corr(seq2))\n    frame = pd.DataFrame(\n        [[1, 4, 5, 6], [8.9, 12.6, 19.98, 33.87], [23.11, 34.56, 22.45, 19.987], [-23.1, 90.7, 34.6, 89.4]],\n        index=['a', 'b', 'c', 'd'], columns=['d1', 'd2', 'd3', 'd4'])\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4257287383079529, "alphanum_fraction": 0.49013790488243103, "avg_line_length": 36.67763137817383, "blob_id": "608bd211e1951a749f0ee9033e58dc1780e8190c", "content_id": "6374995f9fe10873795d61cdb58f523e8eb7bf51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5901, "license_type": "no_license", "max_line_length": 88, "num_lines": 152, "path": "/reinforcement_learning/grid_env.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# Gold-hunting MDP example\n# |-------------------|\n# | 1 | 2 | 3 | 4 | 5 |\n# | x |   | o |   | x |\n# |-------------------|\n#   6   7   8\n\nimport gym\nimport time\n\n# class GridEnv(gym.Env):\n#\n#     def __init__(self):\n#         self.states = [1,2,3,4,5,6,7,8] # full state space\n#         self.actions = ['n', 'e', 's', 'w'] # full action space\n#         self.rewards = dict()  # rewards\n#         self.rewards['1_s'] = -1.0\n#         self.rewards['3_s'] = 1.0\n#         self.rewards['5_s'] = -1.\n#         self.t = dict()  # state transitions\n#         self.t['1_s'] = 6\n#         self.t['1_e'] = 2\n#         self.t['2_w'] = 1\n#         self.t['2_e'] = 3\n#         self.t['3_s'] = 7\n#         self.t['3_w'] = 2\n#         self.t['3_e'] = 4\n#         self.t['4_w'] = 3\n#         self.t['4_e'] = 5\n#         self.t['5_s'] = 8\n#         self.t['5_w'] = 4\n#         self.terminal_states = dict()  # terminal states\n#         self.terminal_states[6] = 1\n#         self.terminal_states[7] = 1\n#         self.terminal_states[8] = 1\n#\n#         self.x = [140, 220, 300, 380, 460, 140, 300, 460]\n#         self.y = [250, 250, 250, 250, 250, 150, 150, 150]\n#\n#         self.state = None\n#         self.gamma = 0.8\n#         self.viewer = None\n#\n#     def step(self, action):\n#         state = self.state\n#         if state in self.terminal_states:\n#             return state, 0, True, {}  # return the next state, reward, done flag and debug info\n#         key = \"%d_%s\"%(state,action)  # build the dict key from the state-action pair\n#         if key in self.t:\n#             next_state = self.t[key]\n#         else:\n#             next_state = state\n#         if next_state in self.terminal_states:\n#             is_terminal = True\n#         else:\n#             is_terminal = False\n#\n#         if key not in self.rewards:\n#             r = 0\n#         else:\n#             r = self.rewards[key]\n#         return next_state, r, is_terminal, {}\n#\n#     def reset(self):\n#         self.state = self.states[int(random.random() * len(self.states))]\n#         return self.state\n#\n#     def render(self, mode='human', close=False):\n#         if close:\n#             if self.viewer is not None:\n#                 self.viewer.close()\n#                 self.viewer = None\n#             return\n#         screen_width = 600\n#         screen_height = 400\n#         if self.viewer is None:\n#             from gym.envs.classic_control import rendering\n#             self.viewer = rendering.Viewer(screen_width, screen_height)\n#             # create the grid world\n#             self.line1 = rendering.Line((100, 300), (500, 300))\n#             self.line2 = rendering.Line((100, 200), (500, 200))\n#             self.line3 = rendering.Line((100, 300), (100, 100))\n#             self.line4 = rendering.Line((180, 300), (180, 100))\n#             self.line5 = rendering.Line((260, 300), (260, 100))\n#             self.line6 = rendering.Line((340, 300), (340, 100))\n#             self.line7 = rendering.Line((420, 300), (420, 100))\n#             self.line8 = rendering.Line((500, 300), (500, 100))\n#             self.line9 = rendering.Line((100, 100), (180, 100))\n#             self.line10 = rendering.Line((260, 100), (340, 100))\n#             self.line11 = rendering.Line((420, 100), (500, 100))\n#             # create the first skull\n#             self.kulo1 = rendering.make_circle(40)\n#             self.circletrans = rendering.Transform(translation=(140, 150))\n# 
self.kulo1.add_attr(self.circletrans)\n#             self.kulo1.set_color(0, 0, 0)\n#             # create the second skull\n#             self.kulo2 = rendering.make_circle(40)\n#             self.circletrans = rendering.Transform(translation=(460, 150))\n#             self.kulo2.add_attr(self.circletrans)\n#             self.kulo2.set_color(0, 0, 0)\n#             # create the gold bar\n#             self.gold = rendering.make_circle(40)\n#             self.circletrans = rendering.Transform(translation=(300, 150))\n#             self.gold.add_attr(self.circletrans)\n#             self.gold.set_color(1, 0.9, 0)\n#             # create the robot\n#             self.robot = rendering.make_circle(30)\n#             self.robotrans = rendering.Transform()\n#             self.robot.add_attr(self.robotrans)\n#             self.robot.set_color(0.8, 0.6, 0.4)\n#\n#             self.line1.set_color(0, 0, 0)\n#             self.line2.set_color(0, 0, 0)\n#             self.line3.set_color(0, 0, 0)\n#             self.line4.set_color(0, 0, 0)\n#             self.line5.set_color(0, 0, 0)\n#             self.line6.set_color(0, 0, 0)\n#             self.line7.set_color(0, 0, 0)\n#             self.line8.set_color(0, 0, 0)\n#             self.line9.set_color(0, 0, 0)\n#             self.line10.set_color(0, 0, 0)\n#             self.line11.set_color(0, 0, 0)\n#\n#             self.viewer.add_geom(self.line1)\n#             self.viewer.add_geom(self.line2)\n#             self.viewer.add_geom(self.line3)\n#             self.viewer.add_geom(self.line4)\n#             self.viewer.add_geom(self.line5)\n#             self.viewer.add_geom(self.line6)\n#             self.viewer.add_geom(self.line7)\n#             self.viewer.add_geom(self.line8)\n#             self.viewer.add_geom(self.line9)\n#             self.viewer.add_geom(self.line10)\n#             self.viewer.add_geom(self.line11)\n#             self.viewer.add_geom(self.kulo1)\n#             self.viewer.add_geom(self.kulo2)\n#             self.viewer.add_geom(self.gold)\n#             self.viewer.add_geom(self.robot)\n#\n#         if self.state is None: return None\n#         self.robotrans.set_translation(self.x[self.state - 1], self.y[self.state - 1])\n#\n#         return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\nif __name__ == '__main__':\n    env = gym.make(\"CartPole-v0\")\n    env.reset()\n    env.render()\n    # env.env.close()\n    print(env.step(env.action_space.sample()))  # step() needs an action; sample a random one\n    time.sleep(10)\n    env.close()\n\n\n" }, { "alpha_fraction": 0.44812220335006714, "alphanum_fraction": 0.5117759108543396, "avg_line_length": 20.81944465637207, "blob_id": "6f379de3aca23949f05edff3cff202d9190347f0", "content_id": "ee0480ae06ca41e512476f748999509d906c52dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2155, "license_type": "no_license", "max_line_length": 93, "num_lines": 72, "path": "/algorithm/dynamic_programming.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\n\nsub_fib = np.ones((100), int) * -1\n\n\n# Top-down memoization\ndef fib(n, memory):\n    if memory[n] != -1:\n        return memory[n]\n    if n <= 2:\n        memory[n] = 1\n    else:\n        memory[n] = fib(n - 1, memory) + fib(n - 2, memory)\n    return memory[n]\n\n\n# Bottom-up dynamic programming\n# Memoization still relies on recursion: no matter what, computing fib(6)\n# eventually has to compute fib(1), fib(2), fib(3), ... So why not compute\n# fib(1), fib(2), fib(3), ... first? That is the core of dynamic programming:\n# solve the subproblems first, then build the parent problem from them.\n\ndef fib_1(n):\n    if n <= 0:\n        return n\n    Memo = np.ones((100), int) * -1\n    Memo[0] = 0\n    Memo[1] = 1\n    for i in range(2, n + 1):\n        Memo[i] = Memo[i - 1] + Memo[i - 2]\n    return Memo[n]\n\n\n# The bottom-up method also uses an array to save previously computed values\n# for later use. Note that only the terms i, i-1 and i-2 take part in the\n# loop, so the space usage can be compressed further, as follows.\ndef fib_2(n):\n    if n <= 0:\n        return n\n    memo_i_2 = 0\n    memo_i_1 = 1\n    memo_i = 1\n    for i in range(2, n + 1, 1):\n        memo_i = memo_i_1 + memo_i_2\n        memo_i_2 = memo_i_1\n        memo_i_1 = memo_i\n    return memo_i\n\n\n# Intro to dynamic programming: the number-triangle problem. Given a triangle\n# of non-negative integers where the first row holds a single number and every\n# number except those in the last row has one neighbour to its lower left and\n# one to its lower right, start from the top number and walk to the bottom.\n# Which path maximizes the sum of the numbers passed along the way?\n# State transition: d(i,j) = a(i,j) + max(d(i+1, j), d(i+1, j+1))\n# 1. 
Recursive solution\ndef solve(a, i, j, n):\n    return a[i, j] + (0 if i == n else max(solve(a, i + 1, j, n), solve(a, i + 1, j + 1, n)))\n\n# 2. Iterative (bottom-up) solution\n# def solve2()\n\nif __name__ == '__main__':\n    # print(fib(8, sub_fib))\n    # print(fib_1(9))\n    # print(fib_2(10))\n\n    a = np.zeros((4, 4))\n    a[0, 0] = 1\n    a[1, 0], a[1, 1] = 3, 2\n    a[2, 0], a[2, 1], a[2, 2] = 4, 10, 1\n    a[3, 0], a[3, 1], a[3, 2], a[3, 3] = 4, 3, 2, 20\n    print(solve(a, 0, 0, 3))\n" }, { "alpha_fraction": 0.49735450744628906, "alphanum_fraction": 0.5483405590057373, "avg_line_length": 52.30769348144531, "blob_id": "6e4610e2b08848976feec1f7208e83f7642868cb", "content_id": "f8c6b4a298cbaa008e0eb4ed9d6ff2d5bca11060", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2079, "license_type": "no_license", "max_line_length": 110, "num_lines": 39, "path": "/reinforcement_learning/DSR.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# Deep successor representation.\n\"\"\"\ntensorflow 1.5.x\n\"\"\"\nimport tensorflow as tf\n\nclass DSRBrain:\n    def __init__(self,action_num, sess: tf.Session, output_graph=False):\n\n        self.action_num = action_num\n        self.s_t = tf.placeholder(tf.float32, [None, 40, 40, 1], 's_t')\n        if sess is None:\n            self.sess = tf.Session()\n            self.sess.run(tf.global_variables_initializer())\n        else:\n            self.sess = sess\n        if output_graph:\n            tf.summary.FileWriter(\"./dsr_log/\", self.sess.graph)\n\n    def build_network_architecture(self):\n        with tf.variable_scope(\"f\"):\n            with tf.variable_scope(\"conv_1\"):\n                w1 = tf.get_variable('w1', [8, 8, 1, 32], tf.float32, tf.truncated_normal_initializer())\n                b1 = tf.get_variable('b1', [32], tf.float32, tf.truncated_normal_initializer())\n                l1 = tf.nn.relu(tf.add(tf.nn.conv2d(self.s_t, w1, [1, 2, 2, 1], 'SAME'), b1))\n            with tf.variable_scope('conv_2'):\n                w2 = tf.get_variable(\"w2\", [4, 4, 32, 64], tf.float32, tf.truncated_normal_initializer())\n                b2 = tf.get_variable('b2', [64], tf.float32, tf.truncated_normal_initializer())\n                l2 = tf.nn.relu(tf.add(tf.nn.conv2d(l1, w2, [1, 1, 1, 1], 'SAME'), b2))\n            with tf.variable_scope('conv_3'):\n                w3 = tf.get_variable('w3', [3, 3, 64, 64], tf.float32, tf.truncated_normal_initializer())\n                b3 = tf.get_variable('b3', [64], tf.float32, tf.truncated_normal_initializer())\n                l3 = tf.nn.relu(tf.add(tf.nn.conv2d(l2, w3, [1, 1, 1, 1], 'SAME'), b3))\n            with tf.variable_scope('fc1'):\n                flatten = tf.reshape(l3, [-1, 64 * 3 * 3])\n                w_r = tf.get_variable('w_r', [64 * 3 * 3, 512], tf.float32, tf.truncated_normal_initializer())\n                b_r = tf.get_variable('b_r', [512], tf.float32, tf.truncated_normal_initializer())\n                fai_s = tf.nn.relu(tf.add(tf.matmul(flatten, w_r), b_r))\n            with tf.variable_scope('R(s)'):\n                w = tf.get_variable('w', [])\n" }, { "alpha_fraction": 0.5734952092170715, "alphanum_fraction": 0.5866701006889343, "avg_line_length": 36.91666793823242, "blob_id": "1c90ae79a09c2d9479cf567c13e4031e8ef052cc", "content_id": "8d8c30954040e5b7e21f935a68feecda4b7c667d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3987, "license_type": "no_license", "max_line_length": 102, "num_lines": 103, "path": "/reinforcement_learning/SR/dsr_simple_brain.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from tensorflow.keras.layers import Dense, Input\nfrom tensorflow import keras\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.losses import mean_squared_error\nimport tensorflow as tf\n\nimport numpy as np\n\nfrom 
dsr_maze_env import DSRMaze\n\n\nclass RL_Brain():\n    def __init__(self, n_features, n_action, memory_size=10, batch_size=32, gamma=0.9, fi_size=8):\n        self.n_features = n_features\n        self.n_actions = n_action\n        self.memory_size = memory_size\n        self.replay_buffer = np.zeros((self.memory_size, n_features * 2 + 2), float)\n        self.count = 0\n        self.batch_size = batch_size\n        self.gamma = gamma\n\n        self.opt = RMSprop()\n\n        # The input state vector is tiny (only 2 dimensions), so there is no need for an\n        # auto-encoder to encode/decode the state; we feed the raw state values directly.\n        self.input_states = Input((self.n_features,), name='input_states')\n\n        self.branch_1_model = keras.Sequential([\n            Input((2,)),\n            Dense(1, None, False, name='R')\n        ])\n\n        self.branch_2_model = [keras.Sequential([\n            Input((2,)),\n            Dense(5, 'relu', name='mu/m%s/layer1' % i),\n            Dense(5, 'relu', name='mu/m%s/layer2' % i),\n            Dense(2, name='mu/m%s/layer3' % i)\n        ], name='branch_%s' % i) for i in range(self.n_actions)]\n\n    def learn_w(self, state, r):\n        with tf.GradientTape() as tape:\n            pred = self.branch_1_model(state)\n            loss = mean_squared_error(r, pred)\n        grads = tape.gradient(loss, self.branch_1_model.trainable_variables)\n        self.opt.apply_gradients(zip(grads, self.branch_1_model.trainable_variables))\n\n    def learn_mu(self, state, state_, action_index):\n        w = self.branch_1_model.get_layer('R').get_weights()[0]\n        mus_ = []\n        for i in range(self.n_actions):\n            mus_.append(self.branch_2_model[i](state_))\n        mus_ = np.squeeze(mus_)\n        max_index = np.argmax(np.squeeze(np.matmul(mus_, w)), axis=0)\n        with tf.GradientTape() as tape:\n            pred = self.branch_2_model[action_index](state)\n            label = state + self.gamma * mus_[max_index]\n            loss = mean_squared_error(label, pred)\n        grads = tape.gradient(loss, self.branch_2_model[action_index].trainable_variables)\n        self.opt.apply_gradients(zip(grads, self.branch_2_model[action_index].trainable_variables))\n\n    def choose_action(self, state, is_random=False):\n        if is_random:\n            return np.random.choice(self.n_actions)\n        w = self.branch_1_model.get_layer('R').get_weights()[0]\n        mus = []\n        for i in range(self.n_actions):\n            pred = self.branch_2_model[i](state)\n            mus.append(pred)\n        mus = np.squeeze(mus)\n        rs = np.squeeze(np.matmul(mus, w))\n        if len(set(rs)) == 1:\n            action_index = np.random.choice(self.n_actions)\n        else:\n            action_index = np.argmax(rs)\n        return action_index\n\n    def append_to_replay_buffer(self, s, a, r, s_):\n        transition = np.hstack([s, a, r, s_])\n        self.replay_buffer[self.count % self.memory_size] = transition\n        self.count += 1\n\n\nif __name__ == '__main__':\n    eps = 100\n    env = DSRMaze('dsr-maze')\n    brain = RL_Brain(2, 4, memory_size=10000)\n\n    c = 0\n    for i in range(eps):\n        state = env.get_current_state()\n        done = False\n\n        while not done:\n            action_index = brain.choose_action(state)\n            s_next, reward, done = env.step(action_index)\n            brain.append_to_replay_buffer(np.squeeze(state), action_index, reward, np.squeeze(s_next))\n            brain.learn_w(state, reward)\n            # training step 2: update m_alpha\n            brain.learn_mu(state, s_next, action_index)\n            state = s_next\n            # c += 1\n            if done:\n                env.reset()\n" }, { "alpha_fraction": 0.34337350726127625, "alphanum_fraction": 0.5120481848716736, "avg_line_length": 19.8125, "blob_id": "e8b7aec777a8f450a0d2f479d86445947319e5d2", "content_id": "72fb808c2c27688c27184e3063987715a36aa5ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 100, "num_lines": 16, "path": 
"/graph/matplotlib_02.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# -*- coding utf-8 -*-\nimport numpy as np\ndef test01():\n b=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,17, 18, 19, 20, 21, 22, 23])\n print(b)\n b.shape = (6,4)\n print(b)\ndef test02():\n b = np.arange(0,50,1,int)\n c = b.reshape(5,10)\n d = c.T\n print(b)\n print(c)\n print(d)\n# test01()\ntest02()" }, { "alpha_fraction": 0.5598865151405334, "alphanum_fraction": 0.5820550322532654, "avg_line_length": 40.74561309814453, "blob_id": "22af867cf29caba14b7071a2e7dc610d7cb8bcdd", "content_id": "024c3ba90f4d787217058da00b071cacb919c428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9644, "license_type": "no_license", "max_line_length": 160, "num_lines": 228, "path": "/reinforcement_learning/SR/sr_drl_brain.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport utils\nfrom gridworld import SimpleGrid\nimport tensorflow._api.v2.compat.v1 as tf\nimport os\nimport shutil\ntf.disable_eager_execution()\ntf.disable_v2_behavior()\n\ngrid_size = 7\npattern = \"four_rooms\"\nenv = SimpleGrid(grid_size, block_pattern=pattern, obs_mode=\"index\")\nenv.reset(agent_pos=[0, 0], goal_pos=[0, grid_size - 1])\n\ntrain_episode_length = 50\ntest_episode_length = 50\nepisodes = 2000\ngamma = 0.95\nlr = 5e-2\nepsilon = 0.1\nbatch_size = 100\n\n# plt.imshow(env.grid)\n# plt.show()\n\n\n# sess.run(tf.global_variables_initializer())\n\ndef _build_layer(input_dim, output_dim, input_data, c_name, layer_name, w_name='w', bias_name='b', has_activate=True):\n with tf.variable_scope(layer_name):\n w = tf.get_variable(w_name, [input_dim, output_dim], dtype=tf.float32, initializer=tf.truncated_normal_initializer(),\n collections=c_name)\n b = tf.get_variable(bias_name, [output_dim], dtype=tf.float32, initializer=tf.truncated_normal_initializer(), collections=c_name)\n if has_activate:\n l = tf.nn.relu(tf.add(tf.matmul(input_data, w), b))\n else:\n l = tf.add(tf.matmul(input_data, w), b)\n return l\n\n\nclass TabularSuccessorAgent(object):\n def __init__(self, n_state, n_action, learning_rate, gamma, replay_buffer_size=3000, sess: tf.Session = None):\n self.n_state = n_state\n self.n_action = n_action\n self.fai_s_size = 512\n # shape: (state_size, action_size)\n self.w = np.zeros([n_state])\n self.learning_rate = learning_rate\n self.gamma = gamma\n self.replay_buffer = np.zeros([replay_buffer_size, self.n_state * 2 + 2])\n self.memory_size = replay_buffer_size\n self.memory_count = 0\n self.state = tf.placeholder(tf.float32, [None, self.n_state])\n self.state_hat = tf.placeholder(tf.float32, [None, self.n_state])\n self.state_ = tf.placeholder(tf.float32, [None, self.n_state])\n self.rs_p = tf.placeholder(tf.float32, [None, 1])\n if sess is None:\n self.sess = tf.Session()\n else:\n self.sess = sess\n self.eval_collection_name = ['eval_net_collection', tf.GraphKeys.GLOBAL_VARIABLES]\n self.target_collection_name = ['target_net_collection', tf.GraphKeys.GLOBAL_VARIABLES]\n\n shutil.rmtree(\"./log\")\n os.mkdir(\"./log\")\n with tf.variable_scope('assign_op'):\n e_params = tf.get_collection('eval_collection_name')\n t_params = tf.get_collection('target_net_collection')\n self.assign_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n with tf.variable_scope('eval_net'):\n self.eval_fai, self.eval_s_hat, self.eval_r_s, self.eval_M = self._build_net(self.eval_collection_name)\n with 
tf.variable_scope('target_net'):\n            self.target_fai, self.target_s_hat, self.target_r_s, self.target_M = self._build_net(self.target_collection_name)\n        tf.summary.FileWriter(\"./log\", self.sess.graph)\n        self.sess.run(tf.global_variables_initializer())\n\n    def _build_net(self, c_name):\n        # The 1-D state is one-hot encoded here, so the input dimension is 1*n_state\n        with tf.variable_scope('f_theta'):\n            l1 = _build_layer(self.n_state, 128, self.state, c_name, 'l1')\n            l2 = _build_layer(128, 512, l1, c_name, 'l2')\n            l3 = _build_layer(512, 1024, l2, c_name, 'l3')\n            l4 = _build_layer(1024, 512, l3, c_name, 'fai_s')\n        with tf.variable_scope('g_theta_'):\n            dl1 = _build_layer(512, 1024, l4, c_name, 'dl1')\n            dl2 = _build_layer(1024, 512, dl1, c_name, 'dl2')\n            dl3 = _build_layer(512, 128, dl2, c_name, 'dl3')\n            s_hat = _build_layer(128, self.n_state, dl3, c_name, 'dl4')\n        with tf.variable_scope('R_s'):\n            r_s = _build_layer(512, 1, l4, c_name, 'r', has_activate=False)\n        with tf.variable_scope(\"mu_alpha\"):\n            M = np.stack([np.zeros(self.fai_s_size, object) for i in range(self.n_action)])\n            for i in range(self.n_action):\n                miu_layer = _build_layer(256, 512, _build_layer(512, 256, _build_layer(512, 512, l4, c_name, 'm%s-1' % i, 'miu_alpha_l1_w', 'miu_alpha_l1_b'),\n                                         c_name, 'm%s-2' % i, 'miu_alpha_l2_w', 'miu_alpha_l2_b'), c_name, 'm%s-3' % i, 'miu_alpha_l3_w',\n                                         'miu_alpha_l3_b')\n                M[i] = miu_layer\n        return l4, s_hat, r_s, M\n\n    def store_transition(self, s, a, r, s_):\n        transition = np.hstack((s, [a, r], s_))\n        index = self.memory_count % self.memory_size\n        self.replay_buffer[index] = transition\n        self.memory_count += 1\n\n\n        # a, M1 = self.choose_action(self.state, 0)\n        # a_, M2 = self.choose_action(self.state_, 0)\n        # loss3 = tf.squared_difference(l4 + self.gamma*M2[a_] - M1[a])\n\n    def choose_action(self, state, epsilon=1.):\n        # Compute Q values. epsilon is the exploration rate; at 1 the action is chosen fully at random.\n        M_s = np.zeros([self.n_action, self.fai_s_size])\n        for i in range(self.n_action):\n            M_s[i] = self.sess.run(self.eval_M[i], feed_dict={self.state: state})\n        w = self.sess.graph.get_tensor_by_name('eval_net/R_s/r/w:0')\n        # fai = self.sess.graph.get_tensor_by_name('eval_net/f_theta/fai_s:0')\n        q_ = tf.matmul(w, tf.Variable(M_s))\n        if np.random.uniform(0, 1) < epsilon:\n            action = np.random.randint(self.n_action)\n        else:\n            action = np.argmax(q_)\n        return action\n\n    def learn(self):\n        if self.memory_count > self.memory_size:\n            sample_index = np.random.choice(self.memory_size, size=batch_size)\n        else:\n            sample_index = np.random.choice(self.memory_count, size=batch_size)\n        train_data_sets = self.replay_buffer[sample_index, :]\n        loss1 = tf.reduce_mean(tf.squared_difference(self.rs_p, train_data_sets[:, -1]))\n        loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=utils.onehot_mat(train_data_sets[:, ]), logits=self.state_hat))\n\n    # def update_rs(self, current_exp):\n    #\n    #     # A simple update rule. 
current_exp's shape: [state, a, next_state, r, done]\n    #     s_ = current_exp[2]\n    #     r = current_exp[3]\n    #     error = r - self.w[s_]\n    #     self.w[s_] += self.learning_rate * error  # w corresponds to the update of R(s) in the formula\n    #     return error\n\n    # def update_sr(self, current_exp, next_exp):\n    #     # SARSA TD learning rule\n    #     # update the M(s, s', a)\n    #     s = current_exp[0]  # current state\n    #     s_a = current_exp[1]  # chosen action\n    #     s_ = current_exp[2]  # next state\n    #     s_a_1 = next_exp[1]  # action chosen in the next state\n    #     r = current_exp[3]  # reward in current state\n    #     d = current_exp[4]  # whether the current state is terminal\n    #     I = utils.onehot(s, env.state_size)  # transform current state to one-hot vector\n    #     if d:\n    #         td_error = (I + self.gamma * utils.onehot(s_, env.state_size) - self.M[s_a, s, :])\n    #     else:\n    #         td_error = (I + self.gamma * self.M[s_a_1, s_, :] - self.M[s_a, s, :])\n    #     self.M[s_a, s, :] += self.learning_rate * td_error\n    #     return td_error\n\n\n\nagent = TabularSuccessorAgent(env.state_size, env.action_size, lr, gamma)\n\ntest_experiences = []\ntest_lengths = []\nlifetime_td_errors = []\n\nfor i in range(episodes):\n    # Train phase\n    agent_start = [0, 0]\n    if i < episodes // 2:\n        goal_pos = [0, grid_size - 1]\n    else:\n        if i == episodes // 2:\n            print(\"Switched reward locations\")\n        goal_pos = [grid_size - 1, grid_size - 1]\n\n    env.reset(agent_pos=agent_start, goal_pos=goal_pos)\n    state = env.observation\n    episodic_error = []\n    for j in range(train_episode_length):\n        action = agent.choose_action(state, epsilon)\n        reward = env.step(action)\n        state_next = env.observation\n        done = env.done\n        agent.store_transition(state, action, reward, state_next)\n        state = state_next\n        if agent.memory_count > batch_size:\n            agent.learn()\n\n        # if (j > 1):\n        #     # at least two interactions with the environment so far; current experience: experiences[-1], previous one: experiences[-2]\n        #     td_sr = agent.update_sr(experiences[-2], experiences[-1])\n        #     td_w = agent.update_w(experiences[-1])\n        #     episodic_error.append(np.mean(np.abs(td_sr)))\n        #     if env.done:\n        #         td_sr = agent.update_sr(experiences[-1], experiences[-1])\n        #         episodic_error.append(np.mean(np.abs(td_sr)))\n        #         break\n    lifetime_td_errors.append(np.mean(episodic_error))\n\n    # Test phase\n    env.reset(agent_pos=agent_start, goal_pos=goal_pos)\n    state = env.observation\n    for j in range(test_episode_length):\n        action = agent.choose_action(state, epsilon=epsilon)\n        reward = env.step(action)\n        state_next = env.observation\n        test_experiences.append([state, action, state_next, reward])\n        state = state_next\n        if env.done:\n            break\n    test_lengths.append(j)\n\n    if i % 50 == 0:\n        print('\\rEpisode {}/{}, TD Error: {}, Test Lengths: {}'\n              .format(i, episodes, np.mean(lifetime_td_errors[-50:]),\n                      np.mean(test_lengths[-50:])), end='')\n\n# fig = plt.figure(figsize=(10, 6))\n#\n# ax = fig.add_subplot(2, 2, 1)\n# ax.plot(lifetime_td_errors)\n# ax.set_title(\"TD Error\")\n# ax = fig.add_subplot(2, 2, 2)\n# ax.plot(test_lengths)\n# ax.set_title(\"Episode Lengths\")\n# plt.show()\n" }, { "alpha_fraction": 0.4433070719242096, "alphanum_fraction": 0.4535433053970337, "avg_line_length": 26.565217971801758, "blob_id": "99045f72d8224086e13807e20f59a21720187afc", "content_id": "7db5e9c8e1c1d06d92e0cc709f89314c963b199a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1270, "license_type": "no_license", "max_line_length": 65, "num_lines": 46, "path": "/algorithm/lock_file.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "\ndef get_file_content(file='./__init__.py'):\n    key 
= 'yuteryxs'\n    i = 0\n    len_key = len(key)\n    new_file = open(\"./new_file\", 'wb')\n    with open(file, 'r') as f:\n        str = f.read()\n        print(len(str))\n        new_str = \"\"\n        for ch in str:\n            if i == len_key:\n                i = 0\n            # print('original: %d'%ord(ch))\n            new_ch = chr(ord(ch) + ord(key[i]) - 120)\n            # print('new char : %d'%ord(new_ch))\n            i += 1\n            new_str += new_ch\n        print(new_str)\n        for ch in new_str:\n            print(ord(ch))\n        print(len(new_str))\n        new_file.write(new_str.encode('utf-8'))\n        new_file.close()\n\ndef unlock_file():\n    key = 'yuteryxs'\n    i = 0\n    len_key = len(key)\n    unlock_file_text = \"\"\n    unl_file = open('./unlocked_file', 'w')\n    with open('./new_file', 'r') as f:\n        str = f.read()\n        print(str)\n        for ch in str:\n            print(ord(ch))\n            if i == len_key:\n                i = 0\n            unlock_file_text += chr(ord(ch) - ord(key[i]) + 120)\n            i += 1\n        for ch in unlock_file_text:\n            print(ord(ch))\n        print(len(unlock_file_text))\n        unl_file.write(unlock_file_text)\n\nunlock_file()\n# get_file_content()\n\n" }, { "alpha_fraction": 0.4892742931842804, "alphanum_fraction": 0.5171154737472534, "avg_line_length": 30.285715103149414, "blob_id": "652e5e3c4561e119eb4655ea61f082d338fdc14e", "content_id": "6e8b7cdd568d5da537a02cbb2c7dce15dde390ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2377, "license_type": "no_license", "max_line_length": 115, "num_lines": 70, "path": "/algorithm/offer1.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "class ListNode(object):\n    def __init__(self, data, next=None):\n        self.data = data\n        self.next = next\n\n\ndef findKthToTail(head, k):\n    if head is None:\n        raise Exception(\"head is None!\")\n    if k == 0:\n        raise Exception(\"The value of K can not be 0!\")\n    ahead = head\n    for i in range(k):  # advance the front pointer k nodes\n        if ahead is not None:\n            ahead = ahead.next\n        else:\n            raise Exception(\"There is an Error!\")\n    behind = head\n    while ahead is not None:\n        ahead = ahead.next\n        behind = behind.next\n    print(behind.data)\n\n\n# Find a duplicated number\ndef get_duplication(a, length):\n    if a is None or length <= 0: raise Exception(\"a is None or length Error!\")\n    for i in range(length):\n        while a[i] != i:\n            if a[i] == a[a[i]]:\n                return a[i]\n            else:\n                t = a[i]\n                a[i] = a[t]\n                a[t] = t\n\n\n# Search in a 2-D array. Each row is sorted in increasing order from left to right,\n# and each column is sorted in increasing order from top to bottom.\n# Write a function that takes such a 2-D array and an integer and decides whether the array contains that integer.\ndef search_2_dim(array, n, *shape):\n    if len(shape) != 2: raise Exception(\"shape must have 2 dimension!\")\n    if array is None or n is None: raise Exception(\"There is an error in array or n!\")\n    start_line = 0\n    end_col = shape[1] - 1\n    tmp = array[start_line][end_col]\n    while n != tmp:\n        if start_line == shape[0] or end_col < 0: return None\n        if n > tmp:\n            start_line += 1\n        else:\n            for i in range(end_col, -1, -1):\n                if array[start_line][i] == n:\n                    return (start_line, i)\n            return None\n        if n < tmp:\n            end_col -= 1\n        else:\n            for i in range(shape[0]-1, -1, -1):\n                if array[i][end_col] == n:\n                    return (i, end_col)\n            return None\n        tmp = array[start_line][end_col]\n    return (start_line, end_col)\n\n\nif __name__ == '__main__':\n    # head = ListNode(8, ListNode(7, ListNode(6, ListNode(5, ListNode(4, ListNode(3, ListNode(2, ListNode(1))))))))\n    # findKthToTail(head, 3)\n    # print(get_duplication([3,4,2,3,5], 5))\n    print(search_2_dim([[1, 2, 8, 9], [2, 4, 9, 12], [4, 7, 10, 13], [6, 8, 11, 15]], 12, 4, 4))\n\n" }, { "alpha_fraction": 0.5454867482185364, "alphanum_fraction": 0.5610619187355042, "avg_line_length": 31.102272033691406, "blob_id": "b765400c598edc0a2da0948a0f4a3b0261c5a197", 
"content_id": "4d004371ec7291526bdd32afaf24810df91d1212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2843, "license_type": "no_license", "max_line_length": 95, "num_lines": 88, "path": "/reinforcement_learning/SR/dsr_brain_torch.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nimport torchvision\nimport numpy as np\nfrom dsr_maze_env import DSRMaze\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self, feature_size, action_size, phi_size=4, buffer_size=100, batch_size=32):\n super(AutoEncoder, self).__init__()\n self.feature_size = feature_size # states' feature dimension.\n self.action_size = action_size\n self.phi_size = phi_size\n self.count = 0\n self.batch_size = batch_size\n\n self.encoder = nn.Sequential(\n nn.Linear(self.feature_size, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, self.phi_size)\n )\n\n self.decoder = nn.Sequential(\n nn.Linear(4, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, self.feature_size)\n )\n\n def forward(self, x):\n # 自动编码器训练部分\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return encoded, decoded\n\n\nclass Agent():\n def __init__(self):\n self.env = DSRMaze()\n self.count = 0\n self.buffer_size = 100\n self.replay_buffer = np.zeros(\n (self.buffer_size, self.env.feature_size * 2 + 2), dtype=float)\n self.batch_size = 8\n\n def store_transition(self, s, a, r, s_):\n index = self.count % self.buffer_size\n self.replay_buffer[index] = np.hstack((s, [a, r], s_))\n self.count += 1\n\n def get_train_data_set(self):\n choice_index = np.random.choice(\n self.buffer_size if self.buffer_size < self.count else self.count, self.batch_size)\n return self.replay_buffer[choice_index]\n\n def play(self):\n for i in range(100):\n s = self.env.get_current_state()\n action_index = np.random.randint(0, 4, 1)\n s_, reward, done = self.env.step(action_index[0])\n self.store_transition(s, action_index[0], reward, s_)\n if done:\n self.env.reset()\n\n autoEncoder = AutoEncoder(self.env.feature_size, self.env.action_size)\n optimizer = torch.optim.Adam(autoEncoder.parameters, 0.005)\n loss_func = nn.MSELoss()\n train_data = self.replay_buffer[:, 0: self.env.feature_size]\n for epoch in range(1000):\n b_x = train_data[:, 0:self.env.feature_size]\n b_y = train_data[:, 0:self.env.feature_size]\n encoded, decoded = autoEncoder(b_x)\n loss = loss_func(decoded, b_y)\n optimizer.zero_grad()\n loss.backward()\n\n optimizer.step()\n print('Epoch:', epoch, ' | train loss: %.4f' % loss.data.numpy())\n\n\nif __name__ == '__main__':\n agent = Agent()\n agent.play()\n" }, { "alpha_fraction": 0.39777669310569763, "alphanum_fraction": 0.409859836101532, "avg_line_length": 25.87013053894043, "blob_id": "674f30675d7fb6a0b09999426cad5edf297e6180", "content_id": "75ab76fd8a67e22df799ce79c8871726c7eff2e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 55, "num_lines": 77, "path": "/graph/top_sort.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# 拓扑排序\nimport copy\n\nclass KeyRoute:\n def __init__(self):\n pass\n\n @staticmethod\n def top_sort(Graph):\n \"\"\"\n topologically sorted DAG\n :param Graph:\n :return:\n \"\"\"\n in_degrees = dict((u, 0) for u in Graph.keys())\n for u in Graph.keys():\n for v in 
Graph[u].keys():\n                if v in in_degrees:\n                    in_degrees[v] += 1\n                else:\n                    in_degrees[v] = 0\n        Q = [u for u in Graph if in_degrees[u] == 0]\n        '''At least one vertex must have in-degree 0; otherwise the graph contains a cycle and is not a DAG.'''\n        S = []\n        while len(Q) > 0:\n            u = Q.pop()\n            S.append(u)\n            for v in Graph[u]:\n                in_degrees[v] -= 1\n                if in_degrees[v] == 0:\n                    Q.append(v)\n        return S\n\n    @staticmethod\n    def event_earliest_time(vnum, Graph, topseq):\n        '''\n        @param vnum: number of vertices\n        @param graph: the graph\n        @param topseq: the topological order\n        '''\n        ee = dict((e, 0) for e in topseq)\n        for i in topseq:\n            for j in Graph[i]:\n                if ee[i] + Graph[i][j] > ee[j]:\n                    ee[j] = ee[i] + Graph[i][j]\n        return ee\n\n    @staticmethod\n    def event_latest_time(Graph, topseq, eelast):\n        tmp_topseq = copy.deepcopy(topseq)\n        el = dict((e, eelast) for e in tmp_topseq)\n        for i in range(len(topseq)-1, -1, -1):\n            k = topseq[i]\n            for key, value in Graph.items():\n                if k in value.keys():\n                    if el[k] - Graph[key][k] < el[key]:\n                        el[key] = el[k] - Graph[key][k]\n        return el\n\n\nif __name__ == '__main__':\n    G = {\n        'A': {'B': 6, 'C': 4, 'D': 5},\n        'B': {'E': 1},\n        'C': {'E': 1},\n        'D': {'F': 2},\n        'E': {'G': 5, 'H': 7},\n        'F': {'G': 4},\n        'G': {'I': 4},\n        'H': {'I': 2},\n        'I': {}\n    }\n    S = KeyRoute.top_sort(G)\n    print(S)\n    ete = KeyRoute.event_earliest_time(9, G, S)\n    print(ete)\n    elt = KeyRoute.event_latest_time(G, S, ete[S[-1]])\n" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7356321811676025, "avg_line_length": 43, "blob_id": "35f45606fc9ef494a7b995079d551d3a2c20d7a8", "content_id": "058e7654cbebdd05fe17ebdde90af5dcc27252ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 87, "license_type": "no_license", "max_line_length": 76, "num_lines": 2, "path": "/reinforcement_learning/DQN-Breakout-v4/start_train.sh", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "#!/bin/zsh\npython ./runner.py --env_name=BreakoutNoFrameskip-v4 --train_dqn --do_render" }, { "alpha_fraction": 0.504531741142273, "alphanum_fraction": 0.5317220687866211, "avg_line_length": 22.133333206176758, "blob_id": "2f21716a0b7d6e37e3cb59ab9723deb5767298bd", "content_id": "67eeac61fd2472518cb1950199d67c3e2a1b3d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/InterNET/login.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# coding=utf-8\ndef data_01():\n    count = int(input(\"Enter how many numbers to sum: \"))\n    my_data = list()\n    for num in range(0,count):\n        my_num = float(input(\"Enter value #\"+str(num+1)+\": \"))\n        my_data.append(my_num)\n    print(my_data)\n    ss = 0.0\n    for num in my_data:\n        ss += num\n    di = ss/(len(my_data))\n    print(di)\n\ndata_01()" }, { "alpha_fraction": 0.7847222089767456, "alphanum_fraction": 0.7847222089767456, "avg_line_length": 35.25, "blob_id": "b777ba5327b28b188b559776bdecdb28283322e4", "content_id": "3a09f4c16f15d90706111cabc4ca613ddb275438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 64, "num_lines": 4, "path": "/operate_sys/__init__.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "# Operating-system algorithm implementations\nfrom __future__ import division, absolute_import, print_function\nfrom .priority_algorithm import *\nfrom .short_task_priority import *" }, { "alpha_fraction": 0.5396825671195984, "alphanum_fraction": 0.5547618865966797, "avg_line_length": 
22.79245376586914, "blob_id": "3c769ebb630b30649dbcfbe57f97cfd46ec45052", "content_id": "4f1b84c49a6adedaf9f15ab19e205b6f46b74e48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1260, "license_type": "no_license", "max_line_length": 48, "num_lines": 53, "path": "/ml/spam_classifier/spam.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "from collections import Counter\nimport os\n\ndef make_dictionary():\n direc = './email/'\n files = os.listdir(direc)\n\n emails = [direc + email for email in files]\n words = []\n c = len(emails)\n\n for email in emails:\n f = open(email, encoding='ISO 8859-1')\n blob = f.read()\n words += blob.split(' ')\n print(c)\n c -= 1\n for i in range(len(words)):\n if not words[i].isalpha():\n words[i] = ''\n\n dictionary = Counter(words)\n del dictionary['']\n return dictionary.most_common(3000)\n\ndef make_dataset(dictionary):\n direct = './email/'\n files = os.listdir(direct)\n emails = [direct + email for email in files]\n feature_set = []\n labels = []\n c = len(emails)\n for email in emails:\n print(email)\n data = []\n f = open(email, encoding='ISO 8859-1')\n words = f.read().split(' ')\n for entry in dictionary:\n data.append(words.count(entry[0]))\n feature_set.append(data)\n\n if 'ham' in email:\n labels.append(0)\n if 'spam' in email:\n labels.append(1)\n print(c)\n c -= 1\n return feature_set, labels\n\n\n\ndictionary = make_dictionary()\nfeatures, labels = make_dataset(dictionary)" }, { "alpha_fraction": 0.6143497824668884, "alphanum_fraction": 0.6502242088317871, "avg_line_length": 23.77777862548828, "blob_id": "bf41090cf4e89e2e009795776b6a7a7edf40f9e4", "content_id": "6e663b5383ebc77018225124f5a232cd0d25f644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 59, "num_lines": 9, "path": "/reinforcement_learning/DSR-Maze/runner.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import gym\nfrom maze_agent import AgentDSR\nimport gym_maze\n\nif __name__ == '__main__':\n env = gym.make(\"maze-sample-5x5-v0\")\n env.render()\n agent = AgentDSR(env, n_steps=40000, output_graph=True)\n agent.train()\n" }, { "alpha_fraction": 0.5762237906455994, "alphanum_fraction": 0.5904095768928528, "avg_line_length": 34, "blob_id": "872ef072580ec54c1f5fc7d8bee23c4e3a216922", "content_id": "5565fd20008ee5044989fce2a61261cd910ce40c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5055, "license_type": "no_license", "max_line_length": 109, "num_lines": 143, "path": "/reinforcement_learning/SR/sr_brain.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nfrom sklearn.decomposition import PCA\nfrom gridworld import SimpleGrid\nfrom tensorflow._api.v2.compat import v1 as tf\nfrom dsr_maze_env import DSRMaze\ntf.disable_eager_execution()\ntf.disable_v2_behavior()\n\ngrid_size = 7\npattern=\"four_rooms\"\n# env = SimpleGrid(grid_size, block_pattern=pattern, obs_mode=\"index\")\n# env.reset(agent_pos=[0,0], goal_pos=[0, grid_size-1])\nenv = DSRMaze()\nenv.reset()\n# plt.imshow(env.grid)\n# plt.show()\nsess = tf.Session()\n# sess.run(tf.global_variables_initializer())\n\nclass TabularSuccessorAgent(object):\n def __init__(self, n_state, n_action, learning_rate, gamma):\n self.n_state = n_state\n self.n_action 
= n_action\n        # M has shape (n_action, n_state, n_state)\n        self.M = np.stack([np.identity(n_state) for i in range(n_action)])\n        self.w = np.zeros([n_state])\n        self.learning_rate = learning_rate\n        self.gamma = gamma\n    \n    def Q_estimates(self, state, goal=None):\n        # Generate Q values for all actions.\n        if goal is None:\n            goal = self.w\n        else:\n            goal = utils.onehot(goal, self.n_state)\n        return np.matmul(self.M[:,state,:],goal)\n    \n    def choose_action(self, state, goal=None, epsilon=0.0):\n        # Samples action using epsilon-greedy approach\n        if np.random.uniform(0, 1) < epsilon:\n            action = np.random.randint(self.n_action)\n        else:\n            Qs = self.Q_estimates(state, goal)\n            action = np.argmax(Qs)\n        return action\n    \n    def update_w(self, current_exp):\n        # A simple update rule. current_exp's shape: [state, a, next_state, r, done]\n        s_ = current_exp[2]\n        r = current_exp[3]\n        error = r - self.w[s_]\n        self.w[s_] += self.learning_rate * error  # w corresponds to the update of R(s) in the formula\n        return error\n    \n    def update_sr(self, current_exp, next_exp):\n        # SARSA TD learning rule\n        # update the M(s, s', a)\n        s = current_exp[0]  # current state\n        s_a = current_exp[1]  # chosen action\n        s_ = current_exp[2]  # next state\n        s_a_1 = next_exp[1]  # action chosen in the next state\n        r = current_exp[3]  # reward in current state\n        d = current_exp[4]  # whether the current state is terminal\n        I = utils.onehot(s, env.state_size)  # transform current state to one-hot vector \n        if d: \n            td_error = (I + self.gamma * utils.onehot(s_, env.state_size) - self.M[s_a, s, :]) \n        else:\n            td_error = (I + self.gamma * self.M[s_a_1, s_, :] - self.M[s_a, s, :])\n        self.M[s_a, s, :] += self.learning_rate * td_error\n        return td_error\n\ntrain_episode_length = 50\ntest_episode_length = 50\nepisodes = 2000\ngamma = 0.95\nlr = 5e-2\ntrain_epsilon = 1.0\ntest_epsilon = 0.1\n\nagent = TabularSuccessorAgent(env.state_size, env.action_size, lr, gamma)\n\nexperiences = [] # replay buffer; each entry is [state, action, next_state, reward, done]\ntest_experiences = []\ntest_lengths = []\nlifetime_td_errors = []\n\nfor i in range(episodes):\n    # Train phase\n    agent_start = [0,0]\n    if i < episodes // 2:\n        goal_pos = [0, grid_size-1]\n    else:\n        if i == episodes // 2:\n            print(\"\\nSwitched reward locations\")\n        goal_pos = [grid_size-1,grid_size-1]\n    env.reset()\n    state = env.get_current_state()\n    episodic_error = []\n    for j in range(train_episode_length):\n        action = agent.choose_action(state, epsilon=train_epsilon)\n        s_, reward, done = env.step(action)\n        experiences.append([state, action, s_, reward, done])\n        state = s_\n        if (j > 1):\n            # at least two interactions with the environment so far; 
当前experience: experience[-1], 前一次experience[-2]\n td_sr = agent.update_sr(experiences[-2], experiences[-1])\n td_w = agent.update_w(experiences[-1])\n episodic_error.append(np.mean(np.abs(td_sr)))\n if env.done:\n td_sr = agent.update_sr(experiences[-1], experiences[-1])\n episodic_error.append(np.mean(np.abs(td_sr)))\n break\n lifetime_td_errors.append(np.mean(episodic_error))\n \n # Test phase\n env.reset()\n state = env.observation\n for j in range(test_episode_length):\n action = agent.choose_action(state, epsilon=test_epsilon)\n reward = env.step(action)\n state_next = env.observation\n test_experiences.append([state, action, state_next, reward])\n state = state_next\n if env.done:\n break\n test_lengths.append(j)\n \n if i % 50 == 0:\n print('\\rEpisode {}/{}, TD Error: {}, Test Lengths: {}'\n .format(i, episodes, np.mean(lifetime_td_errors[-50:]), \n np.mean(test_lengths[-50:])), end='')\n\nfig = plt.figure(figsize=(10, 6))\n\nax = fig.add_subplot(2, 2, 1)\nax.plot(lifetime_td_errors)\nax.set_title(\"TD Error\")\nax = fig.add_subplot(2, 2, 2)\nax.plot(test_lengths)\nax.set_title(\"Episode Lengths\")\nplt.show()\n" }, { "alpha_fraction": 0.39875528216362, "alphanum_fraction": 0.4056456983089447, "avg_line_length": 36.19008255004883, "blob_id": "01a42b6ef28047f4cc4607309fe8680467c0b5e1", "content_id": "b020c1ed1fff28698f8f529f0b7cd828edde418c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5339, "license_type": "no_license", "max_line_length": 127, "num_lines": 121, "path": "/softmax/softmax_loss.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import numpy as np\n\"\"\"\n 第一种计算softmax_loss, 在计算每一分类的概率时,用到了矩阵运算。\n\"\"\"\nclass Softmax:\n\n def __init__(self):\n pass\n\n @staticmethod\n def softmax_loss_naive(w, x, y, reg):\n \"\"\"\n 使用显示循环版本计算softmax损失函数\n N:数据个数, D:数据维度, C:数据类别个数\n inputs:\n :param w: shape:[D, C], 分类器权重参数\n :param x: shape:[N, D] 训练数据\n :param y: shape:[N, ]数据标记\n :param reg: 惩罚项系数\n :return:二元组:loss:数据损失值, dw:权重w对应的梯度,其形状和w相同\n \"\"\"\n loss = 0.0\n dw = np.zeros_like(w)\n #############################################################################\n # 任务:使用显式循环实现softmax损失值loss及相应的梯度dW 。 #\n # 温馨提示: 如果不慎,将很容易造成数值上溢。别忘了正则化哟。 #\n #############################################################################\n y = np.asarray(y, dtype=np.int)\n num_train, dim = x.shape\n num_class = w.shape[1]\n score = x.dot(w)\n score_max = np.max(score, axis=1).reshape(num_train, 1)\n # 计算对数概率, prob.shape = N*D, 每一行与一个样本对应, 每一行的概率和为1\n z = np.sum(np.exp(score - score_max), axis=1, keepdims=True)\n z1 = np.sum(np.exp(score - score_max), axis=1)\n e_j = np.exp(score - score_max)\n prob = e_j / z\n for i in range(num_train):\n loss += -np.log(prob[i, y[i, 0]]) # loss = 1 - prob[i, y[i, 0]], loss = log(loss), so loss = -np.log(prob[i, y[i]])\n for j in range(num_class):\n if j == y[i]:\n dw[:, j] += -(1-prob[i, j]) * x[i]\n else:\n dw[:, j] += prob[i, j] * x[i]\n loss = loss / num_train + 0.5 * reg * np.sum(w*w)\n dw = dw / num_train + reg * w\n return loss, dw\n\n \"\"\"\n 第二种计算softmax_loss, 纯向量计算\n \"\"\"\n @staticmethod\n def softmax_loss_naive_vec(w, x, y, reg):\n \"\"\"\n N:数据个数, D:数据维度, C:数据类别个数\n inputs:\n :param w: shape:[D, C], 分类器权重参数\n :param x: shape:[N, D] 训练数据\n :param y: shape:[N, ]数据标记\n :param reg: 惩罚项系数\n :return:二元组:loss:数据损失值, dw:权重w对应的梯度,其形状和w相同\n \"\"\"\n loss = 0.0\n dw = np.zeros_like(w)\n #############################################################################\n # 
        #############################################################################\n        # Task: implement the softmax loss and the corresponding gradient dW       #\n        # using explicit loops. Tip: be careful, it is easy to cause numeric       #\n        # overflow. Don't forget the regularization term.                          #\n        #############################################################################\n        num_train = x.shape[0]\n        num_class = w.shape[1]\n        y = np.asarray(y, dtype=np.int).reshape(-1)\n        for i in range(num_train):\n            s = x[i].dot(w)\n            score = s - np.max(s)\n            score_E = np.exp(score)\n            Z = np.sum(score_E)\n            score_target = score_E[y[i]]\n            loss += -np.log(score_target / Z)\n            for j in range(num_class):\n                if j == y[i]:\n                    dw[:, j] += -x[i] * (1 - score_E[j] / Z)\n                else:\n                    dw[:, j] += x[i] * (score_E[j] / Z)\n\n        loss = loss / num_train + 0.5*reg*np.sum(w*w)\n        dw = dw / num_train + reg*w\n        return loss, dw\n\n    \"\"\"\n    Third version: pure matrix computation.\n    \"\"\"\n    @staticmethod\n    def softmax_loss_matrix(w, x, y, reg):\n        \"\"\"\n        Use pure matrix operations.\n        N: number of samples, D: data dimension, C: number of classes\n        inputs:\n        :param w: shape [D, C], classifier weight parameters\n        :param x: shape [N, D], training data\n        :param y: shape [N, ], data labels\n        :param reg: regularization coefficient\n        :return: 2-tuple; loss: the data loss value, dw: the gradient of w, with the same shape as w\n        \"\"\"\n        loss = 0.0\n        dw = np.zeros_like(w)\n        #############################################################################\n        # Task: implement the softmax loss and the corresponding gradient dW.      #\n        # Tip: be careful, it is easy to cause numeric overflow. Don't forget      #\n        # the regularization term.                                                  #\n        #############################################################################\n        num_train = x.shape[0]\n        y = np.asarray(y, dtype=np.int).reshape(-1)\n        s = x.dot(w)\n        score = s - np.max(s, axis=1, keepdims=True)\n        score_E = np.exp(score)\n        Z = np.sum(score_E, axis=1, keepdims=True)\n        prob = score_E / Z\n        y_true_class = np.zeros_like(prob)\n        y_true_class[range(num_train), y] = 1.0\n        loss += -np.sum(y_true_class * np.log(prob)) / num_train + 0.5*reg*np.sum(w*w)\n        dw += -np.dot(x.T, y_true_class - prob) / num_train + reg * w\n        return loss, dw" }, { "alpha_fraction": 0.5860279202461243, "alphanum_fraction": 0.6119760274887085, "avg_line_length": 34.2957763671875, "blob_id": "924f3a01045e755a217bfd7d788c50a6a1c49cf0", "content_id": "ec1112148ba584819252c56023abfde13f9bc522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2559, "license_type": "no_license", "max_line_length": 116, "num_lines": 71, "path": "/nn/elman.py", "repo_name": "deepBrainWH/MLAlgorithm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\n\nINPUT_UNITS = 4\nHIDDEN_UNITS = 9\nUNDER_TAKE_UNITS = HIDDEN_UNITS\nOUTPUT_UNITS = 1\n'''\n    Hand-written implementation of an Elman neural network (TensorFlow 1.x graph-mode code).\n'''\n\ndef build_model():\n\n    with tf.name_scope(\"input\"):\n        x = tf.placeholder(dtype=tf.float32, name=\"input\", shape=[None, INPUT_UNITS])\n        y = tf.placeholder(dtype=tf.float32, name='output', shape=[None, OUTPUT_UNITS])\n        undertake = tf.placeholder(dtype=tf.float32, name=\"undertake\", shape=[None, HIDDEN_UNITS])  # context-layer input -- initialised to zero\n\n    with tf.name_scope(\"input-layer\"):\n        w1 = tf.Variable(tf.random.truncated_normal((INPUT_UNITS, HIDDEN_UNITS)),\n                         name=\"input-weights\")  # input-layer weights\n        b1 = tf.Variable(tf.random.truncated_normal((1, HIDDEN_UNITS)))\n        x_ck = tf.nn.sigmoid(tf.matmul(x, w1) + b1)\n\n    with tf.name_scope(\"undertake-layer\"):\n        w2 = tf.Variable(tf.random.truncated_normal((HIDDEN_UNITS, HIDDEN_UNITS)),\n                         name=\"undertake-weights\")  # context-layer weights\n        b2 = tf.Variable(tf.random.truncated_normal((1, HIDDEN_UNITS)))\n        u_k_1 = tf.nn.sigmoid(tf.matmul(undertake, w2) + b2)\n\n    with tf.name_scope(\"hidden-layer\"):\n        hidden = tf.add(x_ck, u_k_1, 'add')\n\n    with tf.name_scope(\"output-layer\"):\n        w3 = tf.Variable(tf.random.truncated_normal((HIDDEN_UNITS, OUTPUT_UNITS)),\n                         name=\"output-weights\")\n        b3 = tf.Variable(tf.random.truncated_normal((1, OUTPUT_UNITS)))\n        # matmul rather than elementwise *: hidden is [None, HIDDEN_UNITS] and w3 is [HIDDEN_UNITS, OUTPUT_UNITS]\n        x_k = tf.nn.relu(tf.matmul(hidden, w3) + b3)\n\n    with tf.name_scope(\"loss-value\"):\n        RMSE = tf.sqrt(tf.reduce_mean(tf.square(x_k - y)))\n\n    tf.summary.scalar(\"RMSE-loss\", RMSE)\n\n    opt = tf.train.AdadeltaOptimizer(0.001).minimize(RMSE)\n\n    merged = tf.summary.merge_all()\n\n    sess = tf.Session()\n    sess.run(tf.global_variables_initializer())\n    train_writer = tf.summary.FileWriter(\"./log\", sess.graph)\n    undertake_k_1 = np.zeros((1, HIDDEN_UNITS))\n\n    input_x, input_y = get_input_data()\n    batches = input_x.shape[0]\n    for i in range(2500):\n        for j in range(batches):\n            # The Elman context layer feeds back the previous hidden activations,\n            # so fetch `hidden` (not the output x_k) for the next step.\n            _, undertake_k_1 = sess.run([opt, hidden], feed_dict={x: input_x, y: input_y, undertake: undertake_k_1})\n        if i % 10 == 0:\n            summary = sess.run(merged, feed_dict={x: input_x, y: input_y, undertake: undertake_k_1})\n            train_writer.add_summary(summary, i)\n\n\ndef get_input_data():\n    # np.random.randint does not accept a float dtype; cast afterwards instead.\n    x = np.random.randint(0, 10, [1000, 4]).astype(np.float32)\n    # y must match the [None, OUTPUT_UNITS] placeholder, hence shape [1000, 1].\n    y = np.random.randint(10, 100, [1000, 1]).astype(np.float32)\n    return x, y\n\nbuild_model()" } ]
69
jpatch22/sailbot_quiz
https://github.com/jpatch22/sailbot_quiz
0682ed13d9523dbb4c62908e8759fecb5a97c258
3a19b216009c1ca6bec6c5130f384819b13184a9
2b1764b746e86593c6a701eaa022cbc2c7f76873
refs/heads/main
2023-07-31T09:12:37.196062
2021-09-13T21:19:42
2021-09-13T21:19:42
405,774,043
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5724976658821106, "alphanum_fraction": 0.6772684454917908, "avg_line_length": 23.295454025268555, "blob_id": "f7937fb9782b14c30237d3061b24247f08cddc74", "content_id": "56f157008b8b605ddc31ef8416df5a87f355e659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 59, "num_lines": 44, "path": "/sailbot_test.py", "repo_name": "jpatch22/sailbot_quiz", "src_encoding": "UTF-8", "text": "from sailbot_quiz import AngleCalc\nimport pytest\n\ndef test_bound_1():\n assert AngleCalc.boundTo180(425) == 65\n\ndef test_bound_2():\n assert AngleCalc.boundTo180(360) == 0\n\ndef test_bound_3():\n assert AngleCalc.boundTo180(270) == -90\n\ndef test_bound_4():\n assert AngleCalc.boundTo180(-450) == -90\n\ndef test_bound_5():\n assert AngleCalc.boundTo180(180) == -180\n\ndef test_bound_6():\n assert AngleCalc.boundTo180(-180) == -180\n\ndef test_bound_7():\n assert AngleCalc.boundTo180(179) == 179\n\ndef test_between_1():\n assert AngleCalc.isAngleBetween(-90, -180, 110) == True\n\ndef test_between_2():\n assert AngleCalc.isAngleBetween(-90, -180, 80) == False\n\ndef test_between_3():\n assert AngleCalc.isAngleBetween(45, 0, -45) == True\n\ndef test_between_4():\n assert AngleCalc.isAngleBetween(-45, -180, 45) == False\n\ndef test_between_5():\n assert AngleCalc.isAngleBetween(-75, -20, 80) == True\n\ndef test_between_5():\n assert AngleCalc.isAngleBetween(0, 0, 0) == True\n\ndef test_between_5():\n assert AngleCalc.isAngleBetween(0, -20, 0) == False\n" }, { "alpha_fraction": 0.5441328883171082, "alphanum_fraction": 0.5742471218109131, "avg_line_length": 33.39285659790039, "blob_id": "bbd8fc96c5acd6a279d3a573e68c5618a13c0073", "content_id": "04da8387fe1121cf96d63c9520676d9ff72af1a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1926, "license_type": "no_license", "max_line_length": 98, "num_lines": 56, "path": "/sailbot_quiz.py", "repo_name": "jpatch22/sailbot_quiz", "src_encoding": "UTF-8", "text": "import math\nclass AngleCalc:\n\n \"\"\"\n Bounds the provided angle between [-180, 180) degrees.\n Ex. 360 becomes 0, 270 becomes -90, -450 becomes -90.\n @param angle Input angle in degrees.\n @return The bounded angle in degrees.\n \"\"\"\n def boundTo180(angle):\n while(angle >= 180):\n angle -= 360\n while angle < -180:\n angle += 360\n return angle\n\n \"\"\"\n Determines whether |middle_angle| is in the acute angle between the other two bounding angles.\n Note: Input angles are bounded to 180 for safety.\n Ex. 
-180 is between -90 and 110 but not between -90 and 80.\n @param first_angle First angle in degrees.\n @param middle_angle Middle angle in degrees.\n @param second_angle Second angle in degrees.\n @return Whether |middle_angle| is between |first_angle| and |second_angle| (exclusive).\n \"\"\"\n def isAngleBetween(first_angle, middle_angle, second_angle):\n min_clock = 0\n min_anticlock = 0\n clockwise = False\n if first_angle > second_angle:\n min_anticlock = math.fabs(first_angle) + math.fabs(second_angle)\n min_clock = 360 - min_anticlock\n elif second_angle > first_angle:\n min_clock = math.fabs(first_angle) + math.fabs(second_angle)\n min_anticlock = 360 - min_clock\n else:\n if first_angle == second_angle == middle_angle:\n return True\n else:\n return False\n\n if min_clock < min_anticlock:\n clockwise = True\n else:\n clockwise = False\n\n if not clockwise:\n if first_angle - min_anticlock<= middle_angle <= first_angle:\n return True\n else:\n return False\n else:\n if first_angle <= middle_angle <= first_angle + min_clock:\n return True\n else:\n return False\n" } ]
2
jasonkeene/playground
https://github.com/jasonkeene/playground
57887191556c91893c97cedc5f79e8a1259ec6e2
8ed7b8e18896dace509e4f33e64420cb950106d5
81225e6ff47d8a5c77ce366f2c322716d2959071
refs/heads/master
2020-04-12T06:33:40.394022
2019-02-23T23:02:35
2019-02-23T23:02:35
20,604,471
5
2
null
2014-06-07T22:43:12
2015-08-05T00:39:44
2015-08-05T19:17:30
Go
[ { "alpha_fraction": 0.6438989043235779, "alphanum_fraction": 0.7228974103927612, "avg_line_length": 43.2365608215332, "blob_id": "3ca86d9e0d3c918c877708b066cd69b251a6289e", "content_id": "607ccdf7cb9f00e6922e4a4ed57c76416e292dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4114, "license_type": "no_license", "max_line_length": 156, "num_lines": 93, "path": "/reading_list.rst", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "Fundamentals\n------------\n\n- `Communicating Sequential Processes <http://www.amazon.com/dp/0131532715/>`_\n- `Structure and Interpretation of Computer Programs <http://www.amazon.com/dp/0070004846/>`_\n\nUnix\n----\n\n- `Advanced Programming in the UNIX Environment <http://www.amazon.com/dp/0321637739/>`_\n- `The Art of UNIX Programming <http://www.amazon.com/dp/0131429019/>`_\n- `The Design of the UNIX Operating System <http://www.amazon.com/dp/B000M85BS6/>`_\n- `Unix Programming Environment <http://www.amazon.com/dp/0139376992/>`_\n- `Unix Power Tools <http://www.amazon.com/dp/0596003307/>`_\n\nNetworking\n----------\n\n- `Beej's Guide to Network Programming <http://beej.us/guide/bgnet/>`_\n- `UNIX Network Programming <http://www.amazon.com/dp/0139498761/>`_\n- `ZeroMQ - Messaging for Many Applications <http://shop.oreilly.com/product/0636920026136.do>`_\n- `Applied Network Security Monitoring: Collection, Detection, and Analysis <http://www.amazon.com/dp/0124172083/>`_\n- `Network Warrior <http://www.amazon.com/dp/1449387861/>`_\n- `Distributed Systems: Principles and Paradigms <www.amazon.com/gp/product/0132392275/>`_\n\nSecurity\n--------\n\n- `The Tangled Web: A Guide to Securing Modern Web Applications <http://www.amazon.com/dp/1593273886/>`_\n\nCrypto\n------\n\n- `Practical Cryptography <http://www.amazon.com/dp/0471223573/>`_\n- `Practical Cryptography with Go <http://gokyle.org/book/>`_\n\nMath\n----\n\n- `Calculus for the Practical Man <http://www.amazon.com/dp/1406756725/>`_\n- `Trigonometry for the Practical Man <http://www.amazon.com/dp/0442284888/>`_\n\nAlgorithms\n----------\n\n- `Mastering Algorithms with C <http://www.amazon.com/dp/1565924533/>`_\n- `Astronomical Algorithms <http://www.amazon.com/dp/0943396611/>`_\n\nCompilers/Language Design\n-------------------------\n\n- `Constructing Language Processors for Little Languages <http://www.amazon.com/dp/0471597538/>`_\n- `Compilers: Principles, Techniques, and Tools <http://www.amazon.com/dp/0321486811>`_\n\nOperating Systems\n-----------------\n\n- `Operating System Concepts <http://www.amazon.com/dp/1118063333/>`\n- `Operating Systems Design and Implementation <http://www.amazon.com/dp/0131429388/>`_\n\nConcurrency\n-----------\n\n- `The Multiprocessor Programming Revised <http://www.amazon.com/dp/0123973376>`\n\nArchitecture/Patterns/Design\n----------------------------\n\n- `Patterns of Enterprise Application Architecture <http://www.amazon.com/dp/0321127420/>`_\n- `Smalltalk Best Practice Patterns <http://www.amazon.com/dp/013476904X/>`_\n\nCultural\n--------\n\n- `The No Asshole Rule: Building a Civilized Workplace and Surviving One That Isn't <http://www.amazon.com/dp/0446526568/>`_\n\nMiscellaneous\n-------------\n\n- `The Mythical Man-Month: Essays on Software Engineering <http://www.amazon.com/dp/0201835959/>`_\n- `No Silver Bullet <http://faculty.salisbury.edu/~xswang/Research/Papers/SERelated/no-silver-bullet.pdf>`_\n- `Culture & Empire Digital Revolution 
<https://github.com/cultureandempire/cultureandempire.github.io/raw/master/download/cande.pdf>`_\n- `Working Effectively with Legacy Code <http://www.amazon.com/dp/0131177052>`_\n- `Refactoring: Improving the Design of Existing Code <http://www.amazon.com/dp/0201485672/>`_\n- `Coders at Work: Reflections on the Craft of Programming <http://www.amazon.com/dp/1430219483/>`_\n- `The Pragmatic Programmer <http://www.amazon.com/dp/020161622X/>`_\n- `Digital Signal Processing: A Practical Guide for Engineers and Scientists <http://www.amazon.com/dp/075067444X/>`_\n- `The Swift Programming Language <https://itunes.apple.com/us/book/swift-programming-language/id881256329>`_\n- `Using Swift with Cocoa and Objective-C <https://developer.apple.com/library/prerelease/ios/documentation/swift/conceptual/buildingcocoaapps/index.html>`_\n- `Types and Programming Languages <http://www.cis.upenn.edu/~bcpierce/tapl/>`_\n- `Advanced Topics in Types and Programming Languages <http://www.cis.upenn.edu/~bcpierce/attapl/>`_\n- `OpenStack Operations Guide <http://docs.openstack.org/ops/>`_\n- `Dynamically typed languages, Laurence Tratt <http://tratt.net/laurie/research/pubs/html/tratt__dynamically_typed_languages/>`_\n" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 6.800000190734863, "blob_id": "be27d38c054cab0c53ed1e5ff1986803b2063a37", "content_id": "3f3d490ab6bf61cbaa4ee045fc11652b7e3102bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/digital-fundamentals/grey-code/README.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "To run the tests:\n\n```bash\ngo test\n```\n" }, { "alpha_fraction": 0.6023622155189514, "alphanum_fraction": 0.6251968741416931, "avg_line_length": 16.8873233795166, "blob_id": "f09a337bee3e36776f7a8766537ae5f093f6f912", "content_id": "af5766ca96f2544e1b058e332e7ddfd40cc0f358", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 62, "num_lines": 71, "path": "/golang/json_and_go/json.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n)\n\ntype Message struct {\n\tName string `json:\"name\"`\n\tBody string `json:\"body\"`\n\tTime int64 `json:\"time\"`\n}\n\ntype FamilyMember struct {\n\tName string\n\tAge int\n\tParents []string\n}\n\nfunc json_test(v interface{}) {\n\tfmt.Printf(\"original type: %T\\n\", v)\n\tfmt.Printf(\"original value: %#v\\n\", v)\n\n\tbytes, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Printf(\"An error occured in marshalling! %v\\n\\n\", err)\n\t\treturn\n\t}\n\tprint(\"json bytes: \")\n\tos.Stdout.Write(bytes)\n\tprint(\"\\n\")\n\n\tvar result interface{}\n\terr = json.Unmarshal(bytes, &result)\n\tif err != nil {\n\t\tfmt.Printf(\"An error occured in unmarshalling! 
%v\\n\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"result type: %T\\n\", result)\n\tfmt.Printf(\"result value: %#v\\n\\n\", result)\n}\n\nfunc main() {\n\tjson_test(nil)\n\tjson_test(true)\n\tjson_test(false)\n\tjson_test(42)\n\tjson_test(42.42)\n\tjson_test(\"raw string\")\n\tjson_test(\"Hello, 世界\")\n\tjson_test([]interface{}{\n\t\t\"raw string\",\n\t\t42,\n\t\tmap[string]string{\n\t\t\t\"key\": \"value\",\n\t\t},\n\t})\n\n\tm := Message{\"Jason\", \"Hello friend!\", 1294706395881547000}\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar fm FamilyMember\n\terr = json.Unmarshal(b, &fm)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"%#v\\n\", fm)\n}\n" }, { "alpha_fraction": 0.5323880314826965, "alphanum_fraction": 0.5543022751808167, "avg_line_length": 21.816177368164062, "blob_id": "5e1b2f93d44fc6c8513d36c3903164e8ebe8b444", "content_id": "c011eda66b2178172131044125e58fa376417eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3103, "license_type": "no_license", "max_line_length": 85, "num_lines": 136, "path": "/algorithms/other/hanoi/tower_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package hanoi_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\n\t\"github.com/jasonkeene/playground/algorithms/other/hanoi\"\n)\n\nfunc TestTower(t *testing.T) {\n\tt.Run(\"new tower\", func(t *testing.T) {\n\t\tt.Run(\"all rings start on peg A\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{8, 7, 6, 5, 4, 3, 2, 1}),\n\t\t\t\t\"B\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"move entire tower from A to B\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\n\t\t\ttower.Move(8, \"A\", \"B\")\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"B\": hanoi.NewPeg([]int{8, 7, 6, 5, 4, 3, 2, 1}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"move entire tower from A to C\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\n\t\t\ttower.Move(8, \"A\", \"C\")\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"B\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{8, 7, 6, 5, 4, 3, 2, 1}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"it can move bits of the tower from A to C\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\n\t\t\ttower.Move(4, \"A\", \"C\")\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{8, 7, 6, 5}),\n\t\t\t\t\"B\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{4, 3, 2, 1}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"everything moved to B\", func(t *testing.T) {\n\t\tt.Run(\"move back to A\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\t\t\ttower.Move(8, \"A\", \"B\")\n\n\t\t\ttower.Move(8, \"B\", \"A\")\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{8, 7, 6, 5, 4, 3, 2, 1}),\n\t\t\t\t\"B\": 
hanoi.NewPeg([]int{}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"move to C\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\t\t\ttower.Move(8, \"A\", \"B\")\n\n\t\t\ttower.Move(8, \"B\", \"C\")\n\n\t\t\texpected := map[string]*hanoi.Peg{\n\t\t\t\t\"A\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"B\": hanoi.NewPeg([]int{}),\n\t\t\t\t\"C\": hanoi.NewPeg([]int{8, 7, 6, 5, 4, 3, 2, 1}),\n\t\t\t}\n\n\t\t\tif !cmp.Equal(tower.Pegs, expected) {\n\t\t\t\tt.Fatal(cmp.Diff(tower.Pegs, expected))\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"checkable tower\", func(t *testing.T) {\n\t\tt.Run(\"put larger disk on smaller ones\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\t\t\ttower.Check = true\n\n\t\t\ttower.Move(8, \"A\", \"C\")\n\t\t\ttower.Move(8, \"C\", \"B\")\n\t\t\ttower.Move(8, \"B\", \"A\")\n\t\t})\n\n\t\tt.Run(\"panic when forced to put larger disks on smaller ones\", func(t *testing.T) {\n\t\t\ttower := hanoi.NewTower(8)\n\t\t\ttower.Check = true\n\t\t\ttower.Move(4, \"A\", \"C\")\n\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err == nil {\n\t\t\t\t\tt.Fatal(\"expected move to panic\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\ttower.Move(4, \"A\", \"C\")\n\t\t})\n\t})\n}\n" }, { "alpha_fraction": 0.5871559381484985, "alphanum_fraction": 0.6408911943435669, "avg_line_length": 17.16666603088379, "blob_id": "2837c397bf02e40ca719c2217b45ade9ba7b0345", "content_id": "683fccf0c51860d003ec5303eb872e34d7384b5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 763, "license_type": "no_license", "max_line_length": 78, "num_lines": 42, "path": "/the-go-programming-language/ch4/src/arrays/sha.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"crypto/sha1\"\n\t\"crypto/sha256\"\n\t\"crypto/sha512\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n)\n\nvar algo string\n\nfunc main() {\n\tflag.StringVar(&algo, \"algo\", \"sha256\", \"specifies hashing algorithm to use\")\n\tflag.Parse()\n\n\tdata, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"unable to read from stdin\")\n\t\tos.Exit(1)\n\t}\n\n\tswitch algo {\n\tcase \"sha1\":\n\t\thash := sha1.Sum(data)\n\t\tfmt.Printf(\"%#x\\n\", hash)\n\tcase \"sha256\":\n\t\thash := sha256.Sum256(data)\n\t\tfmt.Printf(\"%#x\\n\", hash)\n\tcase \"sha384\":\n\t\thash := sha512.Sum384(data)\n\t\tfmt.Printf(\"%#x\\n\", hash)\n\tcase \"sha512\":\n\t\thash := sha512.Sum512(data)\n\t\tfmt.Printf(\"%#x\\n\", hash)\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, \"unsupported hashing algorithm specified\")\n\t\tos.Exit(2)\n\t}\n}\n" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.5098039507865906, "avg_line_length": 24.5, "blob_id": "14758ec5d3ca86afb0a6096048608038e798a3f4", "content_id": "e6e8f7e518098d258e3a0883a5c6076c63a30a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/python-logging/basic-logging-tutorial/myapp.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import logging\n\nimport mylib\n\n\ndef main():\n logging.basicConfig(filename=\"myapp.log\", level=logging.INFO,\n format=\"%(asctime)s | %(levelname)s | %(message)s\",\n datefmt=\"%Y-%m-%d %I:%M:%S %p\",\n filemode=\"w\")\n logging.info(\"Started 
myapp!\")\n mylib.do_something()\n logging.warn(\"%s before you %s\", \"Leap\", \"look!\")\n logging.info(\"Done!\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5020747184753418, "alphanum_fraction": 0.5027662515640259, "avg_line_length": 28.81443214416504, "blob_id": "85948cfaf05133691526d9a26facedc0c1c1b747", "content_id": "bf99dff81ed521269a18262e28537a856f665ee3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2896, "license_type": "no_license", "max_line_length": 60, "num_lines": 97, "path": "/golang-tour/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [tour.golang.org](http://tour.golang.org/)\n\n- [x] Using the tour\n - [x] Welcome!\n - [x] Hello, 世界\n - [x] Go local\n - [x] The Go Playground\n - [x] Congratulations\n- [x] Basics\n - [x] Packages, variables, and functions.\n - [x] Packages\n - [x] Imports\n - [x] Exported names\n - [x] Functions\n - [x] Functions continued\n - [x] Multiple results\n - [x] Named return values\n - [x] Variables\n - [x] Variables with initializers\n - [x] Short variable declarations\n - [x] Basic types\n - [x] Zero values\n - [x] Type conversions\n - [x] Type inference\n - [x] Constants\n - [x] Numeric Constants\n - [x] Congratulations!\n - [x] Flow control statements: for, if, else, and switch\n - [x] For\n - [x] For continued\n - [x] For is Go's \"while\"\n - [x] Forever\n - [x] If\n - [x] If with a short statement\n - [x] If and else\n - [x] Exercise: Loops and Functions\n - [x] Switch\n - [x] Switch evaluation order\n - [x] Switch with no condition\n - [x] Defer\n - [x] Stacking defers\n - [x] Congratulations!\n - [x] More types: structs, slices, and maps.\n - [x] Pointers\n - [x] Structs\n - [x] Struct Fields\n - [x] Pointers to structs\n - [x] Struct Literals\n - [x] Arrays\n - [x] Slices\n - [x] Slicing slices\n - [x] Making slices\n - [x] Nil slices\n - [x] Adding elements to a slice\n - [x] Range\n - [x] Range continued\n - [x] Exercise: Slices\n - [x] Maps\n - [x] Map literals\n - [x] Map literals continued\n - [x] Mutating Maps\n - [x] Exercise: Maps\n - [x] Function values\n - [x] Function closures\n - [x] Exercise: Fibonacci closure\n - [x] Congratulations!\n- [x] Methods and interfaces\n - [x] Methods and interfaces\n - [x] Methods\n - [x] Methods continued\n - [x] Methods with pointer receivers\n - [x] Interfaces\n - [x] Interfaces are satisfied implicitly\n - [x] Stringers\n - [x] Exercise: Stringers\n - [x] Errors\n - [x] Exercise: Errors\n - [x] Readers\n - [x] Exercise: Readers\n - [x] Exercise: rot13Reader\n - [x] Web servers\n - [x] Exercise: HTTP Handlers\n - [x] Images\n - [x] Exercise: Images\n - [x] Congratulations!\n- [x] Concurrency\n - [x] Concurrency\n - [x] Goroutines\n - [x] Channels\n - [x] Buffered Channels\n - [x] Range and Close\n - [x] Select\n - [x] Default Selection\n - [x] Exercise: Equivalent Binary Trees\n - [x] Exercise: Equivalent Binary Trees\n - [x] Exercise: Web Crawler\n - [x] Where to Go from here...\n" }, { "alpha_fraction": 0.5541666746139526, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 29, "blob_id": "5b7c339f6bc3c471a808cae3a6431df9d0b2639a", "content_id": "9f5402b1e64a3a35967d736937329afb6a607ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 240, "license_type": "no_license", "max_line_length": 111, "num_lines": 8, "path": "/pre-commit", "repo_name": "jasonkeene/playground", "src_encoding": 
"UTF-8", "text": "#!/bin/sh\n\ncd ~/projects/playground\nif [ \"$(cat README.md | shasum | cut -d ' ' -f 1)\" != \"$(./build_readme.py | shasum | cut -d ' ' -f 1)\" ]; then\n echo \"Readme did not match, building..\"\n ./build_readme.py > README.md\n exit 1\nfi\n" }, { "alpha_fraction": 0.8058823347091675, "alphanum_fraction": 0.8058823347091675, "avg_line_length": 41.25, "blob_id": "baf7a3d11ac17f41d471ced2c2a505ee690924fd", "content_id": "63dba1ba5893e31d4ddfc7c1aef2b234594b67b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 170, "license_type": "no_license", "max_line_length": 60, "num_lines": 4, "path": "/haskell-book/ch2/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "redexes are expressions that can still be reduced\nnormal form is where and expression can no longer be reduced\n\nwhy do you not need let when defining variables in ghci?\n\n" }, { "alpha_fraction": 0.4726368188858032, "alphanum_fraction": 0.5124378204345703, "avg_line_length": 15.75, "blob_id": "ec64f964474f7a9ceb5da9f389d42797b5e12a4b", "content_id": "cc5efa9dacfc1a9c95fc51a0a69201053c05ec9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 201, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/golang-tour/basics/packages_variables_and_functions/14_type_inference.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n a := 42\n b := 42.42\n c := 42i\n fmt.Printf(\"a is of type %T\\n\", a)\n fmt.Printf(\"b is of type %T\\n\", b)\n fmt.Printf(\"c is of type %T\\n\", c)\n}\n" }, { "alpha_fraction": 0.6926568746566772, "alphanum_fraction": 0.7030707597732544, "avg_line_length": 33.3577995300293, "blob_id": "0a69eefc5c2d3d90a0ba1fefc909c16fd4ed133a", "content_id": "3242f8857434f8f548feb6830e056624e04c1823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3753, "license_type": "no_license", "max_line_length": 100, "num_lines": 109, "path": "/python-logging/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [Logging HOWTO](https://docs.python.org/2/howto/logging.html)\n\n- [x] Basic Logging Tutorial\n - [x] When to use logging\n - [x] A simple example\n - [x] Logging to a file\n - [x] Logging from multiple modules\n - [x] Logging variable data\n - [x] Changing the format of displayed messages\n - [x] Displaying the date/time in messages\n - [x] Next Steps\n- [x] Advanced Logging Tutorial\n - [x] Logging Flow\n - [x] Loggers\n - [x] Handlers\n - [x] Formatters\n - [x] Configuring Logging\n - [x] What happens if no configuration is provided\n - [x] Configuring Logging for a Library\n- [x] Logging Levels\n - [x] Custom Levels\n- [x] Useful Handlers\n- [x] Exceptions raised during logging\n- [x] Using arbitrary objects as messages\n- [x] Optimization\n\n## [Logging Cookbook](https://docs.python.org/2/howto/logging-cookbook.html)\n\n- [x] Using logging in multiple modules\n- [x] Multiple handlers and formatters\n- [x] Logging to multiple destinations\n- [x] Configuration server example\n- [x] Sending and receiving logging events across a network\n- [x] Adding contextual information to your logging output\n - [x] Using LoggerAdapters to impart contextual information\n - [x] Using objects other than dicts to pass contextual information\n - [x] Using Filters to impart contextual 
information\n- [x] Logging to a single file from multiple processes\n- [x] Using file rotation\n- [x] An example dictionary-based configuration\n- [x] Inserting a BOM into messages sent to a SysLogHandler\n- [x] Implementing structured logging\n- [x] Customizing handlers with dictConfig()\n- [x] Configuring filters with dictConfig()\n\n## [logging — Logging facility for Python](https://docs.python.org/2/library/logging.html)\n\n- [ ] Logger Objects\n- [ ] Logging Levels\n- [ ] Handler Objects\n- [ ] Formatter Objects\n- [ ] Filter Objects\n- [ ] LogRecord Objects\n- [ ] LogRecord attributes\n- [ ] LoggerAdapter Objects\n- [ ] Thread Safety\n- [ ] Module-Level Functions\n- [x] Integration with the warnings module\n\n## [logging.config — Logging configuration](https://docs.python.org/2/library/logging.config.html)\n\n- [ ] Configuration functions\n- [ ] Configuration dictionary schema\n - [ ] Dictionary Schema Details\n - [ ] Incremental Configuration\n - [ ] Object connections\n - [ ] User-defined objects\n - [ ] Access to external objects\n - [ ] Access to internal objects\n - [ ] Import resolution and custom importers\n- [ ] Configuration file format\n\n## [logging.handlers — Logging handlers](https://docs.python.org/2/library/logging.handlers.html)\n\n- [ ] StreamHandler\n- [ ] FileHandler\n- [ ] NullHandler\n- [ ] WatchedFileHandler\n- [ ] RotatingFileHandler\n- [ ] TimedRotatingFileHandler\n- [ ] SocketHandler\n- [ ] DatagramHandler\n- [ ] SysLogHandler\n- [ ] NTEventLogHandler\n- [ ] SMTPHandler\n- [ ] MemoryHandler\n- [ ] HTTPHandler\n\n## [warnings — Warning control](https://docs.python.org/2/library/warnings.html)\n\n- [x] Warning Categories\n- [ ] The Warnings Filter\n - [ ] Default Warning Filters\n- [ ] Temporarily Suppressing Warnings\n- [ ] Testing Warnings\n- [ ] Updating Code For New Versions of Python\n- [ ] Available Functions\n- [ ] Available Context Managers\n\n## Other Reading\n\n- [x] https://docs.python.org/2/tutorial/stdlib2.html#logging\n- [ ] https://docs.python.org/2/library/multiprocessing.html#logging\n- [ ] https://docs.python.org/2/whatsnew/2.3.html#pep-282-the-logging-package\n- [ ] http://legacy.python.org/dev/peps/pep-0282/\n- [ ] https://docs.python.org/2/whatsnew/2.7.html#pep-391-dictionary-based-configuration-for-logging\n- [ ] http://legacy.python.org/dev/peps/pep-0391/\n- [x] https://docs.python.org/2/whatsnew/2.1.html#pep-230-warning-framework\n- [ ] http://legacy.python.org/dev/peps/pep-0230/\n" }, { "alpha_fraction": 0.38580039143562317, "alphanum_fraction": 0.43536505103111267, "avg_line_length": 12.824073791503906, "blob_id": "65e0c07a3ea98162dbdb183702d51c8c88499c6d", "content_id": "7317ba89b5bb5f2923b88da278a0202027625d59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1493, "license_type": "no_license", "max_line_length": 52, "num_lines": 108, "path": "/algorithms/graph/dfs_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestDFS(t *testing.T) {\n\tg := graph.Graph{\n\t\tNodes: []graph.Node{\n\t\t\t{\"y\"}, // 0\n\t\t\t{\"z\"}, // 1\n\t\t\t{\"s\"}, // 2\n\t\t\t{\"t\"}, // 3\n\t\t\t{\"x\"}, // 4\n\t\t\t{\"w\"}, // 5\n\t\t\t{\"v\"}, // 6\n\t\t\t{\"u\"}, // 7\n\t\t},\n\t\tEdges: [][]graph.Edge{\n\t\t\t{\n\t\t\t\t{4, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{0, 1, 0},\n\t\t\t\t{5, 1, 
0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{1, 1, 0},\n\t\t\t\t{5, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{6, 1, 0},\n\t\t\t\t{7, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{1, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{4, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{2, 1, 0},\n\t\t\t\t{5, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{3, 1, 0},\n\t\t\t\t{6, 1, 0},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar order []graph.Node\n\tgraph.DFS(g, 2, func(n graph.Node) {\n\t\torder = append(order, n)\n\t})\n\n\texpectedOrder := []graph.Node{\n\t\t{\"x\"},\n\t\t{\"y\"},\n\t\t{\"w\"},\n\t\t{\"z\"},\n\t\t{\"s\"},\n\t\t{\"v\"},\n\t\t{\"u\"},\n\t\t{\"t\"},\n\t}\n\texpectedEdges := [][]graph.Edge{\n\t\t{\n\t\t\t{4, 1, graph.TreeEdge},\n\t\t},\n\t\t{\n\t\t\t{0, 1, graph.TreeEdge},\n\t\t\t{5, 1, graph.TreeEdge},\n\t\t},\n\t\t{\n\t\t\t{1, 1, graph.TreeEdge},\n\t\t\t{5, 1, graph.ForwardEdge},\n\t\t},\n\t\t{\n\t\t\t{6, 1, graph.TreeEdge},\n\t\t\t{7, 1, graph.TreeEdge},\n\t\t},\n\t\t{\n\t\t\t{1, 1, graph.BackwardEdge},\n\t\t},\n\t\t{\n\t\t\t{4, 1, graph.CrossEdge},\n\t\t},\n\t\t{\n\t\t\t{2, 1, graph.CrossEdge},\n\t\t\t{5, 1, graph.CrossEdge},\n\t\t},\n\t\t{\n\t\t\t{3, 1, graph.BackwardEdge},\n\t\t\t{6, 1, graph.CrossEdge},\n\t\t},\n\t}\n\n\tif !cmp.Equal(g.Edges, expectedEdges) {\n\t\tt.Fatal(cmp.Diff(g.Edges, expectedEdges))\n\t}\n\tif !cmp.Equal(order, expectedOrder) {\n\t\tt.Fatal(cmp.Diff(order, expectedOrder))\n\t}\n}\n" }, { "alpha_fraction": 0.5510203838348389, "alphanum_fraction": 0.6040816307067871, "avg_line_length": 15.333333015441895, "blob_id": "f44d1b4c898dfcd4f479d516f4db1ef42146221d", "content_id": "0bba8f372553ad6937ed9dd9f99eebb18d757061", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 735, "license_type": "no_license", "max_line_length": 60, "num_lines": 45, "path": "/the-go-programming-language/ch3/src/strings/comma.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\nfunc main() {\n\tfmt.Println(Comma(\"1234.5\"))\n\tfmt.Println(Comma(\".5\"))\n\tfmt.Println(Comma(\"1234\"))\n\tfmt.Println(Comma(\"123\"))\n\tfmt.Println(Comma(\"123456\"))\n\tfmt.Println(Comma(\"-123456\"))\n\tfmt.Println(Comma(\"+123456\"))\n}\n\nfunc Comma(s string) string {\n\tvar b bytes.Buffer\n\n\tsign := 0\n\tif strings.HasPrefix(s, \"+\") || strings.HasPrefix(s, \"-\") {\n\t\tsign = 1\n\t}\n\tb.WriteString(s[:sign])\n\n\tperiod := strings.LastIndex(s, \".\")\n\tif period < 0 {\n\t\tperiod = len(s)\n\t}\n\n\tlhs := s[sign:period]\n\trc := utf8.RuneCountInString(lhs)\n\tfor i, r := range lhs {\n\t\tif i != 0 && (i-rc)%3 == 0 {\n\t\t\tb.WriteByte(',')\n\t\t}\n\t\tb.WriteRune(r)\n\t}\n\n\tb.WriteString(s[period:])\n\treturn b.String()\n}\n" }, { "alpha_fraction": 0.5632529854774475, "alphanum_fraction": 0.5768072009086609, "avg_line_length": 17.44444465637207, "blob_id": "e7c2852bb2e7a3e34d9f7e9a0e45844a49053a66", "content_id": "a0cb86013abd619bdc2b3b0b6243ded2131aaf6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 664, "license_type": "no_license", "max_line_length": 45, "num_lines": 36, "path": "/algorithms/graph/dag_topological_sort.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nfunc DAGTopologicalSort(g Graph) []int {\n\tnext := make([]int, 0, len(g.Nodes)/4)\n\n\t// compute in-degrees\n\tinDegree := make([]int, len(g.Nodes))\n\tfor _, edges := range g.Edges {\n\t\tfor _, e := range edges 
{\n\t\t\tinDegree[e.Target]++\n\t\t}\n\t}\n\n\t// start with nodes that have in-degree of 0\n\tfor i, degree := range inDegree {\n\t\tif degree == 0 {\n\t\t\tnext = append(next, i)\n\t\t}\n\t}\n\n\tsorted := make([]int, 0, len(g.Nodes))\n\tfor len(next) != 0 {\n\t\tn := next[0]\n\t\tnext = next[1:]\n\n\t\tsorted = append(sorted, n)\n\t\tfor _, e := range g.Edges[n] {\n\t\t\tinDegree[e.Target]--\n\t\t\tif inDegree[e.Target] == 0 {\n\t\t\t\tnext = append(next, e.Target)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn sorted\n}\n" }, { "alpha_fraction": 0.4371134042739868, "alphanum_fraction": 0.5298969149589539, "avg_line_length": 23.25, "blob_id": "a75d1a450bb596213b1260648b37a95b0c6c91d9", "content_id": "9a97e24337ba6fdb9aa69e4a32fffc0f5fa3e316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 56, "num_lines": 60, "path": "/algorithms/sort/sort_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc testCorrectness(t *testing.T, f func([]int)) {\n\tcases := map[string][]int{\n\t\t\"empty\": []int{},\n\t\t\"single item\": []int{5},\n\t\t\"multiple items\": []int{5, 8, 16, 2, 71, -13, 61},\n\t\t\"duplicate items\": []int{5, 5, 5, 2, 71, -13, 61},\n\t\t\"all the same items\": []int{5, 5, 5, 5, 5, 5, 5},\n\t\t\"already sorted\": []int{-13, 2, 5, 8, 16, 61, 71},\n\t\t\"reversed sorted\": []int{71, 61, 16, 8, 5, 2, -13},\n\n\t\t\"extra_small_rand_8\": random(4, 8),\n\t\t\"small_rand_8\": random(64, 8),\n\t\t\"med_rand_8\": random(1<<31-1, 8),\n\t\t\"large_rand_8\": random(math.MaxInt64, 8),\n\n\t\t\"extra_small_rand_64\": random(4, 64),\n\t\t\"small_rand_64\": random(64, 64),\n\t\t\"med_rand_64\": random(1<<31-1, 64),\n\t\t\"large_rand_64\": random(math.MaxInt64, 64),\n\n\t\t\"extra_small_rand_1024\": random(4, 1024),\n\t\t\"small_rand_1024\": random(64, 1024),\n\t\t\"med_rand_1024\": random(1<<31-1, 1024),\n\t\t\"large_rand_1024\": random(math.MaxInt64, 1024),\n\t}\n\n\tfor k, v := range cases {\n\t\tt.Run(k, func(t *testing.T) {\n\t\t\texpected := make([]int, len(v))\n\t\t\tcopy(expected, v)\n\t\t\tsort.Ints(expected)\n\n\t\t\tf(v)\n\n\t\t\tif !reflect.DeepEqual(expected, v) {\n\t\t\t\tt.Errorf(`%q failed to sort:\n\tExpected: %v\n\tActual: %v`, k, expected, v)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc random(n, s int) []int {\n\ta := make([]int, s)\n\tfor i := 0; i < s; i++ {\n\t\ta[i] = rand.Intn(n)\n\t}\n\treturn a\n}\n" }, { "alpha_fraction": 0.570841908454895, "alphanum_fraction": 0.570841908454895, "avg_line_length": 29.4375, "blob_id": "7cc30611e41db8ece46a0c59bc9d5e4c1b263382", "content_id": "239c87cbcb1eaddca47b6eb80190fb414d828d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 56, "num_lines": 16, "path": "/leetcode/path-sum-ii/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "def path_sum(node, total, parents=None):\n if parents is None:\n parents = []\n if node is None:\n return []\n parents.append(node.val)\n rtn = []\n if node.left is None and node.right is None:\n if total == sum(parents):\n return [parents]\n lpaths = rpaths = []\n if node.left:\n lpaths = path_sum(node.left, total, parents[:])\n if node.right:\n rpaths = path_sum(node.right, total, parents[:])\n return rtn + lpaths + rpaths\n" }, { 
"alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.7321428656578064, "avg_line_length": 17.600000381469727, "blob_id": "12653f687af21f927e8ffe75780efdcc68249e30", "content_id": "f210c6914e792173ab7ce911095eade849a4f95a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 75, "num_lines": 15, "path": "/concurrent-prime-sieve/README.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nA concurent prime sieve that I wrote based off a verbal description of the\nsieve and w/o looking at any other implementations. This will likely not be\nthe best implementation as a result ;)\n\nRun tests:\n\n```bash\ngo test\n```\n\nCalculate N primes:\n\n```bash\ngo run sieve.go -n 20\n```\n" }, { "alpha_fraction": 0.8703703880310059, "alphanum_fraction": 0.8703703880310059, "avg_line_length": 53, "blob_id": "bcd0f25310a2951e0b5ca62351218f4e165d1b2c", "content_id": "dc787cc9036227ca6f577402eb024c89f4d9945b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 54, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/cyclical-pipe/go.mod", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "module github.com/jasonkeene/playground/cyclical-pipe\n" }, { "alpha_fraction": 0.4085801839828491, "alphanum_fraction": 0.5148110389709473, "avg_line_length": 15.593220710754395, "blob_id": "d3465b9ef45ca34837149eb48d830cf4ee05a9ed", "content_id": "56f97535c504f3956f7fabfc38e2c5bbe18f3265", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 979, "license_type": "no_license", "max_line_length": 69, "num_lines": 59, "path": "/digital-fundamentals/grey-code/grey_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package grey\n\nimport \"testing\"\n\nfunc TestToGrey(t *testing.T) {\n\tfixtures := []struct{ in, out uint }{\n\t\t{0x0, 0x0},\n\t\t{0x1, 0x1},\n\t\t{0x2, 0x3},\n\t\t{0x3, 0x2},\n\t\t{0x4, 0x6},\n\t\t{0x5, 0x7},\n\t\t{0x6, 0x5},\n\t\t{0x7, 0x4},\n\t\t{0x8, 0xC},\n\t\t{0x9, 0xD},\n\t\t{0xA, 0xF},\n\t\t{0xB, 0xE},\n\t\t{0xC, 0xA},\n\t\t{0xD, 0xB},\n\t\t{0xE, 0x9},\n\t\t{0xF, 0x8},\n\t}\n\tfor _, tt := range fixtures {\n\t\tresult := ToGrey(tt.in)\n\t\tif result != tt.out {\n\t\t\tt.Errorf(\"ToGrey(%v) failed! expected: %v got: %v\", tt.in, tt.out,\n\t\t\t\tresult)\n\t\t}\n\t}\n}\n\nfunc TestFromGrey(t *testing.T) {\n\tfixtures := []struct{ in, out uint }{\n\t\t{0x0, 0x0},\n\t\t{0x1, 0x1},\n\t\t{0x3, 0x2},\n\t\t{0x2, 0x3},\n\t\t{0x6, 0x4},\n\t\t{0x7, 0x5},\n\t\t{0x5, 0x6},\n\t\t{0x4, 0x7},\n\t\t{0xC, 0x8},\n\t\t{0xD, 0x9},\n\t\t{0xF, 0xA},\n\t\t{0xE, 0xB},\n\t\t{0xA, 0xC},\n\t\t{0xB, 0xD},\n\t\t{0x9, 0xE},\n\t\t{0x8, 0xF},\n\t}\n\tfor _, tt := range fixtures {\n\t\tresult := FromGrey(tt.in)\n\t\tif result != tt.out {\n\t\t\tt.Errorf(\"FromGrey(%v) failed! 
expected: %v got: %v\", tt.in,\n\t\t\t\ttt.out, result)\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.4839044511318207, "alphanum_fraction": 0.4984423816204071, "avg_line_length": 15.050000190734863, "blob_id": "6137febbc3a4c981826691b5e87a34b654488896", "content_id": "baf71ecd51c248c8d166e0770f09a46d23ffe15e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 963, "license_type": "no_license", "max_line_length": 60, "num_lines": 60, "path": "/algorithms/sort/counting.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nfunc Counting(max int) func([]int) {\n\treturn func(a []int) {\n\t\tnew := rearrange(a, keysLess(keysEqual(a, max), max), max)\n\t\tfor i, v := range new {\n\t\t\ta[i] = v\n\t\t}\n\t}\n}\n\nfunc rearrange(a, less []int, max int) []int {\n\tb := make([]int, len(a))\n\tnext := make([]int, max)\n\n\tfor i := 0; i < max; i++ {\n\t\tnext[i] = less[i]\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tkey := a[i]\n\t\tb[next[key]] = key\n\t\tnext[key]++\n\t}\n\n\treturn b\n}\n\nfunc keysLess(equal []int, max int) []int {\n\tless := make([]int, len(equal))\n\tless[0] = 0\n\tfor i := 1; i < len(equal); i++ {\n\t\tless[i] = equal[i-1] + less[i-1]\n\t}\n\treturn less\n}\n\nfunc keysEqual(a []int, max int) []int {\n\tequal := make([]int, max)\n\tfor i := 0; i < len(a); i++ {\n\t\tequal[a[i]]++\n\t}\n\treturn equal\n}\n\nfunc ReallySimpleSort(a []int) {\n\tvar count int\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] == 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif count > 0 {\n\t\t\ta[i] = 0\n\t\t\tcount--\n\t\t} else {\n\t\t\ta[i] = 1\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5628279447555542, "alphanum_fraction": 0.5628279447555542, "avg_line_length": 29.67796516418457, "blob_id": "6b418780e41e41db4909cf76c41b23658ede44b5", "content_id": "f94fa15fe6ecc1664c504498868eb97b2cd5e2db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3621, "license_type": "no_license", "max_line_length": 60, "num_lines": 118, "path": "/the-go-programming-language/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [The Go Programming Language](http://www.gopl.io)\n\n - [x] Preface\n - [x] Tutorial\n - [x] Hello, World\n - [x] Command-Line Arguments\n - [x] Finding Duplicate Lines\n - [x] Animated GIFs\n - [x] Fetching a URL\n - [x] Fetching URLs Concurrently\n - [x] A Web Server\n - [x] Loose Ends\n - [x] Program Structure\n - [x] Names\n - [x] Declarations\n - [x] Variables\n - [x] Assignments\n - [x] Type Declarations\n - [x] Packages and Files\n - [x] Scope\n - [x] Basic Data Types\n - [x] Integers\n - [x] Floating-Point Numbers\n - [x] Complex Numbers\n - [x] Booleans\n - [x] Strings\n - [x] Constants\n - [ ] Composite Types\n - [x] Arrays\n - [x] Slices\n - [ ] Maps\n - [ ] Structs\n - [ ] JSON\n - [ ] Text and HTML Templates\n - [ ] Functions\n - [ ] Function Declarations\n - [ ] Recursion\n - [ ] Multiple Return Values\n - [ ] Errors\n - [ ] Function Values\n - [ ] Anonymous Functions\n - [ ] Deferred Function Calls\n - [ ] Panic\n - [ ] Recover\n - [ ] Methods\n - [ ] Method Declarations\n - [ ] Methods with a Pointer Receiver\n - [ ] Composing Types by Struct Embedding\n - [ ] Method Values and Expressions\n - [ ] Example: Bit Vector Type\n - [ ] Encapsulation\n - [ ] Interfaces\n - [ ] Interfaces as Contracts\n - [ ] Interface Types\n - [ ] Interface Satisfaction\n - [ ] Parsing Flags with 
`flag.Value`\n - [ ] Interface Values\n - [ ] Sorting with `sort.Interface`\n - [ ] The `http.Handler` Interface\n - [ ] The `error` Interface\n - [ ] Example: Expression Evaluator\n - [ ] Type Assertions\n - [ ] Discriminating Errors with Type Assertions\n - [ ] Querying Behaviors with Interface Type Assertions\n - [ ] Type Switches\n - [ ] Example: Token-Based XML Decoding\n - [ ] A Few Words of Advice\n - [ ] Goroutines and Channels\n - [ ] Goroutines\n - [ ] Example: Concurrent Clock Server\n - [ ] Example: Concurrent Echo Server\n - [ ] Channels\n - [ ] Looping in Parallel\n - [ ] Example: Concurrent Web Crawler\n - [ ] Multiplexing with `select`\n - [ ] Example: Concurrent Directory Traversal\n - [ ] Cancellation\n - [ ] Example: Chat Server\n - [ ] Concurrency with Shared Variables\n - [ ] Race Conditions\n - [ ] Mutual Exclusion: `sync.Mutex`\n - [ ] Read/Write Mutexes: `sync.RWMutex`\n - [ ] Memory Synchronization\n - [ ] Lazy Initialization: `sync.Once`\n - [ ] The Race Detector\n - [ ] Example: Concurrent Non-Blocking Cache\n - [ ] Goroutines and Threads\n - [ ] Packages and the Go Tool\n - [ ] Introduction\n - [ ] Import Paths\n - [ ] The Package Declaration\n - [ ] Import Declarations\n - [ ] Blank Imports\n - [ ] Packages and Naming\n - [ ] The Go Tool\n - [ ] Testing\n - [ ] The `go test` Tool\n - [ ] `Test` Functions\n - [ ] Coverage\n - [ ] `Benchmark` Functions\n - [ ] Profiling\n - [ ] `Example` Functions\n - [ ] Reflection\n - [ ] Why Reflection?\n - [ ] `reflect.Type` and `reflect.Value`\n - [ ] `Display`, a Recursive Value Pointer\n - [ ] Example: Encoding S-Expressions\n - [ ] Setting Variables with `reflect.Value`\n - [ ] Example: Decoding S-Expressions\n - [ ] Accessing Struct Field Tags\n - [ ] Displaying the Methods of a Type\n - [ ] A Word of Caution\n - [ ] Low-Level Programming\n - [ ] `unsafe.Sizeof`, `Alignof`, and `Offsetof`\n - [ ] `unsafe.Pointer`\n - [ ] Example: Deep Equivalence\n - [ ] Calling C Code with `cgo`\n - [ ] Another Word of Caution\n" }, { "alpha_fraction": 0.5061455965042114, "alphanum_fraction": 0.5111881494522095, "avg_line_length": 15.440414428710938, "blob_id": "f418e687a4d0a675c2e77767cb99468c6c658379", "content_id": "60ac2010019e276c7a65fc99ba7e3898b1ec470d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3173, "license_type": "no_license", "max_line_length": 53, "num_lines": 193, "path": "/parsers/flbconfig/lex_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package flbconfig_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\n\t\"github.com/jasonkeene/playground/parsers/flbconfig\"\n)\n\nfunc TestLex(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tinput string\n\t\texpectedTokens []flbconfig.Token\n\t}{\n\t\t\"empty\": {\n\t\t\tinput: \"\",\n\t\t\texpectedTokens: []flbconfig.Token{\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"extra whitespace\": {\n\t\t\tinput: `\n\t\t\t\t[section] \n\t\t\t\tkey val \n\t\t\t`,\n\t\t\texpectedTokens: []flbconfig.Token{\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenSection,\n\t\t\t\t\tValue: \"section\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 
flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenKey,\n\t\t\t\t\tValue: \"key\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenValue,\n\t\t\t\t\tValue: \"val \",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"normal\": {\n\t\t\tinput: `\n\n[sectionA]\nkeyA1 valA1\nkeyA2 valA2\n\n[sectionB]\nkeyB1 valB1\nkeyB2 valB2\n\n`,\n\t\t\texpectedTokens: []flbconfig.Token{\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenSection,\n\t\t\t\t\tValue: \"sectionA\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenKey,\n\t\t\t\t\tValue: \"keyA1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenValue,\n\t\t\t\t\tValue: \"valA1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenKey,\n\t\t\t\t\tValue: \"keyA2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenValue,\n\t\t\t\t\tValue: \"valA2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenSection,\n\t\t\t\t\tValue: \"sectionB\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenKey,\n\t\t\t\t\tValue: \"keyB1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenValue,\n\t\t\t\t\tValue: \"valB1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenKey,\n\t\t\t\t\tValue: \"keyB2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenValue,\n\t\t\t\t\tValue: \"valB2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: flbconfig.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tl := flbconfig.NewLexer(tc.input)\n\t\t\tl.Run()\n\n\t\t\tif !cmp.Equal(l.Tokens, tc.expectedTokens) {\n\t\t\t\tt.Error(cmp.Diff(l.Tokens, tc.expectedTokens))\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.45682889223098755, "alphanum_fraction": 0.47095760703086853, "avg_line_length": 16.216217041015625, "blob_id": "5ec92cb7818b68107ee63e02b9350f3651e9041d", "content_id": "298eb34f1054fa85d34d1a7efeaa076f07d9f142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 637, "license_type": "no_license", "max_line_length": 63, "num_lines": 37, "path": 
"/cpp-how-to-program/chapter9/copy.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nclass Foo {\npublic:\n Foo(int num=0) : number(num)\n {\n std::cout << \"Foo(\" << number << \")\" << std::endl;\n }\n Foo(const Foo &other) : number(other.number)\n {\n std::cout << \"copy Foo(\" << number << \")\" << std::endl;\n }\n ~Foo()\n {\n std::cout << \"~Foo(\" << number << \")\" << std::endl;\n }\n int getNumber()\n {\n return number;\n }\nprivate:\n int number;\n};\n\nvoid display(Foo f)\n{\n std::cout << \"display: \" << f.getNumber() << std::endl;\n}\n\nint main(int argc, char *argv[])\n{\n Foo f1(1);\n Foo f2(2);\n display(f1);\n Foo f3 = f2;\n return 0;\n}\n" }, { "alpha_fraction": 0.637785792350769, "alphanum_fraction": 0.6546329855918884, "avg_line_length": 26.633333206176758, "blob_id": "1d555835d0c11ad51cba5c18f0e94706bd1a5522", "content_id": "abc3aed1ecb6b70d40b3b1da070785b3c9787efc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 62, "num_lines": 30, "path": "/leetcode/minimum-path-sum/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\ndef can_move_right(grid, position):\n new_position = move_right(position)\n return new_position[0] < len(grid[0])\n\n\ndef can_move_down(grid, position):\n new_position = move_down(position)\n return new_position[1] < len(grid)\n\n\ndef move_right(position):\n return position[0] + 1, position[1]\n\n\ndef move_down(position):\n return position[0], position[1] + 1\n\n\ndef min_path_sum(grid, position=(0, 0)):\n min_path_sums = []\n\n if can_move_right(grid, position):\n new_position = move_right(position)\n min_path_sums.append(min_path_sum(grid, new_position))\n if can_move_down(grid, position):\n new_position = move_down(position)\n min_path_sums.append(min_path_sum(grid, new_position))\n\n dsum = min(min_path_sums) if min_path_sums else 0\n return grid[position[1]][position[0]] + dsum\n" }, { "alpha_fraction": 0.7027027010917664, "alphanum_fraction": 0.7027027010917664, "avg_line_length": 13.800000190734863, "blob_id": "6a81c3d4da08a556dbdf42b1f5aaba9cfff0bea4", "content_id": "79166f7cbe69da8bdc003b232d070e3c7c767851", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/python-logging/basic-logging-tutorial/mylib.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import logging\n\n\ndef do_something():\n logging.info(\"Doing something!\")\n" }, { "alpha_fraction": 0.508348822593689, "alphanum_fraction": 0.5398886799812317, "avg_line_length": 13.184210777282715, "blob_id": "bd14eb221d8979459101523575b3971ab27ff32a", "content_id": "449df14d398bbf2c99dbf28585db4db02ed262ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 543, "license_type": "no_license", "max_line_length": 50, "num_lines": 38, "path": "/the-go-programming-language/ch4/src/slices/reverse.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"unicode/utf8\"\n)\n\nconst size = 5\n\nfunc main() {\n\ta := [size]int{1, 2, 3, 4, 5}\n\treverse(&a)\n\tfmt.Println(a)\n\n\tb := []byte(\"Hello, 世界\")\n\treverseUTF8(b)\n\tfmt.Println(string(b))\n}\n\nfunc reverse(a *[size]int) {\n\tfor i, j := 0, len(a)-1; i < j; i, j = i+1, 
j-1 {\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\nfunc reverseUTF8(d []byte) {\n\ttmp := make([]byte, len(d))\n\tread := len(d)\n\twrite := 0\n\n\tfor read > 0 {\n\t\tr, s := utf8.DecodeLastRune(d[:read])\n\t\tread -= s\n\t\twrite += utf8.EncodeRune(tmp[write:], r)\n\t}\n\n\tcopy(d, tmp)\n}\n" }, { "alpha_fraction": 0.5049833655357361, "alphanum_fraction": 0.529900312423706, "avg_line_length": 19.758621215820312, "blob_id": "0d80dbab36f8add09644a0e0054735352940caed", "content_id": "8086ac331017d8f226a2caa93ede38b19b53c42e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 602, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/zeromq-the-guide/chapter1/sink.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n void *puller = zmq_socket(context, ZMQ_PULL);\n zmq_bind(puller, \"tcp://*:5558\");\n\n char *start = s_recv(puller);\n free(start);\n\n int64_t start_time = s_clock();\n\n int i;\n for (i = 0; i < 100; i++) {\n char *string = s_recv(puller);\n free(string);\n printf((i / 10) * 10 == i ? \":\" : \".\");\n fflush(stdout);\n }\n printf(\"Total elapsed time: %d msec\\n\", (int)(s_clock() - start_time));\n\n zmq_close(puller);\n zmq_ctx_destroy(context);\n return 0;\n}\n" }, { "alpha_fraction": 0.43189552426338196, "alphanum_fraction": 0.513225793838501, "avg_line_length": 47.721923828125, "blob_id": "85c239ed3c816ffd71be91fd100b2f0200f2aa7a", "content_id": "cc41782d1ef3651ee51d7844b5d6b17e6ce09c26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9111, "license_type": "no_license", "max_line_length": 176, "num_lines": 187, "path": "/saltstack/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [Salt Table of Contents](http://docs.saltstack.com/en/latest/contents.html) [7acc6949c6](https://github.com/saltstack/salt/tree/7acc6949c612d32bfb2482f0194b05fd1fb60a61/doc)\n\n- [x] 1. Introduction to Salt\n - [x] 1.1. The 30 second summary\n - [x] 1.2. Simplicity\n - [x] 1.3. Parallel execution\n - [x] 1.4. Building on proven technology\n - [x] 1.5. Python client interface\n - [x] 1.6. Fast, flexible, scalable\n - [x] 1.7. Open\n - [x] 1.8. Salt Community\n - [x] 1.9. Mailing List\n - [x] 1.10. IRC\n - [x] 1.11. Follow on Github\n - [x] 1.12. Blogs\n - [x] 1.13. Example Salt States\n - [x] 1.14. Follow on ohloh\n - [x] 1.15. Other community links\n - [x] 1.16. Hack the Source\n- [x] 2. Installation\n - [x] 2.1. Quick Install\n - [x] 2.2. Platform-specific Installation Instructions\n - [x] 2.2.6. OS X\n - [x] 2.2.9. Ubuntu Installation\n - [x] 2.3. Dependencies\n - [x] 2.4. Optional Dependencies\n - [x] 2.5. Upgrading Salt\n- [ ] 3. Tutorials\n - [x] 3.1. INTRODUCTION\n - [x] 3.1.1. Salt Masterless Quickstart\n - [x] 3.1.1.1. Bootstrap Salt Minion\n - [x] 3.1.1.2. Telling Salt to Run Masterless\n - [x] 3.1.1.3. Create State Tree\n - [x] 3.2. BASICS\n - [x] 3.2.1. Standalone Minion\n - [x] 3.2.1.1. Telling Salt Call to Run Masterless\n - [x] 3.2.1.2. Running States Masterless\n - [x] 3.2.2. Opening the Firewall up for Salt\n - [x] 3.2.2.3. iptables\n - [x] 3.2.3. Whitelist communication to Master\n - [x] 3.2.4. Using cron with Salt\n - [x] 3.2.4.1. Use cron to initiate a highstate\n - [x] 3.2.5. Remote execution tutorial\n - [x] 3.2.5.1. Order your minions around\n - [x] 3.2.6. 
Pillar Walkthrough\n - [x] 3.2.6.1. Setting Up Pillar\n - [x] 3.2.6.2. Parameterizing States With Pillar\n - [x] 3.2.6.3. Pillar Makes Simple States Grow Easily\n - [x] 3.2.6.4. Setting Pillar Data on the Command Line\n - [x] 3.2.6.5. More On Pillar\n - [ ] 3.3. STATES\n - [ ] 3.3.1. How Do I Use Salt States?\n - [x] 3.3.1.1. It is All Just Data\n - [x] 3.3.1.2. The Top File\n - [x] 3.3.1.3. Default Data - YAML\n - [x] 3.3.1.4. Adding Configs and Users\n - [ ] 3.3.1.5. Moving Beyond a Single SLS\n - [ ] 3.3.1.6. Extending Included SLS Data\n - [ ] 3.3.1.7. Understanding the Render System\n - [ ] 3.3.1.8. Next Reading\n - [ ] 3.3.2. States tutorial, part 1 - Basic Usage\n - [ ] 3.3.2.1. Setting up the Salt State Tree\n - [ ] 3.3.2.2. Preparing the Top File\n - [ ] 3.3.2.3. Create an sls file\n - [ ] 3.3.2.4. Install the package\n - [ ] 3.3.2.5. Next steps\n - [ ] 3.3.3. States tutorial, part 2 - More Complex States, Requisites\n - [ ] 3.3.3.1. Call multiple States\n - [ ] 3.3.3.2. Require other states\n - [ ] 3.3.3.3. Next steps\n - [ ] 3.3.4. States tutorial, part 3 - Templating, Includes, Extends\n - [ ] 3.3.4.1. Templating SLS modules\n - [ ] 3.3.4.2. Using Grains in SLS modules\n - [ ] 3.3.4.3. Calling Salt modules from templates\n - [ ] 3.3.4.4. Advanced SLS module syntax\n - [ ] 3.3.4.5. Next steps\n - [ ] 3.3.5. States tutorial, part 4\n - [ ] 3.3.5.1. Salt fileserver path inheritance\n - [ ] 3.3.5.2. Environment configuration\n - [ ] 3.3.5.3. Practical Example\n - [ ] 3.3.5.4. Continue Learning\n - [ ] 3.3.6. States Tutorial, Part 5 - Orchestration with Salt\n - [ ] 3.3.6.1. The OverState System\n - [ ] 3.3.6.2. The Orchestrate Runner\n - [ ] 3.3.7. Syslog-ng usage\n - [ ] 3.3.7.1. Overview\n - [ ] 3.3.7.2. Configuration\n - [ ] 3.3.7.3. Quotation\n - [ ] 3.3.7.4. Full example\n - [ ] 3.3.7.5. Syslog_ng module functions\n - [ ] 3.3.7.6. Examples\n - [ ] 3.4. ADVANCED TOPICS\n - [ ] 3.4.1. SaltStack Walk-through\n - [ ] 3.4.1.1. Getting Started\n - [ ] 3.4.1.2. Salt States\n - [ ] 3.4.1.3. So Much More!\n - [ ] 3.4.2. MinionFS Backend Walkthrough\n - [ ] 3.4.2.1. Simple Configuration\n - [ ] 3.4.2.2. Commandline Example\n - [ ] 3.4.3. Automatic Updates / Frozen Deployments\n - [ ] 3.4.3.1. Getting Started\n - [ ] 3.4.3.2. Building and Freezing\n - [ ] 3.4.3.3. Using the Frozen Build\n - [ ] 3.4.3.4. Troubleshooting\n - [ ] 3.4.4. Multi Master Tutorial\n - [ ] 3.4.4.1. Summary of Steps\n - [ ] 3.4.4.2. Prepping a Redundant Master\n - [ ] 3.4.4.3. Configure Minions\n - [ ] 3.4.4.4. Sharing Files Between Masters\n - [ ] 3.4.5. Multi-Master-PKI Tutorial With Failover\n - [ ] 3.4.5.1. Motivation\n - [ ] 3.4.5.2. The Goal\n - [ ] 3.4.5.3. Prepping the master to sign its public key\n - [ ] 3.4.5.4. Prepping the minion to verify received public keys\n - [ ] 3.4.5.5. Multiple Masters For A Minion\n - [ ] 3.4.5.6. Testing the setup\n - [ ] 3.4.5.7. Performance Tuning\n - [ ] 3.4.5.8. How the signing and verification works\n - [x] 3.4.6. Preseed Minion with Accepted Key\n - [ ] 3.4.7. Salt Bootstrap\n - [ ] 3.4.7.1. Supported Operating Systems\n - [ ] 3.4.7.2. Installing via an Insecure One-Liner\n - [ ] 3.4.7.3. Example Usage\n - [ ] 3.4.7.4. Command Line Options\n - [ ] 3.4.8. Git Fileserver Backend Walkthrough\n - [ ] 3.4.8.1. Installing Dependencies\n - [ ] 3.4.8.2. Simple Configuration\n - [ ] 3.4.8.3. Multiple Remotes\n - [ ] 3.4.8.4. Per-remote Configuration Parameters\n - [ ] 3.4.8.5. Serving from a Subdirectory\n - [ ] 3.4.8.6. Mountpoints\n - [ ] 3.4.8.7. 
Using gitfs Alongside Other Backends\n - [ ] 3.4.8.8. Branches, Environments and Top Files\n - [ ] 3.4.8.9. Environment Whitelist/Blacklist\n - [ ] 3.4.8.10. Authentication\n - [ ] 3.4.8.11. Refreshing gitfs Upon Push\n - [ ] 3.4.8.12. Using Git as an External Pillar Source\n - [ ] 3.4.8.13. Why aren't my custom modules/states/etc. syncing to my Minions?\n - [ ] 3.4.9. The MacOS X (Maverick) Developer Step By Step Guide To Salt Installation\n - [ ] 3.4.9.1. The 5 Cent Salt Intro\n - [ ] 3.4.9.2. Before Digging In, The Architecture Of The Salt Cluster\n - [ ] 3.4.10. Step 1 - Configuring The Salt Master On Your Mac\n - [ ] 3.4.10.1. Install Homebrew\n - [ ] 3.4.10.2. Install Salt\n - [ ] 3.4.10.3. Create The Master Configuration\n - [ ] 3.4.11. Step 2 - Configuring The Minion VM\n - [ ] 3.4.11.1. Install VirtualBox\n - [ ] 3.4.11.2. Install Vagrant\n - [ ] 3.4.11.3. Create The Minion VM Folder\n - [ ] 3.4.11.4. Initialize Vagrant\n - [ ] 3.4.11.5. Import Precise64 Ubuntu Box\n - [ ] 3.4.11.6. Modify the Vagrantfile\n - [ ] 3.4.11.7. Checking The VM\n - [ ] 3.4.12. Step 3 - Connecting Master and Minion\n - [ ] 3.4.12.1. Creating The Minion Configuration File\n - [ ] 3.4.12.2. Preseed minion keys\n - [ ] 3.4.12.3. Modify Vagrantfile to Use Salt Provisioner\n - [ ] 3.4.12.4. Checking Master-Minion Communication\n - [ ] 3.4.13. Step 4 - Configure Services to Install On the Minion\n - [ ] 3.4.13.1. Checking the system's original state\n - [ ] 3.4.13.2. Initialize the top.sls file\n - [ ] 3.4.13.3. Create The Nginx Configuration\n - [ ] 3.4.13.4. Check Minion State\n - [ ] 3.4.13.5. Where To Go From Here\n - [ ] 3.4.14. Writing Salt Tests\n - [ ] 3.4.14.1. Getting Set Up For Tests\n - [ ] 3.4.14.2. Destructive vs Non-destructive\n - [ ] 3.4.14.3. Automated Test Runs\n - [ ] 3.5. SALT VIRT\n - [ ] 3.5.1. Salt as a Cloud Controller\n - [ ] 3.5.1.1. Setting up Hypervisors\n - [ ] 3.5.1.2. Getting Virtual Machine Images Ready\n - [ ] 3.5.1.3. Using Salt Virt\n - [ ] 3.5.1.4. Migrating Virtual Machines\n - [ ] 3.5.1.5. VNC Consoles\n - [ ] 3.5.1.6. Conclusion\n - [ ] 3.6. HALITE\n - [ ] 3.6.1. Installing and Configuring Halite\n - [ ] 3.6.1.1. Installing Halite Via Package\n - [ ] 3.6.1.2. Installing Halite Using pip\n - [ ] 3.6.1.3. Configuring Halite Permissions\n - [ ] 3.6.1.4. Configuring Halite Settings\n - [ ] 3.6.1.5. Starting Halite\n - [ ] 3.7. USING SALT AT SCALE\n - [ ] 3.7.1. Using salt at scale\n - [ ] 3.7.1.1. The Master\n - [ ] 3.7.1.2. 
Too few resources\n" }, { "alpha_fraction": 0.3775601089000702, "alphanum_fraction": 0.41406944394111633, "avg_line_length": 13.305732727050781, "blob_id": "15e495401fc501cdbe9f7a849ef73bcbaf2b27df", "content_id": "a4e1aa3258c7752928279f22727c360c29202d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2246, "license_type": "no_license", "max_line_length": 52, "num_lines": 157, "path": "/algorithms/graph/bfs_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestBFS(t *testing.T) {\n\tg := graph.Graph{\n\t\tNodes: []graph.Node{\n\t\t\t{\"r\"}, // 0\n\t\t\t{\"s\"}, // 1\n\t\t\t{\"t\"}, // 2\n\t\t\t{\"u\"}, // 3\n\t\t\t{\"v\"}, // 4\n\t\t\t{\"w\"}, // 5\n\t\t\t{\"x\"}, // 6\n\t\t\t{\"y\"}, // 7\n\t\t},\n\t\tEdges: [][]graph.Edge{\n\t\t\t{\n\t\t\t\t{1, 1, 0},\n\t\t\t\t{4, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{0, 1, 0},\n\t\t\t\t{5, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{5, 1, 0},\n\t\t\t\t{6, 1, 0},\n\t\t\t\t{3, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{2, 1, 0},\n\t\t\t\t{6, 1, 0},\n\t\t\t\t{7, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{0, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{1, 1, 0},\n\t\t\t\t{2, 1, 0},\n\t\t\t\t{6, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{5, 1, 0},\n\t\t\t\t{2, 1, 0},\n\t\t\t\t{3, 1, 0},\n\t\t\t\t{7, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{6, 1, 0},\n\t\t\t\t{3, 1, 0},\n\t\t\t},\n\t\t},\n\t}\n\n\torder1 := make([]graph.Node, 0, len(g.Nodes))\n\tgraph.BFS(g, 1, func(n graph.Node) {\n\t\torder1 = append(order1, n)\n\t})\n\torder2 := make([]graph.Node, 0, len(g.Nodes))\n\ttree := graph.BFSTree(g, 1, func(n graph.Node) {\n\t\torder2 = append(order2, n)\n\t})\n\n\texpectedOrder := []graph.Node{\n\t\t{\n\t\t\tLabel: \"s\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"r\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"w\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"v\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"t\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"x\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"u\",\n\t\t},\n\t\t{\n\t\t\tLabel: \"y\",\n\t\t},\n\t}\n\texpectedTree := &graph.BFSTreeNode{\n\t\tNode: graph.Node{\n\t\t\tLabel: \"s\",\n\t\t},\n\t\tChildren: []*graph.BFSTreeNode{\n\t\t\t{\n\t\t\t\tNode: graph.Node{\n\t\t\t\t\tLabel: \"r\",\n\t\t\t\t},\n\t\t\t\tChildren: []*graph.BFSTreeNode{\n\t\t\t\t\t{\n\t\t\t\t\t\tNode: graph.Node{\n\t\t\t\t\t\t\tLabel: \"v\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tNode: graph.Node{\n\t\t\t\t\tLabel: \"w\",\n\t\t\t\t},\n\t\t\t\tChildren: []*graph.BFSTreeNode{\n\t\t\t\t\t{\n\t\t\t\t\t\tNode: graph.Node{\n\t\t\t\t\t\t\tLabel: \"t\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tChildren: []*graph.BFSTreeNode{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNode: graph.Node{\n\t\t\t\t\t\t\t\t\tLabel: \"u\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tNode: graph.Node{\n\t\t\t\t\t\t\tLabel: \"x\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tChildren: []*graph.BFSTreeNode{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNode: graph.Node{\n\t\t\t\t\t\t\t\t\tLabel: \"y\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif !cmp.Equal(order1, expectedOrder) {\n\t\tt.Fatal(cmp.Diff(order1, expectedOrder))\n\t}\n\tif !cmp.Equal(order2, expectedOrder) {\n\t\tt.Fatal(cmp.Diff(order2, expectedOrder))\n\t}\n\tif !cmp.Equal(tree, expectedTree) {\n\t\tt.Fatal(cmp.Diff(tree, expectedTree))\n\t}\n}\n" }, { "alpha_fraction": 0.4346153736114502, 
"alphanum_fraction": 0.4769230782985687, "avg_line_length": 17.571428298950195, "blob_id": "db411053b4ca845e12a2e3d7dad7973ca6b5bb97", "content_id": "c3f98255c30f01785880139bc020b4e4dedc53cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 260, "license_type": "no_license", "max_line_length": 47, "num_lines": 14, "path": "/cpp-how-to-program/chapter8/nullptr.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nint main()\n{\n int *ptr1 = {};\n int *ptr2 = 0;\n int *ptr3 = nullptr;\n\n std::cout << \"ptr1: \" << ptr1 << std::endl;\n std::cout << \"ptr2: \" << ptr2 << std::endl;\n std::cout << \"ptr3: \" << ptr3 << std::endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7475728392601013, "alphanum_fraction": 0.7475728392601013, "avg_line_length": 24.75, "blob_id": "6dfc8512f808e4bd8f29f13f415330bdfb98e1b2", "content_id": "b7798d45d570610149b86848a2fa3e11aaa9a836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 51, "num_lines": 4, "path": "/python-logging/basic-logging-tutorial/example.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import logging\n\nlogging.warn(\"This is a warning!\")\nlogging.info(\"Just some handy information here :)\")\n" }, { "alpha_fraction": 0.4079498052597046, "alphanum_fraction": 0.41631799936294556, "avg_line_length": 16.071428298950195, "blob_id": "1487afda86e5f7a15bcab49cbef47c3e9b623c13", "content_id": "c1d74fbfc07d492c472e6bf3ab190b6bb527c4b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 478, "license_type": "no_license", "max_line_length": 52, "num_lines": 28, "path": "/golang-tour/methods_and_interfaces/11_exercise_readers.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\ntype MyReader struct {}\n\nfunc (r *MyReader) Read(b []byte) (int, error) {\n i := 0\n for i = range b {\n b[i] = 'A'\n }\n return i + 1, nil\n}\n\nfunc main() {\n r := new(MyReader)\n b := make([]byte, 8)\n for {\n n, err := r.Read(b)\n if err != nil {\n fmt.Println(\"An error occured: %s\", err)\n }\n for i := 0; i < n; i++ {\n fmt.Printf(\"%c\", b[i])\n }\n print(\"\\n\")\n }\n}\n" }, { "alpha_fraction": 0.5803571343421936, "alphanum_fraction": 0.5899234414100647, "avg_line_length": 17.23255729675293, "blob_id": "eaa02a7368129631a257fc5b01903909a47d14ac", "content_id": "5c993520d9eb7220f1b8533838bf1bf1580a80b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1568, "license_type": "no_license", "max_line_length": 68, "num_lines": 86, "path": "/algorithms/graph/dag_shortest_path.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nimport (\n\t\"math\"\n)\n\ntype nextElement struct {\n\ttotal float64\n\ti int\n\tpath []int\n}\n\nfunc DAGShortestPathTopologicalSort(g Graph, start, end int) []int {\n\tshortest := make([]float64, len(g.Nodes))\n\tfor i := range shortest {\n\t\tshortest[i] = math.Inf(1)\n\t}\n\tshortest[start] = 0\n\tpred := make([]int, len(g.Nodes))\n\tfor i := range shortest {\n\t\tpred[i] = -1\n\t}\n\n\tsorted := DAGTopologicalSort(g)\n\tfor _, n := range sorted {\n\t\tfor _, e := range g.Edges[n] {\n\t\t\trelax(n, e.Target, e.Weight, shortest, pred)\n\t\t}\n\t}\n\n\tpath := []int{end}\n\tnext := end\n\tfor {\n\t\tif next 
== start {\n\t\t\tbreak\n\t\t}\n\t\tnext = pred[next]\n\t\tpath = append([]int{next}, path...)\n\t}\n\treturn path\n}\n\nfunc DAGShortestPathDijkstra(g Graph, start, end int) []int {\n\tnext := make([]nextElement, 0, len(g.Nodes)/4)\n\tnext = append(next, nextElement{\n\t\ti: start,\n\t})\n\n\tfor len(next) > 0 {\n\t\t// pop off nextElement\n\t\telem := next[0]\n\t\tnext = next[1:]\n\n\t\t// see if we are at the end vertex\n\t\tif elem.i == end {\n\t\t\treturn append(elem.path, elem.i)\n\t\t}\n\n\t\t// add edges to the next list\n\t\tfor _, e := range g.Edges[elem.i] {\n\t\t\tpath := make([]int, len(elem.path)+1)\n\t\t\tcopy(path, elem.path)\n\t\t\tpath[len(path)-1] = elem.i\n\t\t\tne := nextElement{\n\t\t\t\ttotal: e.Weight + elem.total,\n\t\t\t\ti: e.Target,\n\t\t\t\tpath: path,\n\t\t\t}\n\t\t\tnext = insert(next, ne)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc insert(s []nextElement, ne nextElement) []nextElement {\n\tfor i, elem := range s {\n\t\tif ne.total < elem.total {\n\t\t\ts = append(s, nextElement{})\n\t\t\tcopy(s[i+1:], s[i:])\n\t\t\ts[i] = ne\n\t\t\treturn s\n\t\t}\n\t}\n\ts = append(s, ne)\n\treturn s\n}\n" }, { "alpha_fraction": 0.46080759167671204, "alphanum_fraction": 0.49960410594940186, "avg_line_length": 18.121212005615234, "blob_id": "484bb855b8a43264e675221bea87adc832fb5206", "content_id": "c04f12aee0b5b6c4858588a0ba386bb4b5f4e24d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1263, "license_type": "no_license", "max_line_length": 78, "num_lines": 66, "path": "/haskell-book/ch3/exercises.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Exercises: Scope\n\n1. Yes\n2. No\n3. No\n4. Yes\n\n## Exercises: Syntax Errors\n\n1. No, unless you put parens around the infix operator then the output\n should be:\n [1, 2, 3, 4, 5, 6]\n2. No, single quote is used for Char literals not for String. If you change it\n to use double quotes the output should be:\n \"<3 Haskell\"\n3. Yes, the output should be:\n \"<3 Haskell\"\n\n## Chapter Exercises\n\n### Reading syntax\n\n1. a) Valid\n b) Invalid\n c) Valid\n d) Invalid\n e) Invalid\n f) Valid\n g) Invalid\n g) Valid\n\n2. a) [6, 12, 18] (d)\n b) \"rainbow\" (c)\n c) 10 (e)\n d) \"Jules\" (a)\n e) [2, 3, 5, 6, 8, 9] (b)\n\n### Building functions\n\n1. a) f x = x ++ \"!\"\n b) f x = [x !! 4]\n c) f x = drop 9 (x ++ \"!\")\n\n2. a) f :: String\n f x = x ++ \"!\"\n b) g :: String\n g x = [x !! 4]\n c) h :: String\n h x = drop 9 (x ++ \"!\")\n\n3. f :: String -> Char\n f x = x !! 2\n\n4. f :: Int -> Char\n f x = \"Curry is awesome!\" !! x\n\n5. rvrs :: String -> String\n rvrs x = drop 9 x ++ \" \" ++ take 2 (drop 6 x) ++ \" \" ++ take 5 x\n\n6. 
module Reverse where\n\n rvrs :: String -> String\n rvrs x = drop 9 x ++ \" \" ++ take 2 (drop 6 x) ++ \" \" ++ take 5 x\n \n main :: IO ()\n main = print $ rvrs \"Curry is awesome\"\n" }, { "alpha_fraction": 0.5388692617416382, "alphanum_fraction": 0.5636042356491089, "avg_line_length": 21.639999389648438, "blob_id": "8807bf045606f50ba32c8f3e37b4adb4e89ececd", "content_id": "088860dbb7b13a9d878e27d951733faa15602291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 566, "license_type": "no_license", "max_line_length": 55, "num_lines": 25, "path": "/zeromq-the-guide/chapter1/hello_server.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <assert.h>\n#include <zmq.h>\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n printf(\"Starting hello world server...\\n\");\n void *context = zmq_ctx_new();\n\n // create responder\n void *responder = zmq_socket(context, ZMQ_REP);\n int rc = zmq_bind(responder, \"tcp://0.0.0.0:5555\");\n assert(rc == 0);\n\n while (1) {\n char *recv = s_recv(responder);\n printf(\"Received: %s...\\n\", recv);\n s_sleep(500); // do some work\n printf(\"Sending World\\n\");\n s_send(responder, \"World\");\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.5484949946403503, "alphanum_fraction": 0.5568562150001526, "avg_line_length": 13.2380952835083, "blob_id": "ee5f0a218f27d1bcd6ab6b76200641373e465d97", "content_id": "fbb0f62abd744129a577c30a34e651e3f07528b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 598, "license_type": "no_license", "max_line_length": 44, "num_lines": 42, "path": "/the-go-programming-language/ch4/src/slices/squash_space.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\nfunc main() {\n\tvar d []byte\n\n\td = []byte(\"a bb\")\n\td = squashSpace(d)\n\tfmt.Println(string(d))\n\n\td = []byte(\" \\t \\n a \\n\\n b\")\n\td = squashSpace(d)\n\tfmt.Println(string(d))\n}\n\nfunc squashSpace(data []byte) []byte {\n\twrite := 0\n\tread := 0\n\tspace := false\n\n\tfor read < len(data) {\n\t\tr, s := utf8.DecodeRune(data[read:])\n\t\tread += s\n\n\t\tif unicode.IsSpace(r) {\n\t\t\tspace = true\n\t\t} else {\n\t\t\tif space {\n\t\t\t\tspace = false\n\t\t\t\tdata[write] = byte(' ')\n\t\t\t\twrite++\n\t\t\t}\n\t\t\twrite += utf8.EncodeRune(data[write:], r)\n\t\t}\n\t}\n\treturn data[:write]\n}\n" }, { "alpha_fraction": 0.454270601272583, "alphanum_fraction": 0.48450490832328796, "avg_line_length": 15.961538314819336, "blob_id": "6bdb55008b08e79493d06daee103ccb6ed715c49", "content_id": "c2686d67411d4de149964beeb9f087e5113bf56c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1323, "license_type": "no_license", "max_line_length": 66, "num_lines": 78, "path": "/algorithms/graph/dag_shortest_path_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestDAGShortestPathTopologicalSort(t *testing.T) {\n\ttestCases := map[string]func(graph.Graph, int, int) []int{\n\t\t\"topological_sort\": graph.DAGShortestPathTopologicalSort,\n\t\t\"dijkstra_sort\": graph.DAGShortestPathDijkstra,\n\t}\n\n\tfor name, f := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tg := testWeightedDAG()\n\n\t\t\tpath := f(g, 0, 
4)\n\n\t\t\texpectedPath := []string{\"A\", \"C\", \"D\", \"E\"}\n\t\t\tif len(path) != len(expectedPath) {\n\t\t\t\tt.Fatalf(\"shortest path is not of the right length: %v\", path)\n\t\t\t}\n\n\t\t\tfor i, n := range path {\n\t\t\t\tif g.Nodes[n].Label != expectedPath[i] {\n\t\t\t\t\tt.Fatalf(\"shortest path is not correct: %v pos: %d\", path, i)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// DAG (direction of edges is downward:\n//\n// A\n// | \\\n// 5 3\n// | \\\n// B C\n// | / |\n// 10 8 |\n// |/ |\n// D |\n// | /\n// 1 10\n// | /\n// E\n//\nfunc testWeightedDAG() graph.Graph {\n\treturn graph.Graph{\n\t\tNodes: []graph.Node{\n\t\t\t{\"A\"}, // 0\n\t\t\t{\"B\"}, // 1\n\t\t\t{\"C\"}, // 2\n\t\t\t{\"D\"}, // 3\n\t\t\t{\"E\"}, // 4\n\t\t},\n\t\tEdges: [][]graph.Edge{\n\t\t\t{ // 0\n\t\t\t\t{1, 5, 0},\n\t\t\t\t{2, 3, 0},\n\t\t\t},\n\t\t\t{ // 1\n\t\t\t\t{3, 10, 0},\n\t\t\t},\n\t\t\t{ // 2\n\t\t\t\t{3, 8, 0},\n\t\t\t\t{4, 10, 0},\n\t\t\t},\n\t\t\t{ // 3\n\t\t\t\t{4, 1, 0},\n\t\t\t},\n\t\t\t{}, // 4\n\t\t},\n\t}\n}\n" }, { "alpha_fraction": 0.573221743106842, "alphanum_fraction": 0.6108786463737488, "avg_line_length": 12.277777671813965, "blob_id": "aa79db7f3234a94c38e27f6aebde1599e5b8531b", "content_id": "71fc5614d57f2c2404d3c165823de69dba4c3af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 38, "num_lines": 18, "path": "/zerorpc-streaming/server.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport logging\n\nimport zerorpc\n\nimport rando\n\n\ndef main():\n logging.basicConfig()\n server = zerorpc.Server(rando)\n server.bind(\"tcp://0.0.0.0:12345\")\n server.run()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.46298542618751526, "alphanum_fraction": 0.49878641963005066, "avg_line_length": 16.53191566467285, "blob_id": "1fbb1b9aa250fcbb08793930ac51dd3046d472dd", "content_id": "967d73d6fde1a3639418eb08fea4f64be3d395f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1648, "license_type": "no_license", "max_line_length": 70, "num_lines": 94, "path": "/algorithms/graph/negative_weight_cycle_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestNegativeWeightCycles(t *testing.T) {\n\tt.Run(\"present\", func(t *testing.T) {\n\t\tg := graph.Graph{\n\t\t\tNodes: []graph.Node{\n\t\t\t\t{\"A\"},\n\t\t\t\t{\"B\"},\n\t\t\t\t{\"C\"},\n\t\t\t\t{\"D\"},\n\t\t\t\t{\"E\"},\n\t\t\t\t{\"F\"},\n\t\t\t\t{\"G\"},\n\t\t\t},\n\t\t\tEdges: [][]graph.Edge{\n\t\t\t\t{\n\t\t\t\t\t{6, 5, 0},\n\t\t\t\t\t{2, 1, 0},\n\t\t\t\t},\n\t\t\t\t{},\n\t\t\t\t{\n\t\t\t\t\t{3, 8, 0},\n\t\t\t\t\t{1, 5, 0},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t{4, -6, 0},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t{2, -3, 0},\n\t\t\t\t\t{5, 2, 0},\n\t\t\t\t},\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t},\n\t\t}\n\n\t\tshortest, prev := graph.BellmanFord(0, g)\n\t\tbadVertices := graph.DetectNegativeWeightCycle(0, g, shortest, prev)\n\t\texpectedBadVertices := []int{1, 2, 3, 4, 5}\n\n\t\tif shortest[0] != 0 {\n\t\t\tt.Fatalf(\"shortest[0] != 0: %f\", shortest[0])\n\t\t}\n\t\tif shortest[6] != 5 {\n\t\t\tt.Fatalf(\"shortest[6] != 5: %f\", shortest[6])\n\t\t}\n\t\tif prev[0] != -1 {\n\t\t\tt.Fatalf(\"prev[0] != -1: %d\", prev[0])\n\t\t}\n\t\tif prev[6] 
!= 0 {\n\t\t\tt.Fatalf(\"prev[6] != 0: %d\", prev[6])\n\t\t}\n\n\t\tif !cmp.Equal(badVertices, expectedBadVertices) {\n\t\t\tt.Fatal(cmp.Diff(badVertices, expectedBadVertices))\n\t\t}\n\t})\n\n\tt.Run(\"not present\", func(t *testing.T) {\n\t\tg := graph.Graph{\n\t\t\tNodes: []graph.Node{\n\t\t\t\t{\"A\"},\n\t\t\t\t{\"B\"},\n\t\t\t\t{\"C\"},\n\t\t\t},\n\t\t\tEdges: [][]graph.Edge{\n\t\t\t\t{\n\t\t\t\t\t{1, 1, 0},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t{2, 1, 0},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t{0, 1, 0},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tshortest, prev := graph.BellmanFord(0, g)\n\t\tbadVertices := graph.DetectNegativeWeightCycle(0, g, shortest, prev)\n\t\tvar expectedBadVertices []int\n\n\t\tif !cmp.Equal(badVertices, expectedBadVertices) {\n\t\t\tt.Fatal(cmp.Diff(badVertices, expectedBadVertices))\n\t\t}\n\t})\n}\n" }, { "alpha_fraction": 0.5804196000099182, "alphanum_fraction": 0.5821678042411804, "avg_line_length": 17.158729553222656, "blob_id": "8a751ebd5585dd9f7160e003a89daa798cfb5914", "content_id": "560882d80cbf2b27ab472ee1d1bcc2514e367ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1144, "license_type": "no_license", "max_line_length": 57, "num_lines": 63, "path": "/algorithms/graph/bfs.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nfunc BFS(g Graph, s int, f func(Node)) {\n\tenqueued := make([]bool, len(g.Nodes))\n\tfifo := make(chan int, len(g.Nodes))\n\tfifo <- s\n\tenqueued[s] = true\n\n\tfor len(fifo) != 0 {\n\t\tn := <-fifo\n\t\tif f != nil {\n\t\t\tf(g.Nodes[n])\n\t\t}\n\t\tfor _, e := range g.Edges[n] {\n\t\t\tif !enqueued[e.Target] {\n\t\t\t\tfifo <- e.Target\n\t\t\t\tenqueued[e.Target] = true\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype BFSTreeNode struct {\n\tNode\n\tChildren []*BFSTreeNode\n}\n\nfunc BFSTree(g Graph, s int, f func(Node)) *BFSTreeNode {\n\tvar tree *BFSTreeNode\n\tbfsTreeNodes := make([]*BFSTreeNode, len(g.Nodes))\n\ttree = &BFSTreeNode{\n\t\tNode: g.Nodes[s],\n\t}\n\tbfsTreeNodes[s] = tree\n\n\tenqueued := make([]bool, len(g.Nodes))\n\tfifo := make(chan int, len(g.Nodes))\n\tfifo <- s\n\tenqueued[s] = true\n\n\tfor len(fifo) != 0 {\n\t\tn := <-fifo\n\t\tif f != nil {\n\t\t\tf(g.Nodes[n])\n\t\t}\n\t\tfor _, e := range g.Edges[n] {\n\t\t\tif !enqueued[e.Target] {\n\t\t\t\tbfsNode := &BFSTreeNode{\n\t\t\t\t\tNode: g.Nodes[e.Target],\n\t\t\t\t}\n\t\t\t\tbfsTreeNodes[e.Target] = bfsNode\n\t\t\t\tbfsTreeNodes[n].Children = append(\n\t\t\t\t\tbfsTreeNodes[n].Children,\n\t\t\t\t\tbfsNode,\n\t\t\t\t)\n\t\t\t\tfifo <- e.Target\n\t\t\t\tenqueued[e.Target] = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tree\n}\n" }, { "alpha_fraction": 0.5443365573883057, "alphanum_fraction": 0.5559870600700378, "avg_line_length": 14.927835464477539, "blob_id": "ca2c644da059ff2239112cdfbe21deb88b42f8e4", "content_id": "b8aafeb3ea75d6853c66a0253e128adfe0e9942c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 74, "num_lines": 97, "path": "/haskell-book/ch1/exercises.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## Exercises: Equivalence\n\n1. λxy.xz\n\n Not a since z and y need to be distinct\n b since m can be replaced for x and n for y\n Not c since the second argument should not be in the body\n\n Answer: b\n\n2. 
λxy.xxy\n\n Not a since p is not the same as m\n Not b since the body requires three terms\n c since we just replaced x with a and y with b\n\n Answer: c, but I am a bit confused since the notation is missing a '.'\n (dot) to indicate the body\n\n3. λxyz.zx\n\n Not a since we are missing the x term in the body\n b since we just replaced z with t and z with s\n Not c since y doesn't exist in the body\n\n Answer: b\n\n## Chapter Exercises\n\n### Combinators\n\n1. yes\n2. no\n3. yes\n4. yes\n5. no\n\n### Normal Form / Divergent\n\n1. normal\n2. divergent\n3. normal\n\n### Beta Reduction\n\n1. (λabc.cba)zz(λwv.w)\n\n (λa.λb.λc.cba)zz(λw.λv.w)\n (λc.czz)(λw.λv.w)\n (λw.λv.w)zz\n z\n\n2. (λx.λy.xyy)(λa.a)b\n\n (λx.λy.xyy)(λa.a)b\n (λy.(λa.a)yy)b\n (λa.a)bb\n bb\n\n3. (λy.y)(λx.xx)(λz.zq)\n\n (λy.y)(λx.xx)(λz.zq)\n (λx.xx)(λz.zq)\n (λz.zq)(λz.zq)\n (λz.zq)q\n qq\n\n4. (λz.z)(λz.zz)(λz.zy)\n\n (λz.z)(λz.zz)(λz.zy)\n (λz.zz)(λz.zy)\n (λz.zy)(λz.zy)\n (λz.zy)y\n yy\n\n5. (λx.λy.xyy)(λy.y)y\n\n (λx.λy.xyy)(λy.y)y\n (λy.(λq.q)yy)y\n (λq.q)yy\n yy\n\n6. (λa.aa)(λb.ba)c\n\n (λa.aa)(λb.ba)c\n (λb.ba)(λb.ba)c\n (λb.ba)ac\n aac\n\n7. (λxyz.xz(yz))(λx.z)(λx.a)\n\n (λx.λy.λz.xz(yz))(λx.z)(λx.a)\n (λy.λz.(λx.z)z(yz))(λx.a)\n λz.(λx.z)z((λx.a)z)\n λz.(λx.z)z((λx.a)z)\n λz.z(λx.a)z\n λz.za\n" }, { "alpha_fraction": 0.6203703880310059, "alphanum_fraction": 0.6365740895271301, "avg_line_length": 19.0930233001709, "blob_id": "7fe783c3487e5f678c1aea22a8369db1653f9e39", "content_id": "6bb17e8b4da94a6c7439c76cb31c4abeef1a41cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 864, "license_type": "no_license", "max_line_length": 67, "num_lines": 43, "path": "/data-structures/hash-table/hash_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package hash_test\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\n\thash \"github.com/jasonkeene/playground/data-structures/hash-table\"\n)\n\nfunc TestHash(t *testing.T) {\n\th := hash.NewHash()\n\n\th.Set(\"key\", \"value\")\n\th.Set(\"key2\", \"value2\")\n\n\tif h.Get(\"key\") != \"value\" {\n\t\tt.Fatal(\"key != value\")\n\t}\n\tif h.Get(\"key2\") != \"value2\" {\n\t\tt.Fatal(\"key2 != value2\")\n\t}\n}\n\nfunc TestHashFunction(t *testing.T) {\n\tconst sample_size = 1000\n\tsample := make(map[uint64]bool, sample_size)\n\tfor i := 0; i < sample_size; i++ {\n\t\tsample[hash.HashFunc([]byte(strconv.Itoa(i)))] = true\n\t}\n\n\tif len(sample) != sample_size {\n\t\tt.Fatalf(\"hash func did not have uniqe values for each sample\")\n\t}\n\t// TODO: look into chi square test for uniform distribution\n}\n\nfunc BenchmarkHashFunction(b *testing.B) {\n\tdata := []byte(\"hello hash\")\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\thash.HashFunc(data)\n\t}\n}\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 10.399999618530273, "blob_id": "0871dc43fdea8a8be05bae914c6b3d910aed8e9e", "content_id": "aa6ace07bd8915886519cecd9226f40cee6e2b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 57, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/bitfountain-ios7/Man's Best Friend/Podfile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "platform :ios, \"7.0\"\n\ntarget \"Man's Best Friend\" do\n\nend\n" }, { "alpha_fraction": 0.39885222911834717, "alphanum_fraction": 0.5208034515380859, "avg_line_length": 12.132075309753418, "blob_id": 
"5b23c1649844e3827fe209f914816b2ceb02d574", "content_id": "525adc8ef91eb5d7221bedd09d1fa00c1be09a09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 697, "license_type": "no_license", "max_line_length": 54, "num_lines": 53, "path": "/haskell-book/ch2/exercises.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Exercises: A Head Code\n\n1. 5\n2. 25\n3. 30\n4. 6\n\n## Exercises: Comprehension Check\n\n1. let half x = x / 2\n let square x = x * x\n\n2. f x = 3.14 * (x * x)\n\n## Exercises: Heal the Sick\n\n1. let area x = 3.14 * (x * x)\n2. let double x = x * 2\n3. ```\nx = 7\ny = 10\nf = x + y\n```\n\n## Exercises: Let and Where\n\n1. f = x * 3 + y where x = 3; y = 1000\n2. f = x * 5 where y = 10; x = 10 * 5 + y\n3. f = z / x + y where x = 7; y = negate x; z = y * 10\n\n## Exercises: Parentheses and Association\n\n1. Yes\n2. No\n3. Yes\n\n## Chapter Exercises\n\n1. 2 + (2 * 3) - 1\n2. 10^(1 + 1)\n3. ((2 ^ 2) * (4 ^ 5)) + 1\n\n\n1. The same\n2. The same\n3. Not the same\n4. Not the same\n5. Not the same\n\n1. 1135\n 1135\n -1110\n 1110\n" }, { "alpha_fraction": 0.5651408433914185, "alphanum_fraction": 0.5792253613471985, "avg_line_length": 21.719999313354492, "blob_id": "200f3347e1e6b8484fd2ef3593733d9363c4165d", "content_id": "175c3c41d60998a9e3f0a91788c9e07c8161d4cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 568, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/zeromq-the-guide/chapter1/hello_client.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <zmq.h>\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n printf(\"Connecting to hello world server...\\n\");\n void *context = zmq_ctx_new();\n\n // create requester\n void *requester = zmq_socket(context, ZMQ_REQ);\n zmq_connect(requester, \"tcp://localhost:5555\");\n\n for (int i = 0; i < 10; i++) {\n printf(\"Sending Hello %d...\\n\", i);\n s_send(requester, \"Hello\");\n char *recv = s_recv(requester);\n printf(\"Received %s\\n\", recv);\n }\n zmq_close(requester);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.34210526943206787, "alphanum_fraction": 0.39614662528038025, "avg_line_length": 16.882352828979492, "blob_id": "e5cf6a72a5d6c3ccc03e83f4a43064c95e4e2650", "content_id": "cf7e6e36ca4853f080a67b5895d1409b8ef87dbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2128, "license_type": "no_license", "max_line_length": 69, "num_lines": 119, "path": "/algorithms/graph/dag_topological_sort_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestTopologicalSort(t *testing.T) {\n\tg := testDAG()\n\n\tordered := graph.DAGTopologicalSort(g)\n\n\tcounts := make([]int, len(g.Nodes))\n\tfor i, n1 := range ordered {\n\t\tcounts[n1]++\n\t\tfor j := i + 1; j < len(ordered); j++ {\n\t\t\tn2 := ordered[j]\n\t\t\tassertNoReference(t, g, n1, n2)\n\t\t}\n\t}\n\tfor i, v := range counts {\n\t\tif v != 1 {\n\t\t\tt.Fatalf(\"vertex %d was counted %d times\", i, v)\n\t\t}\n\t}\n}\n\nfunc assertNoReference(t *testing.T, g graph.Graph, n1 int, n2 int) {\n\tfor _, e := range g.Edges[n2] {\n\t\tif e.Target == n1 {\n\t\t\tt.Fatalf(\"invalid ordering: %d contains edge to: %d\", n2, 
n1)\n\t\t}\n\t\tassertNoReference(t, g, n1, e.Target)\n\t}\n}\n\n// DAG (direction of edges is downward:\n//\n// 6 13 10\n// | | |\n// 1 | 2\n// | | / |\n// | 7 12\n// | |\n// | 0\n// | / |\n// 5 9\n// | |\n// 4 8\n// | /\n// 3\n// |\n// 11\n//\nfunc testDAG() graph.Graph {\n\treturn graph.Graph{\n\t\tNodes: []graph.Node{\n\t\t\t{\"pants\"}, // 0\n\t\t\t{\"chest pad\"}, // 1\n\t\t\t{\"compression shorts\"}, // 2\n\t\t\t{\"catch glove\"}, // 3\n\t\t\t{\"mask\"}, // 4\n\t\t\t{\"sweater\"}, // 5\n\t\t\t{\"t-shirt\"}, // 6\n\t\t\t{\"hose\"}, // 7\n\t\t\t{\"leg pads\"}, // 8\n\t\t\t{\"skates\"}, // 9\n\t\t\t{\"undershorts\"}, // 10\n\t\t\t{\"blocker\"}, // 11\n\t\t\t{\"cup\"}, // 12\n\t\t\t{\"socks\"}, // 13\n\t\t},\n\t\tEdges: [][]graph.Edge{\n\t\t\t{ // 0\n\t\t\t\t{5, 1, 0}, // sweater\n\t\t\t\t{9, 1, 0}, // skates\n\t\t\t},\n\t\t\t{ // 1\n\t\t\t\t{5, 1, 0}, // sweater\n\t\t\t},\n\t\t\t{ // 2\n\t\t\t\t{7, 1, 0}, // hose\n\t\t\t\t{12, 1, 0}, // cup\n\t\t\t},\n\t\t\t{ // 3\n\t\t\t\t{11, 1, 0}, // blocker\n\t\t\t},\n\t\t\t{ // 4\n\t\t\t\t{3, 1, 0}, // catch glove\n\t\t\t},\n\t\t\t{ // 5\n\t\t\t\t{4, 1, 0}, // mask\n\t\t\t},\n\t\t\t{ // 6\n\t\t\t\t{1, 1, 0}, // chest pad\n\t\t\t},\n\t\t\t{ // 7\n\t\t\t\t{0, 1, 0}, // pants\n\t\t\t},\n\t\t\t{ // 8\n\t\t\t\t{3, 1, 0}, // catch glove\n\t\t\t},\n\t\t\t{ // 9\n\t\t\t\t{8, 1, 0}, // leg pads\n\t\t\t},\n\t\t\t{ // 10\n\t\t\t\t{2, 1, 0}, // compression shorts\n\t\t\t},\n\t\t\t{}, // 11\n\t\t\t{ // 12\n\t\t\t\t{0, 1, 0}, // pants\n\t\t\t},\n\t\t\t{ // 13\n\t\t\t\t{7, 1, 0}, //hose\n\t\t\t},\n\t\t},\n\t}\n}\n" }, { "alpha_fraction": 0.5722801685333252, "alphanum_fraction": 0.5782414078712463, "avg_line_length": 13.276596069335938, "blob_id": "93ea252f28eef6125971ec527acdf2860a5b0da0", "content_id": "46edd195ed7a59d6f589c97adcfbcbd5a6471bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 671, "license_type": "no_license", "max_line_length": 65, "num_lines": 47, "path": "/concurrent-prime-sieve/sieve.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\nvar n = flag.Int(\"n\", 1, \"count of how many primes to calculate\")\n\nfunc Primes() chan int {\n\tout := make(chan int)\n\tin := NewSieve(out)\n\tgo func() {\n\t\tfor i := 2; ; i++ {\n\t\t\tin <- i\n\t\t}\n\t}()\n\treturn out\n}\n\nfunc NewSieve(out chan int) chan int {\n\tin := make(chan int)\n\tgo sieve(in, out)\n\treturn in\n}\n\nfunc sieve(in, out chan int) {\n\tprime := <-in\n\tout <- prime\n\n\tchild_in := NewSieve(out)\n\tfor {\n\t\tpossible_prime := <-in\n\t\tif possible_prime%prime != 0 {\n\t\t\tchild_in <- possible_prime\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tprime_chan := Primes()\n\tfor i := 0; i < *n; i++ {\n\t\tfmt.Println(strconv.Itoa(<-prime_chan))\n\t}\n}\n" }, { "alpha_fraction": 0.6312848925590515, "alphanum_fraction": 0.6312848925590515, "avg_line_length": 12.769230842590332, "blob_id": "9c67ff2fed87ec156709ad42f01d8fe98be0b434", "content_id": "515ca00c5ebacfaae74e1e372a884719430c1ecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 179, "license_type": "no_license", "max_line_length": 34, "num_lines": 13, "path": "/golang-tour/basics/flow_control_statements_for_if_else_and_switch/12_defer.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc world() string {\n fmt.Println(\"Generated world\")\n return 
\"world\"\n}\n\nfunc main() {\n defer fmt.Println(world())\n fmt.Println(\"hello\")\n}\n" }, { "alpha_fraction": 0.6013578772544861, "alphanum_fraction": 0.6091173887252808, "avg_line_length": 16.930435180664062, "blob_id": "e605428c44234541ce94de229cd127f03b16f6a7", "content_id": "dc65facd46867bfa1c75f39d5200baedbc3d59c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2062, "license_type": "no_license", "max_line_length": 71, "num_lines": 115, "path": "/algorithms/other/hanoi/tower.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package hanoi\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n)\n\ntype Peg struct {\n\tValues []int\n}\n\nfunc NewPeg(d []int) *Peg {\n\treturn &Peg{\n\t\tValues: d,\n\t}\n}\n\nfunc (peg *Peg) Pop() int {\n\tlength := len(peg.Values)\n\tif length == 0 {\n\t\tpanic(\"popping from empty peg\")\n\t}\n\tvalue := peg.Values[length-1]\n\tpeg.Values = peg.Values[0 : length-1]\n\treturn value\n}\n\nfunc (peg *Peg) Push(value int) {\n\tpeg.Values = append(peg.Values, value)\n}\n\nfunc (peg Peg) Check() {\n\tif len(peg.Values) == 0 {\n\t\treturn\n\t}\n\tvalue := peg.Values[0]\n\tfor i := range peg.Values[1:] {\n\t\tif peg.Values[i] > value {\n\t\t\tpanic(\"peg contains disks that are out of order\")\n\t\t}\n\t\tvalue = peg.Values[i]\n\t}\n}\n\ntype Tower struct {\n\tPegs map[string]*Peg\n\tCheck bool\n}\n\nfunc NewTower(size int) *Tower {\n\ttower := &Tower{\n\t\tPegs: map[string]*Peg{\n\t\t\t\"A\": NewPeg(make([]int, 0, size)),\n\t\t\t\"B\": NewPeg(make([]int, 0, size)),\n\t\t\t\"C\": NewPeg(make([]int, 0, size)),\n\t\t},\n\t}\n\tfor i := size; i > 0; i-- {\n\t\ttower.Pegs[\"A\"].Push(i)\n\t}\n\treturn tower\n}\n\nfunc (tower *Tower) Move(count int, from, to string) {\n\tfrom_peg := tower.Pegs[from]\n\tto_peg := tower.Pegs[to]\n\tother, err := lookupOther(from, to)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tother_peg := tower.Pegs[other]\n\n\thanoi(count, from_peg, to_peg, other_peg, tower.Check)\n}\n\nfunc hanoi(count int, from, to, other *Peg, check bool) {\n\tif count > 1 {\n\t\t// move sub problem from \"from\" to \"other\"\n\t\thanoi(count-1, from, other, to, check)\n\t}\n\n\t// work is done here\n\tvalue := from.Pop()\n\tto.Push(value)\n\n\t// optionally check state of pegs\n\tif check {\n\t\tfrom.Check()\n\t\tto.Check()\n\t\tother.Check()\n\t}\n\n\tif count > 1 {\n\t\t// move sub problem from \"other\" to \"to\"\n\t\thanoi(count-1, other, to, from, check)\n\t}\n}\n\nfunc lookupOther(from, to string) (string, error) {\n\tpeg_names := map[string]bool{\n\t\t\"A\": true,\n\t\t\"B\": true,\n\t\t\"C\": true,\n\t}\n\tdelete(peg_names, from)\n\tdelete(peg_names, to)\n\tif len(peg_names) != 1 {\n\t\treturn \"\", fmt.Errorf(\"bad length for peg_names: %d\", len(peg_names))\n\t}\n\tfor k := range peg_names {\n\t\treturn k, nil\n\t}\n\treturn \"\", errors.New(\"no peg_name available\")\n}\n" }, { "alpha_fraction": 0.7416666746139526, "alphanum_fraction": 0.7416666746139526, "avg_line_length": 41.35293960571289, "blob_id": "c9ba78947ef2799bc5a184506c6dfc1ccce308d5", "content_id": "3f710b7184d52fb652aee4c88503c82a652c1d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 723, "license_type": "no_license", "max_line_length": 78, "num_lines": 17, "path": "/haskell-book/ch1/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "- Referential transparency is what I previously knew as \"purity\" of a function\n- 
Purity is overloaded in this space to also mean that programs written in the\n language are a structure of lambda expressions.\n- Pseudo BNF:\n Expression := Expression\n | Variable\n | Numerical Constants\n | Abstraction (functions)\n Abstraction := Head Body\n Head := 𝜆 Variable\n Body := Expression\n- Beta Reduction is kinda like evaluation in other languages\n- Free variables are locals in other languages\n- Alpha equivalency does not apply to free variables\n- Multiple args are accomplished via currying\n- Combinators have no free variables\n- Other methods of evaulation vs left to right, top to bottom\n" }, { "alpha_fraction": 0.8309859037399292, "alphanum_fraction": 0.8309859037399292, "avg_line_length": 22.66666603088379, "blob_id": "91a3c160f905a557c5d58a06b9a2c5e9e49557bb", "content_id": "90dbceefa46c30114a4184ea3a8d37c70eb18203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 71, "license_type": "no_license", "max_line_length": 36, "num_lines": 3, "path": "/leetcode/minimum-path-sum/pytest.ini", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "[pytest]\npython_classes=Test Describe Context\npython_functions=test it\n" }, { "alpha_fraction": 0.547002911567688, "alphanum_fraction": 0.5490731000900269, "avg_line_length": 37.08960723876953, "blob_id": "3e96139d85a597a04fc72d6d8d234def0aafc8c7", "content_id": "886b46814781185149ca84dfeeeaa3468afa44ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10643, "license_type": "no_license", "max_line_length": 64, "num_lines": 279, "path": "/zeromq-the-guide/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [ØMQ - The Guide](http://zguide.zeromq.org/page:all)\n\n - [x] **Preface**\n - [x] ØMQ in a Hundred Words\n - [x] How It Began\n - [x] The Zen of Zero\n - [x] Audience\n - [x] Acknowledgements\n - [x] **Chapter 1 - Basics**\n - [x] Fixing the World\n - [x] Starting Assumptions\n - [x] Getting the Examples\n - [x] Ask and Ye Shall Receive\n - [x] A Minor Note on Strings\n - [x] Version Reporting\n - [x] Getting the Message Out\n - [x] Divide and Conquer\n - [x] Programming with ØMQ\n - [x] Getting the Context Right\n - [x] Making a Clean Exit\n - [x] Why We Needed ØMQ\n - [x] Socket Scalability\n - [x] Upgrading from ØMQ v2.2 to ØMQ v3.2\n - [x] Compatible Changes\n - [x] Incompatible Changes\n - [x] Suggested Shim Macros\n - [x] Warning: Unstable Paradigms!\n - [ ] **Chapter 2 - Sockets and Patterns**\n - [x] The Socket API\n - [x] Plugging Sockets into the Topology\n - [x] Sending and Receiving Messages\n - [x] Unicast Transports\n - [x] ØMQ is Not a Neutral Carrier\n - [x] I/O Threads\n - [x] Messaging Patterns\n - [x] High-Level Messaging Patterns\n - [x] Working with Messages\n - [x] Handling Multiple Sockets\n - [x] Multipart Messages\n - [x] Intermediaries and Proxies\n - [x] The Dynamic Discovery Problem\n - [x] Shared Queue (DEALER and ROUTER sockets)\n - [x] ØMQ's Built-In Proxy Function\n - [x] Transport Bridging\n - [ ] Handling Errors and ETERM\n - [ ] Handling Interrupt Signals\n - [ ] Detecting Memory Leaks\n - [ ] Multithreading with ØMQ\n - [ ] Signaling Between Threads (PAIR Sockets)\n - [ ] Node Coordination\n - [ ] Zero-Copy\n - [ ] Pub-Sub Message Envelopes\n - [ ] High-Water Marks\n - [ ] Missing Message Problem Solver\n - [ ] **Chapter 3 - Advanced Request-Reply Patterns**\n - [ ] The Request-Reply Mechanisms\n - [ ] The Simple Reply 
Envelope\n - [ ] The Extended Reply Envelope\n - [ ] What's This Good For?\n - [ ] Recap of Request-Reply Sockets\n - [ ] Request-Reply Combinations\n - [ ] The REQ to REP Combination\n - [ ] The DEALER to REP Combination\n - [ ] The REQ to ROUTER Combination\n - [ ] The DEALER to ROUTER Combination\n - [ ] The DEALER to DEALER Combination\n - [ ] The ROUTER to ROUTER Combination\n - [ ] Invalid Combinations\n - [ ] Exploring ROUTER Sockets\n - [ ] Identities and Addresses\n - [ ] ROUTER Error Handling\n - [ ] The Load Balancing Pattern\n - [ ] ROUTER Broker and REQ Workers\n - [ ] ROUTER Broker and DEALER Workers\n - [ ] A Load Balancing Message Broker\n - [ ] A High-Level API for ØMQ\n - [ ] Features of a Higher-Level API\n - [ ] The CZMQ High-Level API\n - [ ] The Asynchronous Client/Server Pattern\n - [ ] Worked Example: Inter-Broker Routing\n - [ ] Establishing the Details\n - [ ] Architecture of a Single Cluster\n - [ ] Scaling to Multiple Clusters\n - [ ] Federation Versus Peering\n - [ ] The Naming Ceremony\n - [ ] Prototyping the State Flow\n - [ ] Prototyping the Local and Cloud Flows\n - [ ] Putting it All Together\n - [ ] **Chapter 4 - Reliable Request-Reply Patterns**\n - [ ] What is \"Reliability\"?\n - [ ] Designing Reliability\n - [ ] Client-Side Reliability (Lazy Pirate Pattern)\n - [ ] Basic Reliable Queuing (Simple Pirate Pattern)\n - [ ] Robust Reliable Queuing (Paranoid Pirate Pattern)\n - [ ] Heartbeating\n - [ ] Shrugging It Off\n - [ ] One-Way Heartbeats\n - [ ] Ping-Pong Heartbeats\n - [ ] Heartbeating for Paranoid Pirate\n - [ ] Contracts and Protocols\n - [ ] Service-Oriented Reliable Queuing (Majordomo Pattern)\n - [ ] Asynchronous Majordomo Pattern\n - [ ] Service Discovery\n - [ ] Idempotent Services\n - [ ] Disconnected Reliability (Titanic Pattern)\n - [ ] High-Availability Pair (Binary Star Pattern)\n - [ ] Detailed Requirements\n - [ ] Preventing Split-Brain Syndrome\n - [ ] Binary Star Implementation\n - [ ] Binary Star Reactor\n - [ ] Brokerless Reliability (Freelance Pattern)\n - [ ] Model One: Simple Retry and Failover\n - [ ] Model Two: Brutal Shotgun Massacre\n - [ ] Model Three: Complex and Nasty\n - [ ] Conclusion\n - [ ] **Chapter 5 - Advanced Pub-Sub Patterns**\n - [ ] Pros and Cons of Pub-Sub\n - [ ] Pub-Sub Tracing (Espresso Pattern)\n - [ ] Last Value Caching\n - [ ] Slow Subscriber Detection (Suicidal Snail Pattern)\n - [ ] High-Speed Subscribers (Black Box Pattern)\n - [ ] Reliable Pub-Sub (Clone Pattern)\n - [ ] Centralized Versus Decentralized\n - [ ] Representing State as Key-Value Pairs\n - [ ] Getting an Out-of-Band Snapshot\n - [ ] Republishing Updates from Clients\n - [ ] Working with Subtrees\n - [ ] Ephemeral Values\n - [ ] Using a Reactor\n - [ ] Adding the Binary Star Pattern for Reliability\n - [ ] The Clustered Hashmap Protocol\n - [ ] Building a Multithreaded Stack and API\n - [ ] **Chapter 6 - The ØMQ Community**\n - [ ] Architecture of the ØMQ Community\n - [ ] How to Make Really Large Architectures\n - [ ] Psychology of Software Architecture\n - [ ] The Importance of Contracts\n - [ ] Eat Me\n - [ ] The Process\n - [ ] Crazy, Beautiful, and Easy\n - [ ] Stranger, Meet Stranger\n - [ ] Infinite Property\n - [ ] Care and Feeding\n - [ ] The ØMQ Process: C4\n - [ ] Language\n - [ ] Goals\n - [ ] Preliminaries\n - [ ] Licensing and Ownership\n - [ ] Patch Requirements\n - [ ] Development Process\n - [ ] Creating Stable Releases\n - [ ] Evolution of Public Contracts\n - [ ] Project Administration\n - [ ] A Real-Life Example\n - [ 
] Git Branches Considered Harmful\n - [ ] Simplicity Versus Complexity\n - [ ] Change Latency\n - [ ] Learning Curve\n - [ ] Cost of Failure\n - [ ] Up-front Coordination\n - [ ] Scalability\n - [ ] Surprise and Expectations\n - [ ] Economics of Participation\n - [ ] Robustness in Conflict\n - [ ] Guarantees of Isolation\n - [ ] Visibility\n - [ ] Conclusions\n - [ ] Designing for Innovation\n - [ ] The Tale of Two Bridges\n - [ ] How ØMQ Lost Its Road Map\n - [ ] Trash-Oriented Design\n - [ ] Complexity-Oriented Design\n - [ ] Simplicity Oriented Design\n - [ ] Burnout\n - [ ] Patterns for Success\n - [ ] The Lazy Perfectionist\n - [ ] The Benevolent Tyrant\n - [ ] The Earth and Sky\n - [ ] The Open Door\n - [ ] The Laughing Clown\n - [ ] The Mindful General\n - [ ] The Social Engineer\n - [ ] The Constant Gardener\n - [ ] The Rolling Stone\n - [ ] The Pirate Gang\n - [ ] The Flash Mob\n - [ ] The Canary Watcher\n - [ ] The Hangman\n - [ ] The Historian\n - [ ] The Provocateur\n - [ ] The Mystic\n - [ ] **Chapter 7 - Advanced Architecture using ØMQ**\n - [ ] Message-Oriented Pattern for Elastic Design\n - [ ] Step 1: Internalize the Semantics\n - [ ] Step 2: Draw a Rough Architecture\n - [ ] Step 3: Decide on the Contracts\n - [ ] Step 4: Write a Minimal End-to-End Solution\n - [ ] Step 5: Solve One Problem and Repeat\n - [ ] Unprotocols\n - [ ] Protocols Without The Goats\n - [ ] Contracts Are Hard\n - [ ] How to Write Unprotocols\n - [ ] Why use the GPLv3 for Public Specifications?\n - [ ] Using ABNF\n - [ ] The Cheap or Nasty Pattern\n - [ ] Error Handling\n - [ ] Serializing Your Data\n - [ ] Abstraction Level\n - [ ] ØMQ Framing\n - [ ] Serialization Languages\n - [ ] Serialization Libraries\n - [ ] Handwritten Binary Serialization\n - [ ] Code Generation\n - [ ] Transferring Files\n - [ ] State Machines\n - [ ] Authentication Using SASL\n - [ ] Large-Scale File Publishing: FileMQ\n - [ ] Why make FileMQ?\n - [ ] Initial Design Cut: the API\n - [ ] Initial Design Cut: the Protocol\n - [ ] Building and Trying FileMQ\n - [ ] Internal Architecture\n - [ ] Public API\n - [ ] Design Notes\n - [ ] Configuration\n - [ ] File Stability\n - [ ] Delivery Notifications\n - [ ] Symbolic Links\n - [ ] Recovery and Late Joiners\n - [ ] Test Use Case: The Track Tool\n - [ ] Getting an Official Port Number\n - [ ] **Chapter 8 - A Framework for Distributed Computing**\n - [ ] Design for The Real World\n - [ ] The Secret Life of WiFi\n - [ ] Why Mesh Isn't Here Yet\n - [ ] Some Physics\n - [ ] What's the Current Status?\n - [ ] Conclusions\n - [ ] Discovery\n - [ ] Service Discovery\n - [ ] Network Discovery\n - [ ] The Use Case\n - [ ] Technical Requirements\n - [ ] A Self-Healing P2P Network in 30 Seconds\n - [ ] Preemptive Discovery over Raw Sockets\n - [ ] Cooperative Discovery Using UDP Broadcasts\n - [ ] Multiple Nodes on One Device\n - [ ] Designing the API\n - [ ] More About UDP\n - [ ] Spinning Off a Library Project\n - [ ] Point-to-Point Messaging\n - [ ] UDP Beacon Framing\n - [ ] True Peer Connectivity (Harmony Pattern)\n - [ ] Detecting Disappearances\n - [ ] Group Messaging\n - [ ] Testing and Simulation\n - [ ] On Assertions\n - [ ] On Up-Front Testing\n - [ ] The Zyre Tester\n - [ ] Test Results\n - [ ] Tracing Activity\n - [ ] Dealing with Blocked Peers\n - [ ] Distributed Logging and Monitoring\n - [ ] A Plausible Minimal Implementation\n - [ ] Protocol Assertions\n - [ ] Binary Logging Protocol\n - [ ] Content Distribution\n - [ ] Writing the Unprotocol\n - [ ] Example Zyre 
Application\n - [ ] Conclusions\n - [ ] **Postface**\n - [ ] Tales from Out There\n - [ ] Rob Gagnon's Story\n - [ ] Tom van Leeuwen's Story\n - [ ] Michael Jakl's Story\n - [ ] Vadim Shalts's Story\n - [ ] How This Book Happened\n - [ ] Removing Friction\n - [ ] Licensing\n" }, { "alpha_fraction": 0.628893256187439, "alphanum_fraction": 0.6437683701515198, "avg_line_length": 37.39741134643555, "blob_id": "d0b868834e431668aa8a7505b9fcc0d06729161a", "content_id": "ffaaf2b435c9f525a22999fd0582cc2acc5c5831", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20773, "license_type": "no_license", "max_line_length": 75, "num_lines": 541, "path": "/bitfountain-ios7/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "- [x] Section 1 - Your First App!\n - [x] Introduction\n - [x] Opening XCode\n - [x] Label Use\n - [x] Buttons\n - [x] Colors\n - [x] UITextField\n - [x] UINavigationController\n- [x] Section 2 - Resources\n - [x] Resources\n- [x] Section 3 - Variables\n - [x] What is A Variable?\n - [x] Intro to Commenting\n - [x] Operations on Variables\n - [x] Logging into the Console\n - [x] Valid Variable Names\n - [x] Tokens\n - [x] Converting Units Part 1\n - [x] Converting Units Part 2\n- [x] Section 4 - Challenge 1: Age of Laika\n - [x] Age of Laika\n - [x] Age of Laika Part 1\n - [x] Age of Laika Part 2\n- [x] Section 5 - If Statements\n - [x] If Statements\n - [x] BOOLS\n - [x] If Statements Cont'd\n- [x] Section 6 - Challenge 2: For Loops\n - [x] Age of Laika Control Flow\n - [x] For Loops\n - [x] For Loops Part 2\n- [x] Section 7 - Challenge 3: 99 Sodas\n - [x] 99 Sodas\n- [x] Section 8 - Intro to Object Oriented Programming\n - [x] Create A New Project\n - [x] What are Classes and Objects\n - [x] Make Your Own Class\n - [x] Header File\n - [x] Implementation File\n- [x] Section 9 - Properties\n - [x] Properties\n - [x] Instantiating an Object\n - [x] Import\n - [x] Instantiating an Object Cont'd\n - [x] Setting Properties\n- [x] Section 10 - Methods\n - [x] Methods\n - [x] Implementing our Methods\n - [x] Methods with Arguements\n - [x] Self Properties\n - [x] Self Methods\n - [x] Methods with Multiple Arguments\n - [x] Methods with Return Values\n - [x] Review (weekly recap)\n- [x] Section 11 - Challenge 4: Methods\n - [x] The Challenge\n - [x] Methods Solution\n- [x] Section 12 - Classes\n - [x] ADD SOME FLARE (UI ELEMENTS)\n - [x] Multiple Dogs\n - [x] NSMutableArray\n - [x] Documentation\n - [x] Multiple Dogs Continued\n- [x] Section 13 - Extra Credit: Animations\n - [x] EC: Animation (strictly for Extra Credit)\n- [x] Section 14 - Challenge 5: Debug Recurring Dog\n - [x] The Challenge\n - [x] Recuring Dog Pictures\n- [x] Section 15 - Inheritance\n - [x] Inheritance\n - [x] Subclassing MBFDog\n - [x] Implementing a Superclass Method\n - [x] Super\n- [x] Section 16 - Object Continued\n - [x] The Difference Between Objects and Primitives\n - [x] NSString\n - [x] Iterating through an Array\n - [x] What's Really Going On Properties\n- [x] Section 17 - Pirate Adventure Assignment: Prereq's\n - [x] NSArray Initializer and Embedded Array\n - [x] CGPoint\n - [x] Embedded If Statements\n - [x] Buttons and AlertViews\n - [x] Property of a Custom Class\n - [x] Introduction to nil\n- [x] Section 18 - Pirate Adventure Assignment\n - [x] Pirate Assignment\n - [x] Pirate Game Introduction\n- [x] Section 19 - Pirate Adventure Solutions: Parts 1 & 2\n - [x] Pirate Storyboard Setup\n - [x] Hooking up the View\n 
- [x] Creating our tile object\n    - [x] Factory Object\n    - [x] Factory Explained\n    - [x] Setting up the Initial Tile\n    - [x] Hiding our Buttons Dynamically\n    - [x] Navigating Between Tiles\n    - [x] Add a Story and Images\n- [x] Section 20 - A Review\n    - [x] Review (week2 wrap)\n    - [x] You did it!\n- [x] Section 21 - Pirate Adventure Solutions: Part 3\n    - [x] Weapon and Armor Class\n    - [x] Creating a Character Class\n    - [x] Create a Character\n    - [x] Adding a Character to the Application\n- [ ] Section 22 - Pirate Adventure Solutions: Part 4\n    - [ ] Updating the Tile Model\n    - [ ] Implementing our Action Changes\n    - [ ] Finishing our Action Changes\n    - [ ] Create a Boss\n    - [ ] Implementing our Boss\n    - [ ] Alerting the User\n    - [ ] Clean up and a Reset Button\n- [ ] Section 23 - Pirate Adventure Wrap Up\n    - [ ] Pirate Assignment Solution\n- [ ] Section 24 - Terminal and Git\n    - [ ] Terminal\n    - [ ] Git Part 1\n    - [ ] Git Part 2\n- [ ] Section 25 - Introduction to MVC\n    - [ ] MVC Overview\n    - [ ] Communication between the View Controller and Model/Views\n    - [ ] Communication from the View and Model to the View Controller\n- [ ] Section 26 - Introduction to UITableView\n    - [ ] Practice Using UITableView\n    - [ ] New Project\n    - [ ] A Quick Look at our Data Source\n    - [ ] NSIndexPath\n    - [ ] NSIndexPath Cont'd\n    - [ ] Having our TableView Display our Model\n    - [ ] Model\n- [ ] Section 27 - Third Party Library\n    - [ ] NSDictionary\n    - [ ] Utilizing our new Model\n    - [ ] NSNumber\n    - [ ] #define\n    - [ ] Literals Review\n    - [ ] Class Methods\n- [ ] Section 28 - Review\n    - [ ] Section 3 Review\n- [ ] Section 29 - Challenge 6: UITableViewController\n    - [ ] The Challenge\n    - [ ] UITableViewController Solution\n- [ ] Section 30 - Models and Space Object\n    - [ ] Making a SpaceObject Class\n    - [ ] Literals Cont'd\n    - [ ] Custom Initializers\n    - [ ] Let's Create and Use Space Objects\n- [ ] Section 31 - Challenge 7: User Data Model\n    - [ ] The Challenge: User Data Model\n    - [ ] Solution Part 1\n    - [ ] Solution Part 2\n- [ ] Section 32 - Transitioning to a Second View Controller\n    - [ ] UINavigationController\n    - [ ] Push Segue\n    - [ ] Setting up our Second View Controller\n- [ ] Section 33 - UIScrollView\n    - [ ] UIScrollView - A Closer Look\n    - [ ] Setting up our UIScrollView\n    - [ ] Zooming\n    - [ ] Setting our ViewController as a Delegate\n- [ ] Section 34 - Challenge 8: UIScrollView\n    - [ ] The Challenge\n    - [ ] UIScrollView Solution\n- [ ] Section 35 - Passing Data between View Controllers\n    - [ ] Passing Data Forward\n    - [ ] id and introspection\n    - [ ] Implementing Introspection and Passing the Data\n    - [ ] Passing information to Proxy Properties instead of Outlets\n- [ ] Section 36 - Challenge 9: Passing Data\n    - [ ] The Challenge\n    - [ ] Passing Information to another ViewController Solution\n- [ ] Section 37 - Displaying our SpaceData\n    - [ ] Adding another UITableView Part 1\n    - [ ] Adding Another UITableView Part 2\n    - [ ] Writing the Logic behind our Space Data Table View Controller\n    - [ ] Adding a Method from our UITableViewDelegate\n    - [ ] Performing the Segue\n- [ ] Section 38 - Challenge 10: UITableView\n    - [ ] The Challenge\n    - [ ] UITableView Solution\n- [ ] Section 39 - Review\n    - [ ] Review\n- [ ] Section 40 - Introduction to Protocols and Delegation\n    - [ ] Chance of us Discovering a New Planet\n    - [ ] Designing our New View Controller\n    - [ ] One more Segue!\n    - [ ] Protocols in Objective-C\n- [ ] Section 41 - Implementing our Own Protocols\n    - [ ] Our Own Protocol Part 1\n    - [ ] Our Own 
Protocol Part 2\n    - [ ] Our Own Protocol Part 3\n    - [ ] Our Own Protocol Part 4\n    - [ ] Our Own Protocol Part 5\n- [ ] Section 42 - Finishing Touches on Protocols\n    - [ ] Reload Data\n    - [ ] Loose Ends\n    - [ ] Solution - No More Mercury\n    - [ ] Lazy Instantiation\n- [ ] Section 43 - Challenge 11: Protocols and Delegation\n    - [ ] The Challenge\n    - [ ] Protocols and Delegation Solution\n    - [ ] Protocols and Delegation Extra Credit\n- [ ] Section 44 - Intro to Persistence\n    - [ ] Persistence Overview\n    - [ ] NSUserDefaults\n- [ ] Section 45 - Data Persistence NSUserDefaults\n    - [ ] Getting our Space Objects into Tip Top Property List Shape\n    - [ ] Back to our NSUserDefaults\n    - [ ] Retrieving our Data\n    - [ ] Deleting some Cells from our UITableView\n- [ ] Section 46 - Challenge 12: NSUserDefaults, Segues and Protocols\n    - [ ] The Challenge\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 1\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 2\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 3\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 4\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 5\n    - [ ] NSUserDefaults, Segues and Protocols Solution Part 6\n- [ ] Section 47 - Review\n    - [ ] Review\n- [ ] Section 48 - Overdue Task List Assignment: Prereq's\n    - [ ] .PCH File\n    - [ ] TextView and the Keyboard\n    - [ ] NSDate, Datepicker, NSDateFormatter and TimeInterval\n    - [ ] Insert and Remove Objects from an NSMutableArray\n- [ ] Section 49 - Overdue Task List Assignment\n    - [ ] Overdue Task List Assignment\n- [ ] Section 50 - Overdue Task List Assignment: Part 1\n    - [ ] Setting up the Storyboard Part 1\n    - [ ] Setting up the Storyboard Part 2\n    - [ ] IBOutlets\n- [ ] Section 51 - Overdue Task List Assignment: Part 2\n    - [ ] Setup our #defines\n    - [ ] Creating a Task Model\n    - [ ] Implement the Custom Initializers\n- [ ] Section 52 - Overdue Task List Assignment: Part 3\n    - [ ] Protocol for the Cancel and Add Task Buttons\n    - [ ] Call the Delegate Methods in the Cancel and AddTask Actions\n    - [ ] An Array to hold the Tasks\n    - [ ] Implementing the Delegate Methods and Saving the Task\n    - [ ] Segue to the AddTaskViewController\n- [ ] Section 53 - Overdue Task List Assignment: Part 4\n    - [ ] Access NSUserDefaults to Setup the taskObjects Array\n- [ ] Section 54 - Overdue Task List Assignment: Part 5\n    - [ ] Setup the ViewController's TableViewDataSource\n- [ ] Section 55 - Overdue Task List Assignment: Part 6\n    - [ ] Color Coding the UITableViewCells\n    - [ ] Completing a Task\n- [ ] Section 56 - Overdue Task List Assignment: Part 7\n    - [ ] Delete a Task\n- [ ] Section 57 - Overdue Task List Assignment: Part 8\n    - [ ] Displaying Information in the DetailViewController\n- [ ] Section 58 - Overdue Task List Assignment: Part 9\n    - [ ] Reorder Tasks\n    - [ ] Persisting the Reorder\n- [ ] Section 59 - Overdue Task List Assignment: Part 10\n    - [ ] Setting up the EditViewController\n    - [ ] Saving the EditViewController Changes with a Delegate\n    - [ ] Implementing the EditViewController Delegate\n    - [ ] Implementing the DetailViewController Delegate\n- [ ] Section 60 - Overdue Task List Assignment: Part 11\n    - [ ] Make the Keyboards Go Away!\n- [ ] Section 61 - Overdue Task List Assignment: Solution\n    - [ ] Overdue Task List Solution\n- [ ] Section 62 - A Deeper Look into Views\n    - [ ] Subviews\n    - [ ] Difference between a View's Frame and Bounds\n    - [ ] A Deeper Look into a View's Frame and Bounds Part 1\n    - [ ] A Deeper Look into a View's Frame and Bounds 
Part 2\n- [ ] Section 63 - Creating Views Programmatically\n    - [ ] Adding a UIView Programmatically\n    - [ ] Adding UIButton Programmatically\n    - [ ] Target Action\n- [ ] Section 64 - Challenge 13: Custom Views\n    - [ ] The Challenge\n    - [ ] Custom Views Solution\n- [ ] Section 65 - Autolayout and Constraints\n    - [ ] Explore Autolayout\n    - [ ] Demo Autolayouts Part 1\n    - [ ] Demo Autolayouts Part 2\n- [ ] Section 66 - Further Exploration of Views\n    - [ ] View Controllers Lifecycle\n    - [ ] Custom Views\n- [ ] Section 67 - Introduction to UIBezierPaths\n    - [ ] Drawing with UIBezierPath Part 1\n    - [ ] Drawing with UIBezierPath Part 2\n- [ ] Section 68 - Challenge 14: UIBezierPath\n    - [ ] The Challenge\n    - [ ] UIBezierPath Solution\n- [ ] Section 69 - Another UIBezierPath\n    - [ ] Switch to an iPad Application\n    - [ ] More UIBezierPath\n    - [ ] Creating our Path\n    - [ ] Create a PathView Class\n- [ ] Section 70 - Creating a UIBezierPath on our Mountain\n    - [ ] How Storyboard Files are Saved\n    - [ ] Talk about Code Snippets\n    - [ ] Finishing our Mountain Path\n- [ ] Section 71 - Gesture Recognizers and Screen Interaction\n    - [ ] Gesture Recognizers\n    - [ ] Pan Gesture Recognizers\n- [ ] Section 72 - Scoring for our MountainPath\n    - [ ] Detect a UIBezierPath Hit\n    - [ ] NSTimer\n    - [ ] Adding a Score\n    - [ ] Finishing Touches on our Maze\n- [ ] Section 73 - Theory: Memory Management\n    - [ ] Memory Management Detour\n    - [ ] Object Ownership Strong and Weak\n    - [ ] Retain Cycles, they are Bad\n- [ ] Section 74 - Setting up our New Project\n    - [ ] Beginning a New Journey\n    - [ ] Starting our New Application and the App Delegate\n    - [ ] Adding a Storyboard and a UITableViewController\n- [ ] Section 75 - Introduction to CoreData\n    - [ ] Core Data an Initial Light Dusting\n    - [ ] Updating our xcdatamodel\n- [ ] Section 76 - UIAlertView\n    - [ ] Adding a New Album with UIAlertView\n    - [ ] UIAlertViewDelegate\n- [ ] Section 77 - Our CoreData\n    - [ ] It's Alive! 
Creating our First NSManagedObjectSubclass\n    - [ ] NSManagedObjectSubclass a Quick Review\n    - [ ] Finally Creating an Album\n- [ ] Section 78 - Accessing our Models from CoreData\n    - [ ] Querying our Database for Objects\n    - [ ] Review Accessing Objects from our Database\n    - [ ] A Quick Refactor\n- [ ] Section 79 - UICollectionViewController\n    - [ ] UICollectionViewController\n    - [ ] Getting a Photo on those Slides\n- [ ] Section 80 - UIImagePickerController\n    - [ ] UIImagePickerController\n    - [ ] UIImagePickerController Delegate\n    - [ ] Grabbing our Photo from UIImagePickerController\n- [ ] Section 81 - A Photo Model\n    - [ ] Adjust our Core Data Model\n    - [ ] Filling out our TWPictureDataTransformer\n- [ ] Section 82 - Saving a Photo\n    - [ ] Creating and Storing our Photos\n    - [ ] Implementing Creating and Storing the Photos\n- [ ] Section 83 - Further Photo Integration\n    - [ ] Prepare the Segue Practice\n    - [ ] Querying the Photos and Debugging\n- [ ] Section 84 - Deleting a Photo\n    - [ ] Adding a Photo DetailViewController\n    - [ ] Deleting a Photo from Core Data\n    - [ ] Fixing the Bug\n- [ ] Section 85 - Preparing for Image Filters\n    - [ ] Adding a CollectionViewController for the Filters\n    - [ ] Creating Filters\n    - [ ] CollectionView DataSource Methods\n- [ ] Section 86 - Image Filters\n    - [ ] Adding our Filters\n    - [ ] Through our Filters\n    - [ ] Saving our Filters\n- [ ] Section 87 - Multithreading\n    - [ ] Why is our App Slow\n    - [ ] GCD and Threading Overview\n    - [ ] Blocks\n- [ ] Section 88 - Blocks and Grand Central Dispatch\n    - [ ] Creating a Block and Implementing it\n    - [ ] GCD Example\n    - [ ] Fixing our Bug\n- [ ] Section 89 - Getting Ready for Parse\n    - [ ] Install Ruby and Ruby Gems\n    - [ ] What is CocoaPods and Installation\n    - [ ] Sign Up for Parse\n    - [ ] What is Parse and Why use it\n    - [ ] Installing Parse\n    - [ ] Installing Parse The Hard Way\n- [ ] Section 90 - Testing Parse\n    - [ ] Is Parse Working\n    - [ ] Testing Parse Storyboard Setup\n- [ ] Section 91 - PFObjects\n    - [ ] Creating PFObjects\n    - [ ] Saving PFObjects\n    - [ ] Querying for the PFObjects\n- [ ] Section 92 - Final Project: Matchedup\n    - [ ] Matchedup\n    - [ ] Setting up Parse\n    - [ ] Sign up and Setting up Facebook\n    - [ ] Integrating Facebook\n    - [ ] Installing Facebook the Hard Way\n- [ ] Section 93 - Matchedup: Login Functionality\n    - [ ] Login Functionality\n    - [ ] PFUser\n    - [ ] Saving User Information\n- [ ] Section 94 - Matchedup: The API Response\n    - [ ] A Deeper Look into Facebook's API\n    - [ ] Global Constants\n    - [ ] Implementing Our Constants\n    - [ ] Prep to Save a Photo Creating Constants and a URL\n    - [ ] Saving the Image with a PFFile\n    - [ ] Hitting the URL\n    - [ ] Implementing NSURLConnection Delegates\n    - [ ] What if the User is Already Logged in?\n    - [ ] Adding a Picture to the ProfileViewController\n- [ ] Section 95 - Matchedup: Next Steps\n    - [ ] Wireframes\n    - [ ] Storyboard Refactor\n- [ ] Section 96 - Matchedup: Adding the ViewControllers\n    - [ ] Add a HomeViewController\n    - [ ] Add a SettingsViewController\n    - [ ] Add an EditProfileViewController\n    - [ ] Add a ProfileViewController\n    - [ ] Refactor Login View Controller\n    - [ ] Setting up the HomeViewController\n- [ ] Section 97 - Matchedup: Managing Actions\n    - [ ] Downloading the Home Photo\n    - [ ] Updating the Home View's Information\n    - [ ] Loading the Next Photo\n    - [ ] Save a Like\n    - [ ] Save a Dislike\n    - [ ] Check for Likes\n    - [ ] Check for Dislikes\n    - [ ] Implement our Helper Methods\n    - [ ] Doing an Initial Query for 
Likes\n    - [ ] Creating Global Constants for the HomeViewController\n    - [ ] Implementing Constants in the HomeViewController\n- [ ] Section 98 - Matchedup: Managing User Profiles\n    - [ ] Creating Test Users\n    - [ ] Implementing the ProfileViewController\n    - [ ] Global Constants for the Settings Page\n    - [ ] Setting Initial Values and Preparing the Slider and Switches\n    - [ ] Sliders and Switch Changes\n    - [ ] Loading the EditProfileViewController\n    - [ ] Saving the TagLine\n    - [ ] Logging Out\n- [ ] Section 99 - Matchedup: Storyboard Setup\n    - [ ] Storyboard Setup Match\n    - [ ] Storyboard Setup Matches\n    - [ ] Storyboard Setup Chat\n- [ ] Section 100 - Matchedup: Chat Prep\n    - [ ] Check for Users' Likes\n    - [ ] Create a ChatRoom\n- [ ] Section 101 - Matchedup: MatchViewController\n    - [ ] Prepping the MatchViewController\n    - [ ] Implementing the MatchViewController\n    - [ ] Adding a Delegate to the MatchViewController\n- [ ] Section 102 - Matchedup: Chat Setup\n    - [ ] Finding Available Chats\n    - [ ] Presenting Available Chats\n    - [ ] Adding a Picture to the Chat List\n    - [ ] Selecting a ChatRoom\n    - [ ] Prepare the ChatViewController\n- [ ] Section 103 - Matchedup: Implementing Chats\n    - [ ] Getting Started Implementing Chats\n    - [ ] didSendText\n    - [ ] messageTypeForRowAtIndexPath\n    - [ ] bubbleImageViewWithType\n    - [ ] Additional Methods\n    - [ ] Optional Methods\n    - [ ] Required\n- [ ] Section 104 - Matchedup: Check/Refresh Chats\n    - [ ] Check for Chats\n    - [ ] Refreshing our Chat\n    - [ ] Testing Chats\n- [ ] Section 105 - Matchedup: Settings\n    - [ ] Setup Default Settings\n    - [ ] Allow Photo Helper Method\n    - [ ] Implement the Allow Photo\n- [ ] Section 106 - Matchedup: Constants\n    - [ ] Constants for ChatRoom and Chat\n    - [ ] Implementing Constants\n- [ ] Section 107 - Matchedup: Assets\n    - [ ] Asset Library\n    - [ ] Setting a Global Nav Bar\n    - [ ] Assets Login ViewController\n    - [ ] Assets Home ViewController\n    - [ ] Assets Profile ViewController\n    - [ ] Finishing up the Profile ViewController\n    - [ ] Assets Matches ViewController\n    - [ ] Update the ChatViewController\n    - [ ] Assets Settings ViewController\n    - [ ] EditProfile View Controller\n- [ ] Section 108 - Matchedup: Transition\n    - [ ] MatchViewController\n    - [ ] Create a Transition Class\n    - [ ] Conform and Implement UIViewControllerTransitioningDelegate\n- [ ] Section 109 - Matchedup: MixPanel\n    - [ ] Getting Started with MixPanel\n    - [ ] Implement MixPanel\n    - [ ] Using MixPanel\n- [ ] Section 110 - You did it!\n    - [ ] You did it!\n- [ ] Section 111 - Requested Topic: MVC Review\n    - [ ] MVC Review Part 1\n    - [ ] MVC Review Part 2\n    - [ ] MVC Review Part 3\n    - [ ] MVC Review Part 4\n- [ ] Section 112 - Requested Topic: World Traveler Part 1\n    - [ ] Introduction Libraries and Learning Goals\n    - [ ] Dependencies and Libraries\n    - [ ] Storyboard Setup\n    - [ ] Model Adding Entities\n    - [ ] Model Adding Relationships\n    - [ ] Adding a PrimaryAttributeKey\n    - [ ] Subclassing MMRecord and keyPathForResponseObject\n    - [ ] Subclassing AFHTTPSessionManager\n    - [ ] foursquare ClientID and ClientSecret\n    - [ ] Imports and MagicalRecord Setup\n    - [ ] Customizing TCFourSquareSessionManager\n    - [ ] Making our First Request\n    - [ ] Displaying the Information\n    - [ ] Current Location\n    - [ ] Venue Setup\n    - [ ] Setting up the Map\n- [ ] Section 113 - Requested Topic: World Traveler Part 2\n    - [ ] Finishing up our MapViewController\n    - [ ] Updating the MapViewController\n    - [ ] Setting up the Directions ViewController\n    - [ ] Location Manager a 
Quick Review\n - [ ] Getting Directions\n - [ ] Using our Directions Method\n - [ ] Getting the Route\n - [ ] Adding a Latitude and Longitude Offset\n - [ ] Drawing our Overlay\n - [ ] Setup the DirectionsListViewController\n - [ ] Segue To The DirectionsListViewController\n - [ ] Setup the List of Directions\n - [ ] Adding a TableViewHeader\n - [ ] Adding Map Snapshots\n - [ ] Installing a Facebook style Menu with MMDrawerController\n - [ ] Adding a MenuViewController to the Storyboard\n - [ ] Update the App Delegate\n - [ ] Setting Drawer Attributes\n - [ ] Adding Animations to our Menu\n - [ ] Adding a Menu Button\n - [ ] Setting up the MenuViewController\n - [ ] Adding our ListViewController to the Menu\n - [ ] Selecting our Menu Item\n - [ ] Adding some More ViewControllers\n - [ ] Adding our new ViewControllers to the Menu ViewController\n - [ ] Adding Menu Buttons to the Favorite and Add Venue ViewControllers\n - [ ] Adding Favoriting\n - [ ] Save using MagicalRecord\n - [ ] Displaying our Favorite Venues with Magical Record\n - [ ] Creating our a custom Venue with Magical Record\n" }, { "alpha_fraction": 0.3617021143436432, "alphanum_fraction": 0.3723404109477997, "avg_line_length": 19.66666603088379, "blob_id": "498e593c2c18959b675634ec0d583471fd4f1a61", "content_id": "b2a988df17c2299e3025c776688a310665ecaa14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/leetcode/first-missing-positive/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\ndef first_missing_positive(data):\n x = 1\n while True:\n for y in data:\n if x == y:\n x += 1\n break\n else:\n return x\n" }, { "alpha_fraction": 0.3727703094482422, "alphanum_fraction": 0.42300692200660706, "avg_line_length": 17.072368621826172, "blob_id": "91f5109a15a0518804389c461885cc920dec15a2", "content_id": "075e2d707dbe9e161f6b075226f00552068b7682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2747, "license_type": "no_license", "max_line_length": 85, "num_lines": 152, "path": "/algorithms/graph/single_source_shortest_path_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n\t\"github.com/jasonkeene/playground/data-structures/queue\"\n)\n\nfunc TestSingleSourceShortestPath(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tGraph graph.Graph\n\t\tStart int\n\t\tExpectedShortest []float64\n\t\tExpectedPrev []int\n\t}{\n\t\t\"basic graph\": {\n\t\t\tGraph: graph.Graph{\n\t\t\t\tNodes: []graph.Node{\n\t\t\t\t\t{\"A\"},\n\t\t\t\t\t{\"B\"},\n\t\t\t\t\t{\"C\"},\n\t\t\t\t\t{\"D\"},\n\t\t\t\t\t{\"E\"},\n\t\t\t\t},\n\t\t\t\tEdges: [][]graph.Edge{\n\t\t\t\t\t{\n\t\t\t\t\t\t{1, 1, 0},\n\t\t\t\t\t\t{2, 3, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{3, 2, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{4, 4, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{4, 3, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tStart: 0,\n\t\t\tExpectedShortest: []float64{0, 1, 3, 3, 6},\n\t\t\tExpectedPrev: []int{-1, 0, 0, 1, 3},\n\t\t},\n\t\t\"traffic graph\": {\n\t\t\tGraph: graph.Graph{\n\t\t\t\tNodes: 
[]graph.Node{\n\t\t\t\t\t{\"A\"},\n\t\t\t\t\t{\"B\"},\n\t\t\t\t\t{\"C\"},\n\t\t\t\t\t{\"D\"},\n\t\t\t\t\t{\"E\"},\n\t\t\t\t\t{\"F\"},\n\t\t\t\t\t{\"G\"},\n\t\t\t\t\t{\"H\"},\n\t\t\t\t\t{\"I\"},\n\t\t\t\t},\n\t\t\t\tEdges: [][]graph.Edge{\n\t\t\t\t\t{\n\t\t\t\t\t\t{1, 1, 0},\n\t\t\t\t\t\t{3, 2, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{0, 2, 0},\n\t\t\t\t\t\t{2, 1, 0},\n\t\t\t\t\t\t{4, 5, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{1, 2, 0},\n\t\t\t\t\t\t{5, 1, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{0, 10, 0},\n\t\t\t\t\t\t{4, 2, 0},\n\t\t\t\t\t\t{6, 2, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{1, 20, 0},\n\t\t\t\t\t\t{3, 3, 0},\n\t\t\t\t\t\t{5, 2, 0},\n\t\t\t\t\t\t{7, 5, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{2, 30, 0},\n\t\t\t\t\t\t{4, 3, 0},\n\t\t\t\t\t\t{8, 1, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{3, 10, 0},\n\t\t\t\t\t\t{7, 3, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{4, 20, 0},\n\t\t\t\t\t\t{6, 4, 0},\n\t\t\t\t\t\t{8, 3, 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{5, 8, 0},\n\t\t\t\t\t\t{7, 4, 0},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tStart: 7,\n\t\t\tExpectedShortest: []float64{24, 25, 26, 14, 14, 11, 4, 0, 3},\n\t\t\tExpectedPrev: []int{3, 0, 1, 6, 5, 8, 7, -1, 7},\n\t\t},\n\t}\n\n\talgoFuncs := map[string](func(int, graph.Graph, queue.Priority) ([]float64, []int)){\n\t\t\"dijkstra\": graph.Dijkstra,\n\t\t\"bellman-ford\": func(s int, g graph.Graph, q queue.Priority) ([]float64, []int) {\n\t\t\treturn graph.BellmanFord(s, g)\n\t\t},\n\t}\n\n\tqueueFuncs := map[string](func() queue.Priority){\n\t\t\"heap\": func() queue.Priority {\n\t\t\treturn queue.NewHeap()\n\t\t},\n\t\t\"array\": func() queue.Priority {\n\t\t\treturn queue.NewArray()\n\t\t},\n\t}\n\n\tfor ak, af := range algoFuncs {\n\t\tt.Run(ak, func(t *testing.T) {\n\t\t\tfor qk, qf := range queueFuncs {\n\t\t\t\tt.Run(qk, func(t *testing.T) {\n\t\t\t\t\tfor k, tc := range testCases {\n\t\t\t\t\t\tt.Run(k, func(t *testing.T) {\n\t\t\t\t\t\t\tshortest, prev := af(\n\t\t\t\t\t\t\t\ttc.Start,\n\t\t\t\t\t\t\t\ttc.Graph,\n\t\t\t\t\t\t\t\tqf(),\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tif !cmp.Equal(shortest, tc.ExpectedShortest) {\n\t\t\t\t\t\t\t\tt.Error(cmp.Diff(shortest, tc.ExpectedShortest))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif !cmp.Equal(prev, tc.ExpectedPrev) {\n\t\t\t\t\t\t\t\tt.Error(cmp.Diff(prev, tc.ExpectedPrev))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.795918345451355, "avg_line_length": 18.600000381469727, "blob_id": "0cc796995744e89cb4acd003c546db53c8b10ff5", "content_id": "270e961a89751f727d67d9421e82b08e883ae789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 98, "license_type": "no_license", "max_line_length": 47, "num_lines": 5, "path": "/parsers/go.mod", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "module github.com/jasonkeene/playground/parsers\n\ngo 1.12\n\nrequire github.com/google/go-cmp v0.2.0\n" }, { "alpha_fraction": 0.5499124526977539, "alphanum_fraction": 0.5586690306663513, "avg_line_length": 20.148147583007812, "blob_id": "2545dc7e753f50ba4626cd967cba63c5a84c9215", "content_id": "465f4f2dc4b8ce808dfd0bcce6cc9a0f15e0adf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 571, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/zeromq-the-guide/chapter2/sink.c", "repo_name": "jasonkeene/playground", 
"src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"zhelpers.h\"\n\n\nint main(int argc, char *argv[])\n{\n void *context = zmq_ctx_new();\n\n // socket to read messages from\n void *puller = zmq_socket(context, ZMQ_PULL);\n char buffer[30];\n snprintf(buffer, sizeof(buffer), \"tcp://*:%s\", argv[1]);\n printf(\"Binding to: %s\\n\", buffer);\n zmq_bind(puller, buffer);\n\n for (int i = 0; ; i++) {\n char *string = s_recv(puller);\n free(string);\n printf(\"%d\\n\", i);\n fflush(stdout);\n }\n\n zmq_close(puller);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7578475475311279, "alphanum_fraction": 0.7578475475311279, "avg_line_length": 19.272727966308594, "blob_id": "bc902bf532bbeb3954c9e62d05add4dc4a207d08", "content_id": "d08dca4020b2e00ae8f3279a7b167a4e8199de59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 223, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/cyclical-pipe/run.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ncleanup () {\n rm -rf bin\n}\n\nmkdir bin\ngo build -o bin/child github.com/jasonkeene/playground/cyclical-pipe/child\ntrap cleanup EXIT\n\nCHILD=bin/child go run github.com/jasonkeene/playground/cyclical-pipe/parent\n" }, { "alpha_fraction": 0.5868852734565735, "alphanum_fraction": 0.5934426188468933, "avg_line_length": 15.052631378173828, "blob_id": "d24d75385f1658129e70fa04aade5373beb9f77f", "content_id": "208ba5ead5428b0e68789fd188a4e006cb0fd506", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 305, "license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/cpp-how-to-program/chapter8/fig08_06.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nusing namespace std;\n\nint cubeByValue(int n)\n{\n return n * n * n;\n}\n\nint main()\n{\n int number = 5;\n\n cout << \"The original value of number is \" << number << endl;\n number = cubeByValue(number);\n cout << \"The new value of number is \" << number << endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5986546874046326, "alphanum_fraction": 0.6121076345443726, "avg_line_length": 16.84000015258789, "blob_id": "5fae32e7599089fe337f0ada3c18f9d6a4f260d7", "content_id": "9e530e3dce237681bdb871ca4a7daa832e2f98f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 447, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/algorithms/search/binary.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package search\n\n// Θ(log n)\nfunc BinarySearch(data []int, number int) int {\n\tmin_index := 0\n\tmax_index := len(data) - 1\n\n\tfor min_index <= max_index {\n\t\ttest_index := (min_index + max_index) / 2\n\n\t\t// test if index found\n\t\tif data[test_index] == number {\n\t\t\treturn test_index\n\t\t}\n\n\t\t// set indicies to appropriate values\n\t\tif data[test_index] > number {\n\t\t\tmax_index = test_index - 1\n\t\t} else {\n\t\t\tmin_index = test_index + 1\n\t\t}\n\t}\n\n\treturn -1\n}\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.729411780834198, "avg_line_length": 12.076923370361328, "blob_id": "0c4de428bdfa59b701165bb23d1cc738cb1ef871", "content_id": "e86e60bdb1f37958a42698cad6bb58ad695ac681", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 170, 
"license_type": "no_license", "max_line_length": 25, "num_lines": 13, "path": "/data-structures/queue/priority_queue.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package queue\n\ntype Element struct {\n\tKey float64\n\tValue interface{}\n}\n\ntype Priority interface {\n\tEmpty() bool\n\tInsert(Element)\n\tPopMin() Element\n\tDecrease(Element)\n}\n" }, { "alpha_fraction": 0.7469879388809204, "alphanum_fraction": 0.759036123752594, "avg_line_length": 82, "blob_id": "9ca0496a24d0dd78450a28535c75c9f95122e647", "content_id": "13ad037d5e59ed1a0ae18b522dc76bc3661b7cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 168, "license_type": "no_license", "max_line_length": 128, "num_lines": 2, "path": "/zeromq-the-guide/notes.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\"The zmq_socket() man page is fairly clear about the patterns — it's worth reading several times until it starts to make sense.\"\nhttp://api.zeromq.org/3-2:zmq-socket\n" }, { "alpha_fraction": 0.637005627155304, "alphanum_fraction": 0.637005627155304, "avg_line_length": 16.700000762939453, "blob_id": "af55a28357d0455ba76c44e06e14728967b81f2d", "content_id": "961f3198f9f0a8441ea63212a3e3cc55f92f9225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 708, "license_type": "no_license", "max_line_length": 74, "num_lines": 40, "path": "/elasticsearch-experiment/read.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n)\n\nvar (\n\turl = flag.String(\"url\", \"\", \"elastic search api url\")\n\ttype_ = flag.String(\"type\", \"\", \"the type of document to insert\")\n\tindex = flag.String(\"index\", \"\", \"the index to insert the document into\")\n)\n\nconst urlFmt = \"%s/%s/%s/_search?pretty\"\n\nfunc fatal(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc Search() {\n\tgetURL := fmt.Sprintf(urlFmt, *url, *index, *type_)\n\tprintln(getURL)\n\trequest, err := http.NewRequest(\"GET\", getURL, nil)\n\tfatal(err)\n\tresp, err := http.DefaultClient.Do(request)\n\tfatal(err)\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tfatal(err)\n\tfmt.Println(string(data))\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tSearch()\n}\n" }, { "alpha_fraction": 0.3887884318828583, "alphanum_fraction": 0.41952982544898987, "avg_line_length": 17.433332443237305, "blob_id": "5481a82d38d2cae3505bd4785c6086c283f26a52", "content_id": "0ad4ac34f06a8d94a3f0f8fabcd29a1509c215f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 553, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/golang-tour/basics/flow_control_statements_for_if_else_and_switch/08_exercise_loops_and_functions.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"fmt\"\n \"math\"\n)\n\nfunc Sqrt(x float64) (float64, int) {\n y, z := x, x\n i := 0\n for {\n i++\n z = z - (z * z - x) / (2 * z);\n if math.Abs(y - z) < 1e-15 {\n break\n }\n y = z\n }\n return z, i\n}\n\nfunc main() {\n fmt_str := \"%d: %g (%d) - %g = %g\\n\"\n for i := 1; i < 100; i++ {\n value, iterations := Sqrt(float64(i))\n actual := math.Sqrt(float64(i))\n diff := value - actual\n fmt.Printf(fmt_str, i, value, iterations, actual, diff)\n }\n}\n" }, { "alpha_fraction": 0.48414984345436096, "alphanum_fraction": 
0.4985590875148773, "avg_line_length": 17.263158798217773, "blob_id": "b619c01a2b4ead97d8dd942c363298bc8c2a8e3d", "content_id": "849c5e6859ccc75c738fbcd555f2842e4f88e2a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 347, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/golang-tour/basics/more_types_structs_slices_and_maps/14_exercise_slices.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"code.google.com/p/go-tour/pic\"\n\nfunc Pic(dx, dy int) [][]uint8 {\n parent := make([][]uint8, dy)\n for i := range parent {\n row := make([]uint8, dx)\n for j := range row {\n row[j] = uint8(i) * uint8(j)\n }\n parent[i] = row\n }\n return parent\n}\n\nfunc main() {\n pic.Show(Pic)\n}\n" }, { "alpha_fraction": 0.42739078402519226, "alphanum_fraction": 0.48878395557403564, "avg_line_length": 14.125, "blob_id": "55dfdd9896a8648febd5661872151c359c6fd379", "content_id": "20356067ced16a9dbdd54574d02c70752f3d6ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 847, "license_type": "no_license", "max_line_length": 52, "num_lines": 56, "path": "/algorithms/graph/floyd_warshall_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/graph\"\n)\n\nfunc TestFloydWarshall(t *testing.T) {\n\tg := graph.Graph{\n\t\tNodes: []graph.Node{\n\t\t\t{\"A\"},\n\t\t\t{\"B\"},\n\t\t\t{\"C\"},\n\t\t\t{\"D\"},\n\t\t},\n\t\tEdges: [][]graph.Edge{\n\t\t\t{\n\t\t\t\t{1, 3, 0},\n\t\t\t\t{2, 8, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{3, 1, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{1, 4, 0},\n\t\t\t},\n\t\t\t{\n\t\t\t\t{0, 2, 0},\n\t\t\t\t{2, -5, 0},\n\t\t\t},\n\t\t},\n\t}\n\n\tshortest, prev := graph.FloydWarshall(g)\n\texpectedShortest := [][]float64{\n\t\t{0, 3, -1, 4},\n\t\t{3, 0, -4, 1},\n\t\t{7, 4, 0, 5},\n\t\t{2, -1, -5, 0},\n\t}\n\texpectedPrev := [][]int{\n\t\t{-1, 0, 3, 1},\n\t\t{3, -1, 3, 1},\n\t\t{3, 2, -1, 1},\n\t\t{3, 2, 3, -1},\n\t}\n\n\tif !cmp.Equal(shortest, expectedShortest) {\n\t\tt.Fatal(cmp.Diff(shortest, expectedShortest))\n\t}\n\tif !cmp.Equal(prev, expectedPrev) {\n\t\tt.Fatal(cmp.Diff(prev, expectedPrev))\n\t}\n}\n" }, { "alpha_fraction": 0.5391872525215149, "alphanum_fraction": 0.5907111763954163, "avg_line_length": 32.585365295410156, "blob_id": "1388a06e48f9cf2045231fe1600af1aadb969112", "content_id": "1fbbb0effb0fb6ba091c811292ac2df94fde634c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 72, "num_lines": 41, "path": "/rfc5424/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [RFC 5424 - The Syslog Protocol](https://tools.ietf.org/html/rfc5424)\n\n - [x] 1. Introduction\n - [x] 2. Conventions Used in This Document\n - [x] 3. Definitions\n - [x] 4. Basic Principles\n - [x] 5. Transport Layer Protocol\n - [x] 6. Syslog Message Format\n - [x] 6.1. Message Length\n - [x] 6.2. HEADER\n - [x] 6.3. STRUCTURED-DATA\n - [x] 7. Structured Data IDs\n - [x] 7.1. timeQuality\n - [x] 7.2. origin\n - [x] 7.3. meta\n - [x] 8. Security Considerations\n - [x] 8.1. UNICODE\n - [x] 8.2. Control Characters\n - [x] 8.3. Message Truncation\n - [x] 8.4. Replay\n - [x] 8.5. Reliable Delivery\n - [x] 8.6. Congestion Control\n - [x] 8.7. 
Message Integrity\n - [x] 8.8. Message Observation\n - [x] 8.9. Inappropriate Configuration\n - [x] 8.10. Forwarding Loop\n - [x] 8.11. Load Considerations\n - [x] 8.12. Denial of Service\n - [x] 9. IANA Considerations\n - [x] 10. Working Group\n - [x] 11. Acknowledgments\n - [x] 12. References\n - [x] Appendix A. Implementer Guidelines\n - [x] A.1. Relationship with BSD Syslog\n - [x] A.2. Message Length\n - [x] A.3. Severity Values\n - [x] A.4. TIME-SECFRAC Precision\n - [x] A.5. Case Convention for Names\n - [x] A.6. Syslog Applications Without Knowledge of Time\n - [x] A.7. Notes on the timeQuality SD-ID\n - [x] A.8. UTF-8 Encoding and the BOM\n" }, { "alpha_fraction": 0.5644329786300659, "alphanum_fraction": 0.5773195624351501, "avg_line_length": 21.823530197143555, "blob_id": "ecc7c51922ea0fca4be54c31ed47ad91d2676bae", "content_id": "42e5e155e68a6dc38c2475b8e511899cee79b079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 776, "license_type": "no_license", "max_line_length": 57, "num_lines": 34, "path": "/zeromq-the-guide/chapter1/weather_server.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <zmq.h>\n#include <assert.h>\n#include \"zhelpers.h\"\n#include \"weather.h\"\n\n\n#define PORT \"5556\"\n\n\nint main(void)\n{\n printf(\"Starting weather server on port \" PORT \"\\n\");\n void *context = zmq_ctx_new();\n void *publisher = zmq_socket(context, ZMQ_PUB);\n int rc = zmq_bind(publisher, \"tcp://*:\"PORT);\n assert(rc == 0);\n /* rc = zmq_bind(publisher, \"ipc://weather.ipc\"); */\n /* assert(rc == 0); */\n\n // init random number generator\n while (1) {\n //\n Weather *weather = Weather_create_fake();\n printf(\"sending: %s\\n\", weather->str);\n s_send(publisher, weather->str);\n Weather_destroy(weather);\n s_sleep(10);\n }\n\n zmq_close(publisher);\n zmq_ctx_destroy(context);\n return 0;\n}\n" }, { "alpha_fraction": 0.420634925365448, "alphanum_fraction": 0.46296295523643494, "avg_line_length": 17.899999618530273, "blob_id": "8d73e2ea159fc07441db7197df425f7b29584350", "content_id": "593cff8a866257e94583f04c8fc86db0200d6b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 378, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/golang-tour/basics/more_types_structs_slices_and_maps/09_making_slices.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n a := make([]int, 5)\n a = []int{0, 1, 2, 3, 4}\n printSlice(\"a\", a)\n b := make([]int, 0, 6)\n b = []int{5, 6, 7, 8, 9}\n printSlice(\"b\", b)\n c := b[:2]\n printSlice(\"c\", c)\n d := c[2:5]\n printSlice(\"d\", d)\n}\n\nfunc printSlice(s string, x []int) {\n fmt.Printf(\"%s len=%d cap=%d %v\\n\", s, len(x), cap(x), x)\n}\n" }, { "alpha_fraction": 0.5969465374946594, "alphanum_fraction": 0.605597972869873, "avg_line_length": 16.389381408691406, "blob_id": "7214f783199007afe5d0a80d4fa79c59011b9703", "content_id": "ab9581b5a1fda5f6d3f0e97f216858a090c061c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 73, "num_lines": 113, "path": "/data-structures/queue/heap_priority_queue.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package queue\n\nimport (\n\t\"log\"\n\t\"math\"\n)\n\ntype Heap struct {\n\tvalues []Element\n\tindices 
map[interface{}]int\n}\n\nfunc NewHeap(v ...Element) *Heap {\n\th := &Heap{\n\t\tvalues: make([]Element, len(v), len(v)*2),\n\t\tindices: make(map[interface{}]int, len(v)),\n\t}\n\tprev := math.Inf(-1)\n\tfor i, he := range v {\n\t\tif prev > he.Key {\n\t\t\tlog.Fatalf(\"invalid heap init: %v\", v)\n\t\t}\n\t\tprev = he.Key\n\t\th.values[i] = he\n\t\th.indices[he.Value] = i\n\t}\n\treturn h\n}\n\nfunc (h *Heap) Empty() bool {\n\treturn len(h.values) == 0\n}\n\nfunc (h *Heap) Insert(he Element) {\n\th.values = append(h.values, he)\n\tlastIdx := len(h.values) - 1\n\th.indices[he.Value] = lastIdx\n\th.up(lastIdx)\n}\n\nfunc (h *Heap) up(idx int) {\n\the := h.values[idx]\n\tfor {\n\t\tparentIdx := (idx - 1) / 2\n\t\tparent := h.values[parentIdx]\n\n\t\tif he.Key >= parent.Key {\n\t\t\tbreak\n\t\t}\n\n\t\th.values[idx], h.values[parentIdx] = h.values[parentIdx], h.values[idx]\n\t\th.indices[he.Value] = parentIdx\n\t\th.indices[parent.Value] = idx\n\n\t\tidx = parentIdx\n\t}\n}\n\nfunc (h *Heap) PopMin() Element {\n\tmin := h.values[0]\n\tdelete(h.indices, min.Value)\n\n\tlastIdx := len(h.values) - 1\n\tlast := h.values[lastIdx]\n\th.values[0] = last\n\th.indices[last.Value] = 0\n\th.values = h.values[:lastIdx]\n\n\tif len(h.values) != 0 {\n\t\th.down(0)\n\t}\n\n\treturn min\n}\n\nfunc (h *Heap) down(idx int) {\n\the := h.values[idx]\n\tfor {\n\t\tleftIdx := idx*2 + 1\n\t\trightIdx := leftIdx + 1\n\n\t\tif leftIdx > len(h.values)-1 {\n\t\t\tbreak\n\t\t}\n\n\t\tchildIdx := leftIdx\n\t\tif rightIdx <= len(h.values)-1 {\n\t\t\tif h.values[rightIdx].Key < h.values[leftIdx].Key {\n\t\t\t\tchildIdx = rightIdx\n\t\t\t}\n\t\t}\n\n\t\tchild := h.values[childIdx]\n\t\tif he.Key <= child.Key {\n\t\t\tbreak\n\t\t}\n\t\th.values[idx], h.values[childIdx] = h.values[childIdx], h.values[idx]\n\t\th.indices[he.Value] = childIdx\n\t\th.indices[child.Value] = idx\n\n\t\tidx = childIdx\n\t}\n}\n\nfunc (h *Heap) Decrease(he Element) {\n\tidx, ok := h.indices[he.Value]\n\tif !ok {\n\t\tlog.Fatalf(\"Value was not in heap: %v\", he.Value)\n\t}\n\n\th.values[idx].Key = he.Key\n\th.up(idx)\n}\n" }, { "alpha_fraction": 0.7094972133636475, "alphanum_fraction": 0.7094972133636475, "avg_line_length": 24.571428298950195, "blob_id": "f41b7bf50c6c66ecddcfa38633db79f27f825d0b", "content_id": "ff4fdd468271eb9b4765aaa3fcccbb4b3c6db4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 358, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": "/python-logging/advanced-logging-tutorial/log_exec.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import logging\n\n\nlogging.basicConfig(filename=\"log_exec.log\", filemode=\"w\")\nlogger = logging.getLogger(__name__)\nprint logger is logging.getLogger(__name__)\n\n\ntry:\n foobar\nexcept NameError:\n logger.warn(\"OMG! A NameError was raised!\", exc_info=True)\n logger.error(\"OMG! A NameError was raised!\")\n logger.exception(\"OMG! 
A NameError was raised!\")\n" }, { "alpha_fraction": 0.5387881398200989, "alphanum_fraction": 0.5492088198661804, "avg_line_length": 24.401960372924805, "blob_id": "6389e5f32de4e6b087c0ca827ad025c5440abf1e", "content_id": "272748d883dc3df5c1c5e686e01446327a3f020f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2591, "license_type": "no_license", "max_line_length": 71, "num_lines": 102, "path": "/zeromq-the-guide/chapter2/zeroapi/client.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <assert.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <zmq.h>\n\n#include \"utils.h\"\n\n\nchar *node_id;\n\n\nint main(int argc, char *argv[])\n{\n // verify args\n assert(argc > 2);\n assert(argc % 2 == 1);\n\n // set node id\n node_id = generate_node_id();\n printf(\"Node ID: %s\\n\", node_id);\n\n // create context and sockets\n void *context = zmq_ctx_new();\n void *pusher = zmq_socket(context, ZMQ_PUSH);\n void *subscriber = zmq_socket(context, ZMQ_SUB);\n zmq_setsockopt(subscriber, ZMQ_SUBSCRIBE, node_id, 8);\n\n // get ports list\n IntList *ports = get_ports(argc, argv);\n\n // connect sockets\n IntNode *port = ports->head;\n for (int i = 0; port != NULL; i++) {\n char *connect_str = connection_str(port->data);\n if (i % 2 == 0) {\n printf(\"Connecting pusher to port %i.\\n\", port->data);\n zmq_connect(pusher, connect_str);\n } else {\n printf(\"Connecting subscriber to port %i.\\n\", port->data);\n zmq_connect(subscriber, connect_str);\n }\n free(connect_str);\n port = port->next;\n }\n\n\n for (int i = 0; i < 10; i++) {\n // loop variables\n zmq_msg_t send_msg, recv_msg;\n char *msg_str;\n char buffer[30] = {0};\n int msg_len;\n\n // sleep for a wee bit\n usleep(1000000);\n\n // setup push message\n snprintf(buffer, sizeof(buffer), \"%s:message #%i\", node_id, i);\n msg_str = strndup(buffer, sizeof(buffer));\n msg_len = strnlen(msg_str, 50);\n zmq_msg_init_size(&send_msg, msg_len);\n memcpy(&send_msg, msg_str, msg_len);\n\n // notify console that you are about to send message\n printf(\"sending: %s (len: %i)\\n\", msg_str, msg_len);\n\n // send message\n zmq_msg_send(&send_msg, pusher, 0);\n\n // cleanup\n free(msg_str);\n zmq_msg_close(&send_msg);\n\n // setup subscribe message\n zmq_msg_init(&recv_msg);\n zmq_msg_recv(&recv_msg, subscriber, 0);\n msg_len = zmq_msg_size(&recv_msg);\n msg_str = malloc(msg_len + 1);\n memcpy(msg_str, zmq_msg_data(&recv_msg), msg_len);\n msg_str[msg_len] = 0;\n\n // notify console that you got a message\n printf(\"got %s\\n\", msg_str);\n\n // cleanup\n free(msg_str);\n zmq_msg_close(&recv_msg);\n }\n\n // cleanup\n IntList_destroy(ports);\n zmq_close(pusher);\n zmq_close(subscriber);\n zmq_ctx_destroy(context);\n free(node_id);\n\n printf(\"Stopping client.\\n\");\n return 0;\n}\n" }, { "alpha_fraction": 0.6241258978843689, "alphanum_fraction": 0.6477272510528564, "avg_line_length": 17.754098892211914, "blob_id": "37b9795c210942cdce45872f489e306a9fa168fb", "content_id": "567fb359a83a80c4628e919292d29601e2d23c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1144, "license_type": "no_license", "max_line_length": 74, "num_lines": 61, "path": "/algorithms/compression/lzw_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package compression_test\n\nimport 
(\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/compression\"\n)\n\nfunc TestLZW(t *testing.T) {\n\ttext := []byte(\"TATAGATCTTAATATA\")\n\tcompressed := compression.LZW(text)\n\texpectedCompressed := []int{\n\t\t'T',\n\t\t'A',\n\t\t256, // TA\n\t\t'G',\n\t\t257, // AT\n\t\t'C',\n\t\t'T',\n\t\t256, // TA\n\t\t257, // AT\n\t\t264, // ATA\n\t}\n\n\tif !cmp.Equal(compressed, expectedCompressed) {\n\t\tt.Fatal(cmp.Diff(compressed, expectedCompressed))\n\t}\n\n\tdecompressed := compression.LZWDecompress(compressed)\n\tif !cmp.Equal(decompressed, text) {\n\t\tt.Fatal(cmp.Diff(decompressed, text))\n\t}\n}\n\nfunc TestLZWCorrectness(t *testing.T) {\n\ttestCompressionCorrectness(t, compression.LZW, compression.LZWDecompress)\n}\n\nfunc testCompressionCorrectness(\n\tt *testing.T,\n\tcomp func([]byte) []int,\n\tdec func([]int) []byte,\n) {\n\tfor i := 0; i < 1024; i++ {\n\t\ttc := []byte(randomString(i))\n\t\tresult := dec(comp(tc))\n\t\tif !cmp.Equal(tc, result) {\n\t\t\tt.Fatal(cmp.Diff(tc, result))\n\t\t}\n\t}\n}\n\nfunc randomString(n int) string {\n\tvar s string\n\tfor i := 0; i < n; i++ {\n\t\ts += string(rand.Intn(26) + 1 | 0x40)\n\t}\n\treturn s\n}\n" }, { "alpha_fraction": 0.594427227973938, "alphanum_fraction": 0.6006191968917847, "avg_line_length": 16, "blob_id": "9ca3db28143b5f1ae98c593ca3d9f1826bea72a7", "content_id": "f270cf45202266f1bcab52cecb07aa2a2f027add", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 323, "license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/cpp-how-to-program/chapter8/fig08_07.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nusing namespace std;\n\nvoid cubeByReference(int *nPtr)\n{\n *nPtr = *nPtr * *nPtr * *nPtr;\n}\n\nint main()\n{\n int number = 5;\n\n cout << \"The original value of number is \" << number << endl;\n cubeByReference(&number);\n cout << \"The new value of number is \" << number << endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6042618155479431, "alphanum_fraction": 0.6377473473548889, "avg_line_length": 12.408163070678711, "blob_id": "cf4846a09f3b479f8d2efb781b12104f77c1fd21", "content_id": "6cfaca6fd113b39a9b627bef797d72bb1232f427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 657, "license_type": "no_license", "max_line_length": 37, "num_lines": 49, "path": "/go-by-example/interfaces.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\ntype geometry interface {\n\tarea() float64\n\tperim() float64\n}\n\ntype square struct {\n\twidth, height float64\n}\n\nfunc (s *square) area() float64 {\n\treturn s.width * s.height\n}\n\nfunc (s *square) perim() float64 {\n\treturn s.width*2 + s.height*2\n}\n\ntype circle struct {\n\tradius float64\n}\n\nfunc (c *circle) area() float64 {\n\treturn math.Pi * c.radius * c.radius\n}\n\nfunc (c *circle) perim() float64 {\n\treturn math.Pi * 2 * c.radius\n}\n\nfunc measure(g geometry) {\n\tfmt.Println(g)\n\tfmt.Println(\"area:\", g.area())\n\tfmt.Println(\"perim:\", g.perim())\n}\n\nfunc main() {\n\ts := &square{3, 4}\n\tc := &circle{5}\n\n\tmeasure(s)\n\tmeasure(c)\n}\n" }, { "alpha_fraction": 0.5907928347587585, "alphanum_fraction": 0.594629168510437, "avg_line_length": 14.333333015441895, "blob_id": "8b694ffa5dc8614dcc47fde15a1b33881b4bc0ef", "content_id": 
"e75ac36d565aba610b1dde833a633a5a481bd3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 782, "license_type": "no_license", "max_line_length": 55, "num_lines": 51, "path": "/cyclical-pipe/parent/main.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc main() {\n\tcmd := exec.Command(os.Getenv(\"CHILD\"))\n\tchildIn, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer childIn.Close()\n\tchildOut, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer childOut.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// prime the pump\n\tfmt.Fprintln(childIn, \"12\")\n\n\tin := bufio.NewReader(childOut)\n\n\tfor {\n\t\ttext, err := in.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tn, err := strconv.Atoi(strings.TrimRight(text, \"\\n\"))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tnew := n + 2\n\t\tlog.Println(\"sending new value:\", new)\n\t\tfmt.Fprintf(childIn, \"%d\\n\", new)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n" }, { "alpha_fraction": 0.6813187003135681, "alphanum_fraction": 0.8186812996864319, "avg_line_length": 29.33333396911621, "blob_id": "ad78b79b5553a2912fb751bbea76c3afa77ec871", "content_id": "f8e8329bdb7ce34e3cabac8d0a06295e3b54cd23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 182, "license_type": "no_license", "max_line_length": 84, "num_lines": 6, "path": "/algorithms/go.mod", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "module github.com/jasonkeene/playground/algorithms\n\nrequire (\n\tgithub.com/google/go-cmp v0.2.0\n\tgithub.com/jasonkeene/playground/data-structures v0.0.0-20190223225500-5caeddf5042e\n)\n" }, { "alpha_fraction": 0.48218029737472534, "alphanum_fraction": 0.5104821920394897, "avg_line_length": 17.52427101135254, "blob_id": "c0bd9e868c3e540c6e65855e2fef5a49e15e829c", "content_id": "a79df4bcccec47f817f1175376b5e16b4fc9e7d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1908, "license_type": "no_license", "max_line_length": 51, "num_lines": 103, "path": "/algorithms/str/lcs_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package str_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/str\"\n)\n\nfunc TestLCS(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\ta string\n\t\tb string\n\t\tlcs map[string]struct{}\n\t}{\n\t\t\"single char\": {\n\t\t\ta: \"abcdef\",\n\t\t\tb: \"zyqwck\",\n\t\t\tlcs: map[string]struct{}{\n\t\t\t\t\"c\": struct{}{},\n\t\t\t},\n\t\t},\n\t\t\"adjacent characters\": {\n\t\t\ta: \"abcdef\",\n\t\t\tb: \"qzbcay\",\n\t\t\tlcs: map[string]struct{}{\n\t\t\t\t\"bc\": struct{}{},\n\t\t\t},\n\t\t},\n\t\t\"disjoint characters\": {\n\t\t\ta: \"abcdef\",\n\t\t\tb: \"azczez\",\n\t\t\tlcs: map[string]struct{}{\n\t\t\t\t\"ace\": struct{}{},\n\t\t\t},\n\t\t},\n\t\t\"multiple solutions\": {\n\t\t\ta: \"abcdef\",\n\t\t\tb: \"defabc\",\n\t\t\tlcs: map[string]struct{}{\n\t\t\t\t\"abc\": struct{}{},\n\t\t\t\t\"def\": struct{}{},\n\t\t\t},\n\t\t},\n\t\t\"dna\": {\n\t\t\ta: \"CATCGA\",\n\t\t\tb: \"GTACCGTCA\",\n\n\t\t\tlcs: map[string]struct{}{\n\t\t\t\t\"TCGA\": 
struct{}{},\n\t\t\t\t\"ACGA\": struct{}{},\n\t\t\t\t\"ATCA\": struct{}{},\n\t\t\t\t\"CCGA\": struct{}{},\n\t\t\t\t\"CTCA\": struct{}{},\n\t\t\t},\n\t\t},\n\t}\n\n\timpls := map[string](func(string, string) string){\n\t\t\"LCS\": str.LCS,\n\t\t\"LCSTable\": str.LCSTable,\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor fname, f := range impls {\n\t\t\t\tt.Run(fname, func(t *testing.T) {\n\t\t\t\t\tresult := f(tc.a, tc.b)\n\t\t\t\t\tif _, ok := tc.lcs[result]; !ok {\n\t\t\t\t\t\tt.Fatalf(\"invalid result: %q\", result)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"all \"+name, func(t *testing.T) {\n\t\t\tresult := str.AllLCS(tc.a, tc.b)\n\n\t\t\tif !cmp.Equal(result, tc.lcs) {\n\t\t\t\tt.Fatal(cmp.Diff(result, tc.lcs))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestLCSLengthTable(t *testing.T) {\n\ta := \"CATCGA\"\n\tb := \"GTACCGTCA\"\n\n\tresult := str.LCSLengthTable(a, b)\n\texpected := [][]int{\n\t\t{0, 0, 0, 1, 1, 1, 1, 1, 1},\n\t\t{0, 0, 1, 1, 1, 1, 1, 1, 2},\n\t\t{0, 1, 1, 1, 1, 1, 2, 2, 2},\n\t\t{0, 1, 1, 2, 2, 2, 2, 3, 3},\n\t\t{1, 1, 1, 2, 2, 3, 3, 3, 3},\n\t\t{1, 1, 2, 2, 2, 3, 3, 3, 4},\n\t}\n\n\tif !cmp.Equal(result, expected) {\n\t\tt.Fatal(cmp.Diff(result, expected))\n\t}\n}\n" }, { "alpha_fraction": 0.5498891472816467, "alphanum_fraction": 0.5631929039955139, "avg_line_length": 15.107142448425293, "blob_id": "c2870f0bd9d18df0b4d5411fed8dfa2a21281cfc", "content_id": "95d28944c6e971ea787dd5a61059cc60c948187d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 451, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/algorithms/graph/bellman_ford.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nimport (\n\t\"math\"\n)\n\nfunc BellmanFord(s int, g Graph) ([]float64, []int) {\n\tshortest := make([]float64, len(g.Nodes))\n\tfor i := range shortest {\n\t\tif i != s {\n\t\t\tshortest[i] = math.Inf(1)\n\t\t}\n\t}\n\tprev := make([]int, len(g.Nodes))\n\tfor i := range prev {\n\t\tprev[i] = -1\n\t}\n\n\tfor range g.Nodes {\n\t\tfor i, es := range g.Edges {\n\t\t\tfor _, e := range es {\n\t\t\t\trelax(i, e.Target, e.Weight, shortest, prev)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn shortest, prev\n}\n" }, { "alpha_fraction": 0.551068902015686, "alphanum_fraction": 0.570071280002594, "avg_line_length": 21.157894134521484, "blob_id": "68d3079efd5b73fba23ff0963970872f94aad8fa", "content_id": "2856b7acc401cba3e9ee63c5f5fa525e7075c557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 842, "license_type": "no_license", "max_line_length": 78, "num_lines": 38, "path": "/golang-tour/methods_and_interfaces/09_exercise_errors.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"fmt\"\n \"math\"\n)\n\ntype ErrNegativeSqrt float64\n\nfunc (e ErrNegativeSqrt) Error() string {\n // I decided to use Sprintf here vs Sprint, however to answer the question\n // posed in the exercise, the reason Sprint would recurse forever is\n // because it probably finds the Error method on e as the default format\n // method and calls it, then it calls Sprint again, and so on..\n return fmt.Sprintf(\"cannot Sqrt negative number: %f\", e)\n}\n\nfunc Sqrt(x float64) (float64, error) {\n if x < 0 {\n return 0.0, ErrNegativeSqrt(x)\n }\n y, z := x, x\n i := 0\n for {\n i++\n z = z - (z * z - x) / (2 * z);\n if math.Abs(y - z) < 1e-15 {\n break\n }\n y = z\n }\n 
return z, nil\n}\n\nfunc main() {\n fmt.Println(Sqrt(2))\n fmt.Println(Sqrt(-2))\n}\n" }, { "alpha_fraction": 0.41980475187301636, "alphanum_fraction": 0.4867503345012665, "avg_line_length": 27.68000030517578, "blob_id": "7e410acca478e326480cfa5e8e0afa38325a8507", "content_id": "41a989be387cf556101787727d3fd73c7cdef003", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/leetcode/spiral-matrix/test_solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "from solution import Solution\n\n\nclass DescribeSolution:\n def it_computes_spiral_for_square_input(self):\n input = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n expected_output = [1, 2, 3, 6, 9, 8, 7, 4, 5]\n assert Solution().spiralOrder(input) == expected_output\n\n def it_computes_spiral_for_rectangle_input(self):\n input = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12],\n ]\n expected_output = [1, 2, 3, 6, 9, 12, 11, 10, 7, 4, 5, 8]\n assert Solution().spiralOrder(input) == expected_output\n\n def it_handles_empty_input(self):\n assert Solution().spiralOrder([]) == []\n" }, { "alpha_fraction": 0.5450236797332764, "alphanum_fraction": 0.6461295485496521, "avg_line_length": 21.60714340209961, "blob_id": "d6582732df7b8b5a9cf2a143bde0859ded9d1454", "content_id": "7dc1551f616670f275d947dde0a22b59991e4a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 633, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/cpp-how-to-program/chapter8/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "all: $(shell ls *.cpp | sed -E 's/(.*).cpp/bin\\/\\1/')\n\nclean:\n\t[ -d bin ] && rm -r bin || exit 0\n\nbin:\n\tmkdir bin\n\nbin/fig08_04: bin fig08_04.cpp\n\tg++ -std=c++11 fig08_04.cpp -o bin/fig08_04\n\nbin/fig08_06: bin fig08_06.cpp\n\tg++ -std=c++11 fig08_06.cpp -o bin/fig08_06\n\nbin/fig08_07: bin fig08_07.cpp\n\tg++ -std=c++11 fig08_07.cpp -o bin/fig08_07\n\nbin/nullptr: bin nullptr.cpp\n\tg++ -std=c++11 nullptr.cpp -o bin/nullptr\n\nbin/arrays: bin arrays.cpp\n\tg++ -std=c++11 arrays.cpp -o bin/arrays\n\nbin/sizeof: bin sizeof.cpp\n\tg++ -std=c++11 sizeof.cpp -o bin/sizeof\n\nbin/cstrings: bin cstrings.cpp\n\tg++ -std=c++11 cstrings.cpp -o bin/cstrings\n" }, { "alpha_fraction": 0.5651612877845764, "alphanum_fraction": 0.6083871126174927, "avg_line_length": 25.27118682861328, "blob_id": "50b52b2e3bf1f8397f56410f618d52baefeab6dd", "content_id": "02659fe7b9c60e42268d2f15ecf287fe7c286403", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Ruby", "length_bytes": 1550, "license_type": "no_license", "max_line_length": 57, "num_lines": 59, "path": "/saltstack/prototypes/Vagrantfile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVAGRANTFILE_API_VERSION = \"2\"\nIPS = {\n \"master\" => \"10.13.37.10\",\n \"proxy1\" => \"10.13.37.11\",\n \"proxy2\" => \"10.13.37.12\",\n \"app1\" => \"10.13.37.13\",\n \"app2\" => \"10.13.37.14\",\n}\n\nVagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n config.vm.box = \"ubuntu/trusty64\"\n\n config.vm.define \"master\" do |master|\n hostname = \"master\"\n master.vm.network :private_network, ip: IPS[hostname]\n master.vm.provision \"shell\" do |s|\n s.path = \"bin/bootstrap-master.sh\"\n end\n end\n\n config.vm.define 
\"proxy1\" do |proxy1|\n hostname = \"proxy1\"\n proxy1.vm.network :private_network, ip: IPS[hostname]\n proxy1.vm.provision \"shell\" do |s|\n s.args = [IPS[\"master\"], hostname]\n s.path = \"bin/bootstrap-proxy.sh\"\n end\n end\n\n config.vm.define \"app1\" do |app1|\n hostname = \"app1\"\n app1.vm.network :private_network, ip: IPS[hostname]\n app1.vm.provision \"shell\" do |s|\n s.args = [IPS[\"master\"], hostname]\n s.path = \"bin/bootstrap-app.sh\"\n end\n end\n\n config.vm.define \"proxy2\" do |proxy2|\n hostname = \"proxy2\"\n proxy2.vm.network :private_network, ip: IPS[hostname]\n proxy2.vm.provision \"shell\" do |s|\n s.args = [IPS[\"master\"], hostname]\n s.path = \"bin/bootstrap-proxy.sh\"\n end\n end\n\n config.vm.define \"app2\" do |app2|\n hostname = \"app2\"\n app2.vm.network :private_network, ip: IPS[hostname]\n app2.vm.provision \"shell\" do |s|\n s.args = [IPS[\"master\"], hostname]\n s.path = \"bin/bootstrap-app.sh\"\n end\n end\nend\n" }, { "alpha_fraction": 0.5747368335723877, "alphanum_fraction": 0.6105263233184814, "avg_line_length": 12.970588684082031, "blob_id": "30341ffc23f92823ca14af9bd8aba205ba3539bd", "content_id": "6898023b5a8483525600a6ea6bff03ecb9c3f13b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 475, "license_type": "no_license", "max_line_length": 27, "num_lines": 34, "path": "/go-by-example/recursion.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc fib(n int) int {\n\tif n <= 1 {\n\t\treturn 1\n\t}\n\treturn fib(n-1) + fib(n-2)\n}\n\nfunc fact(n int) int {\n\tif n == 0 {\n\t\treturn 1\n\t}\n\treturn n * fact(n-1)\n}\n\nfunc main() {\n\tfmt.Println(\"fib:\")\n\tfmt.Println(fib(0))\n\tfmt.Println(fib(1))\n\tfmt.Println(fib(2))\n\tfmt.Println(fib(3))\n\tfmt.Println(fib(4))\n\tfmt.Println(\"\")\n\n\tfmt.Println(\"fact:\")\n\tfmt.Println(fact(0))\n\tfmt.Println(fact(1))\n\tfmt.Println(fact(2))\n\tfmt.Println(fact(3))\n\tfmt.Println(fact(4))\n}\n" }, { "alpha_fraction": 0.6263157725334167, "alphanum_fraction": 0.6473684310913086, "avg_line_length": 20.11111068725586, "blob_id": "b8c030dfaa87a84c41ef27eacf147a3e32360d48", "content_id": "f11ca6bbbe689270301679830caf38af6fa45fb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 190, "license_type": "no_license", "max_line_length": 70, "num_lines": 9, "path": "/algorithms/graph/relax.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nfunc relax(u, v int, weight float64, shortest []float64, prev []int) {\n\tnewPath := shortest[u] + weight\n\tif newPath < shortest[v] {\n\t\tshortest[v] = newPath\n\t\tprev[v] = u\n\t}\n}\n" }, { "alpha_fraction": 0.4611727297306061, "alphanum_fraction": 0.5071315169334412, "avg_line_length": 13.340909004211426, "blob_id": "5ba13b2541429664c5c75b7d44b13ba4184a1784", "content_id": "3be3fefabb9087075947fc454770c05aab0cb655", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 631, "license_type": "no_license", "max_line_length": 45, "num_lines": 44, "path": "/the-go-programming-language/ch4/src/slices/uniq.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n\ts1 := []string{\"a\", \"a\", \"b\", \"b\"}\n\ts1 = uniq(s1)\n\tfmt.Println(s1)\n\n\ts2 := []string{\"a\", \"a\", \"b\", \"b\", \"c\", \"b\"}\n\ts2 = uniq(s2)\n\tfmt.Println(s2)\n\n\ts3 := 
[]string{}\n\ts3 = uniq(s3)\n\tfmt.Println(s3)\n\n\ts4 := []string{\"a\"}\n\ts4 = uniq(s4)\n\tfmt.Println(s4)\n\n\ts5 := []string{\"a\", \"b\"}\n\ts5 = uniq(s5)\n\tfmt.Println(s5)\n\n\ts6 := []string{\"a\", \"a\"}\n\ts6 = uniq(s6)\n\tfmt.Println(s6)\n}\n\nfunc uniq(s []string) []string {\n\tif len(s) == 0 {\n\t\treturn s\n\t}\n\tlast := s[0]\n\tdropped := 0\n\tfor i, v := range s[1:] {\n\t\tif v == last {\n\t\t\tdropped++\n\t\t}\n\t\ts[i+1-dropped], last = v, v\n\t}\n\treturn s[:len(s)-dropped]\n}\n" }, { "alpha_fraction": 0.5855855941772461, "alphanum_fraction": 0.6407657861709595, "avg_line_length": 19.65116310119629, "blob_id": "2b34fd76727b07b17ea3ef11ec05938b43066426", "content_id": "ca07137a966ec8a23f412702a8626eb0cbff49f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 888, "license_type": "no_license", "max_line_length": 53, "num_lines": 43, "path": "/algorithms/search/linear_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package search_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/search\"\n)\n\nfunc TestLinearSearchCorrectness(t *testing.T) {\n\tdata := []int{0, 1, 6, 90, 3, 42, 71, 9, 53, 567}\n\n\tt.Run(\"element exists\", func(t *testing.T) {\n\t\tresult := search.Linear(data, 90)\n\t\tif result != 3 {\n\t\t\tt.Fatal(cmp.Diff(result, 3))\n\t\t}\n\t})\n\tt.Run(\"element does not exist\", func(t *testing.T) {\n\t\tresult := search.Linear(data, 999)\n\t\tif result != -1 {\n\t\t\tt.Fatal(cmp.Diff(result, -1))\n\t\t}\n\t})\n}\n\nfunc BenchmarkLinearBestCase(b *testing.B) {\n\tfor n := 0; n < b.N; n++ {\n\t\tsearch.Linear(benchmarkData, 8191711)\n\t}\n}\n\nfunc BenchmarkLinearAverageCase(b *testing.B) {\n\tfor n := 0; n < b.N; n++ {\n\t\tsearch.Linear(benchmarkData, 8348554)\n\t}\n}\n\nfunc BenchmarkLinearWorstCase(b *testing.B) {\n\tfor n := 0; n < b.N; n++ {\n\t\tsearch.Linear(benchmarkData, 6309519)\n\t}\n}\n" }, { "alpha_fraction": 0.38235294818878174, "alphanum_fraction": 0.406862735748291, "avg_line_length": 12.600000381469727, "blob_id": "0cea84ea8d3d8c650197d998cc1a509c3d38a068", "content_id": "94cc1ee7a660005e0cf88e65c4961b4d54b04a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 204, "license_type": "no_license", "max_line_length": 30, "num_lines": 15, "path": "/algorithms/sort/insertion.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nfunc Insertion(a []int) {\n\tfor i := 1; i < len(a); i++ {\n\t\tkey := a[i]\n\t\tvar j int\n\t\tfor j = i - 1; j >= 0; j-- {\n\t\t\tif a[j] <= key {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ta[j+1] = a[j]\n\t\t}\n\t\ta[j+1] = key\n\t}\n}\n" }, { "alpha_fraction": 0.7816091775894165, "alphanum_fraction": 0.8160919547080994, "avg_line_length": 28, "blob_id": "fe74aa3be993db1b98c82bc1533f73a387c4ab17", "content_id": "b968d8cb26e7132f27194ce3fe5019c584cf51ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 87, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/frida/tracee/go.mod", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "module github.com/jasonkeene/playground/frida/tracee\n\nrequire golang.org/x/text v0.3.0\n" }, { "alpha_fraction": 0.7379912734031677, "alphanum_fraction": 0.7379912734031677, "avg_line_length": 30.204545974731445, "blob_id": 
"656593310f576059589796629a80e58add7a7665", "content_id": "ec07ef8caac168dc84436756921f74ccf415a1e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 130, "num_lines": 44, "path": "/zeromq-the-guide/chapter1/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nall: hello version weather ventilator worker sink\n.PHONY: all\n\nbin:\n\t[ ! -d bin ] && mkdir bin\n\nlib:\n\t[ ! -d lib ] && mkdir lib\n\nhello: hello_server hello_client\n.PHONY: hello\n\nhello_server: bin\n\tclang -Wall -Wno-unused-function hello_server.c -o bin/hello_server -L/opt/local/lib -I/opt/local/include -lzmq\n\nhello_client: bin\n\tclang -Wall -Wno-unused-function hello_client.c -o bin/hello_client -L/opt/local/lib -I/opt/local/include -lzmq\n\nversion: bin\n\tclang -Wall -Wno-unused-function version.c -o bin/version -L/opt/local/lib -I/opt/local/include -lzmq\n\nweather: weather_server weather_client\n.PHONY: weather\n\nweather_lib: lib\n\tclang -Wall -c weather.c -o lib/weather.o\n\nweather_server: bin weather_lib\n\tclang -Wall -Wno-unused-function lib/weather.o weather_server.c -o bin/weather_server -L/opt/local/lib -I/opt/local/include -lzmq\n\nweather_client: bin weather_lib\n\tclang -Wall -Wno-unused-function lib/weather.o weather_client.c -o bin/weather_client -L/opt/local/lib -I/opt/local/include -lzmq\n\nventilator: bin\n\tclang -Wall -Wno-unused-function ventilator.c -o bin/ventilator -L/opt/local/lib -I/opt/local/include -lzmq\n\nworker: bin\n\tclang -Wall -Wno-unused-function worker.c -o bin/worker -L/opt/local/lib -I/opt/local/include -lzmq\n\nsink: bin\n\tclang -Wall -Wno-unused-function sink.c -o bin/sink -L/opt/local/lib -I/opt/local/include -lzmq\n\nclean:\n\trm -r bin lib\n" }, { "alpha_fraction": 0.46251943707466125, "alphanum_fraction": 0.4674960970878601, "avg_line_length": 14.165094375610352, "blob_id": "0ae42538ca944e8a85b595615972e7e4a3c58595", "content_id": "fa404bb0e8f3ef0215469966689419e690d0575b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3215, "license_type": "no_license", "max_line_length": 51, "num_lines": 212, "path": "/parsers/ini/lex/lex_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package lex_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/parsers/ini/lex\"\n)\n\nfunc TestLex(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tinput string\n\t\texpectedTokens []lex.Token\n\t}{\n\t\t\"empty\": {\n\t\t\tinput: \"\",\n\t\t\texpectedTokens: []lex.Token{\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"extra whitespace\": {\n\t\t\tinput: `\n\t\t\t\t[section]\n\t\t\t\tkey = val\n\t\t\t`,\n\t\t\texpectedTokens: []lex.Token{\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenSection,\n\t\t\t\t\tValue: \"section\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenKey,\n\t\t\t\t\tValue: \"key\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEqualSign,\n\t\t\t\t\tValue: \"=\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 
lex.TokenValue,\n\t\t\t\t\tValue: \"val\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"normal\": {\n\t\t\tinput: `\n\n[sectionA]\nkeyA1=valA1\nkeyA2=valA2\n\n[sectionB]\nkeyB1=valB1\nkeyB2=valB2\n\n`,\n\t\t\texpectedTokens: []lex.Token{\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenSection,\n\t\t\t\t\tValue: \"sectionA\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenKey,\n\t\t\t\t\tValue: \"keyA1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEqualSign,\n\t\t\t\t\tValue: \"=\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenValue,\n\t\t\t\t\tValue: \"valA1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenKey,\n\t\t\t\t\tValue: \"keyA2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEqualSign,\n\t\t\t\t\tValue: \"=\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenValue,\n\t\t\t\t\tValue: \"valA2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenLeftBracket,\n\t\t\t\t\tValue: \"[\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenSection,\n\t\t\t\t\tValue: \"sectionB\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenRightBracket,\n\t\t\t\t\tValue: \"]\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenKey,\n\t\t\t\t\tValue: \"keyB1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEqualSign,\n\t\t\t\t\tValue: \"=\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenValue,\n\t\t\t\t\tValue: \"valB1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenKey,\n\t\t\t\t\tValue: \"keyB2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEqualSign,\n\t\t\t\t\tValue: \"=\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenValue,\n\t\t\t\t\tValue: \"valB2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenNewLine,\n\t\t\t\t\tValue: \"\\n\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: lex.TokenEOF,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tl := lex.NewLexer(tc.input)\n\t\t\tl.Run()\n\n\t\t\tif !cmp.Equal(l.Tokens, tc.expectedTokens) {\n\t\t\t\tt.Error(cmp.Diff(l.Tokens, tc.expectedTokens))\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.41370558738708496, "alphanum_fraction": 0.5685279369354248, "avg_line_length": 22.176469802856445, "blob_id": "747458762255142b19d6303993ff8046f7df0688", "content_id": "77cb18ab4b95df06f4db0574080c29ac980ba010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 394, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": 
"/concurrent-prime-sieve/sieve_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"testing\"\n\nfunc TestKnownPrimes(t *testing.T) {\n\tprime_chan := Primes()\n\tknown_primes := []int{\n\t\t2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,\n\t\t59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,\n\t}\n\tfor _, prime := range known_primes {\n\t\tgot := <-prime_chan\n\t\tif prime != got {\n\t\t\tt.Errorf(\"bad prime, expected: %d, got: %d\", prime, got)\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.49877750873565674, "alphanum_fraction": 0.5110024213790894, "avg_line_length": 30.30769157409668, "blob_id": "bb5a26846f8f283de06ae2ad9721ac92c4cdd83e", "content_id": "aa63dc0734af0ccfa85ce5ec356df270c9063546", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/leetcode/pascals-triangle/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\ndef pascals_triangle(rows=5):\n triangle = []\n previous_row = []\n for i in range(rows):\n current_row = []\n for j in range(i + 1):\n if j == 0 or j == i:\n current_row.append(1)\n else:\n current_row.append(previous_row[j - 1] + previous_row[j])\n triangle.append(current_row)\n previous_row = current_row\n return triangle\n" }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6611111164093018, "avg_line_length": 31.10714340209961, "blob_id": "a27ab45dcb9d2a84ced1edc718869819c6baf4ac", "content_id": "a8d952ae5d8b35efbb8b8acd843882a14a6f2df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 900, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/x86-64-assembly-language-programming-with-ubuntu/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [x86-64 Assembly Language Programming with Ubuntu](http://www.egr.unlv.edu/~ed/x86.html)\n\n - [x] Chapter 1 - Introduction\n - [x] Chapter 2 - Architecture Overview\n - [x] Chapter 3 - Data Representation\n - [x] Chapter 4 - Program Format\n - [x] Chapter 5 - Tool Chain\n\n### Need to do Exercises\n\n - [ ] Chapter 6 - DDD Debugger\n - [ ] Chapter 7 - Instruction Set Overview\n - [ ] Chapter 8 - Addressing Modes\n - [ ] Chapter 9 - Process Stack\n\n### Need to Read and do Exercises\n\n - [ ] Chapter 10 - Program Development\n - [ ] Chapter 11 - Macros\n - [ ] Chapter 12 - Functions\n - [ ] Chapter 13 - System Services\n - [ ] Chapter 14 - Multiple Source Files\n - [ ] Chapter 15 - Stack Buffer Overflow\n - [ ] Chapter 16 - Command Line Arguments\n - [ ] Chapter 17 - Input/Output Buffering\n - [ ] Chapter 18 - Floating-Point Instructions\n - [ ] Chapter 19 - Parallel Processing\n - [ ] Chapter 20 - Interrupts\n" }, { "alpha_fraction": 0.40195122361183167, "alphanum_fraction": 0.4399999976158142, "avg_line_length": 17.981481552124023, "blob_id": "3cd84b37e31c7704de47435369c9e87b485413bb", "content_id": "793966cf7b524dcb45337c8956446c42d647b35b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1025, "license_type": "no_license", "max_line_length": 51, "num_lines": 54, "path": "/golang-tour/methods_and_interfaces/12_exercise_rot13reader.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"io\"\n \"os\"\n \"strings\"\n)\n\ntype 
rot13Reader struct {\n r io.Reader\n}\n\nfunc (r rot13Reader) Read(b []byte) (int, error) {\n n := len(b)\n tmp := make([]byte, n)\n for {\n // read out of source\n count, err := r.r.Read(tmp)\n\n // error checking on read\n if err == io.EOF {\n return 0, io.EOF\n } else if err != nil {\n return 0, err\n }\n\n // map characters\n rot13(tmp)\n\n // write out to dest\n copy(b, tmp)\n return count, nil\n }\n}\n\n// rot13 a given slice of bytes\nfunc rot13(b []byte) {\n for i := range b {\n if b[i] >= 65 && b[i] <= 90 {\n // byte is A-Z\n b[i] = (b[i] + 13 - 65) % 26 + 65\n } else if b[i] >= 97 && b[i] <= 122 {\n // byte is a-z\n b[i] = (b[i] + 13 - 97) % 26 + 97\n }\n }\n}\n\nfunc main() {\n s := strings.NewReader(\"Lbh penpxrq gur pbqr!\")\n r := rot13Reader{s}\n io.Copy(os.Stdout, &r)\n print(\"\\n\")\n}\n" }, { "alpha_fraction": 0.702531635761261, "alphanum_fraction": 0.702531635761261, "avg_line_length": 57.157894134521484, "blob_id": "39333544305387c2d38baac7eaf8ae7247ae33ed", "content_id": "cafa66058ef07f14e60dfe54731ff2e0bf176fd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 89, "num_lines": 19, "path": "/docker/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Documentation](https://docs.docker.com/)\n\n - [ ] [About Docker](https://docs.docker.com)\n - [ ] [Understanding Docker](https://docs.docker.com/introduction/understanding-docker/)\n - [x] [Install Docker on Mac OS X](https://docs.docker.com/installation/mac/)\n\n#### User Guide\n\n - [ ] [The Docker User Guide](https://docs.docker.com/userguide/)\n - [ ] [Getting Started with Docker Hub](https://docs.docker.com/userguide/dockerhub/)\n - [ ] [Dockerizing Applications](https://docs.docker.com/userguide/dockerizing/)\n - [ ] [Working with Containers](https://docs.docker.com/userguide/usingdocker/)\n - [ ] [Working with Docker Images](https://docs.docker.com/userguide/dockerimages/)\n - [ ] [Linking containers together](https://docs.docker.com/userguide/dockerlinks/)\n - [ ] [Managing data in containers](https://docs.docker.com/userguide/dockervolumes/)\n - [ ] [Working with Docker Hub](https://docs.docker.com/userguide/dockerrepos/)\n - [ ] [Docker Compose](https://docs.docker.com/compose/)\n - [ ] [Docker Machine](https://docs.docker.com/machine/)\n - [ ] [Docker Swarm](https://docs.docker.com/swarm/)\n" }, { "alpha_fraction": 0.5736863017082214, "alphanum_fraction": 0.5784274935722351, "avg_line_length": 19.24799919128418, "blob_id": "eb7e5f8e73df465c733e76900e1731007e7e6246", "content_id": "a29278c6ccb41e08611f50c9ced800f236aeefee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 84, "num_lines": 125, "path": "/lcs/lcs_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package lcs_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/jasonkeene/playground/lcs\"\n)\n\nvar fixtures = map[string]struct {\n\tA string\n\tB string\n\tResult string\n\tDiffA string\n\tDiffB string\n}{\n\t\"sub\": {\n\t\tA: \"I am waiting.\",\n\t\tB: \"I am always waiting.\",\n\t\tResult: \"I am waiting.\",\n\t\tDiffA: \"I am waiting.\",\n\t\tDiffB: \"I am +<al>wa+<ys wa>iting.\",\n\t},\n\t\"sub_small\": {\n\t\tA: \".\",\n\t\tB: \"I.\",\n\t\tResult: \".\",\n\t\tDiffA: \".\",\n\t\tDiffB: \"+<I>.\",\n\t},\n\t\"super\": {\n\t\tA: \"I am 
waiting.\",\n\t\tB: \"I wait.\",\n\t\tResult: \"I wait.\",\n\t\tDiffA: \"I -<am >wait-<ing>.\",\n\t\tDiffB: \"I wait.\",\n\t},\n\t\"super_small\": {\n\t\tA: \"I.\",\n\t\tB: \".\",\n\t\tResult: \".\",\n\t\tDiffA: \"-<I>.\",\n\t\tDiffB: \".\",\n\t},\n\t\"diff\": {\n\t\tA: \"I am waiting.\",\n\t\tB: \"I've been waiting.\",\n\t\tResult: \"I waiting.\",\n\t\tDiffA: \"I -<am> waiting.\",\n\t\tDiffB: \"I+<'ve> +<been> waiting.\",\n\t},\n}\n\nfunc test(t *testing.T, tf func(string, string) string) {\n\tfor k, f := range fixtures {\n\t\tresult := tf(f.A, f.B)\n\t\tif result != f.Result {\n\t\t\tt.Fatalf(\"Bad result for test %s: %q != %q\", k, result, f.Result)\n\t\t}\n\t}\n}\n\nfunc TestRecursive(t *testing.T) {\n\ttest(t, lcs.Recursive)\n}\n\nfunc TestRecursiveMemoized(t *testing.T) {\n\ttest(t, func(a, b string) string {\n\t\treturn lcs.RecursiveMemoized(a, b, map[lcs.MemKey]string{})\n\t})\n}\n\nfunc TestRecursiveIndexes(t *testing.T) {\n\ttest(t, func(a, b string) string {\n\t\treturn lcs.RecursiveIndexes(a, b, 0, 0)\n\t})\n}\n\nfunc TestRecursiveIndexesMemoized(t *testing.T) {\n\ttest(t, func(a, b string) string {\n\t\treturn lcs.RecursiveIndexesMemoized(a, b, 0, 0, map[lcs.MemKeyIndexes]string{})\n\t})\n}\n\nconst (\n\tbenchA = \"I am waiting.\"\n\tbenchB = \"I've been waiting.\"\n)\n\nfunc BenchmarkRecursive(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tlcs.Recursive(benchA, benchB)\n\t}\n}\n\nfunc BenchmarkRecursiveMemoized(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tlcs.RecursiveMemoized(benchA, benchB, map[lcs.MemKey]string{})\n\t}\n}\n\nfunc BenchmarkRecursiveIndexes(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tlcs.RecursiveIndexes(benchA, benchB, 0, 0)\n\t}\n}\n\nfunc BenchmarkRecursiveIndexesMemoized(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tlcs.RecursiveIndexesMemoized(benchA, benchB, 0, 0, map[lcs.MemKeyIndexes]string{})\n\t}\n}\n\nfunc TestDiff(t *testing.T) {\n\tfor _, f := range fixtures {\n\t\tresult := lcs.Diff(f.A, f.B)\n\t\tfmt.Printf(\"%#v\\n\", result)\n\t\tif result.A != f.DiffA {\n\t\t\tt.Fatalf(\"Bad result: %q != %q\", result.A, f.DiffA)\n\t\t}\n\t\tif result.B != f.DiffB {\n\t\t\tt.Fatalf(\"Bad result: %q != %q\", result.B, f.DiffB)\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5128304958343506, "alphanum_fraction": 0.5171073079109192, "avg_line_length": 22.171171188354492, "blob_id": "87b911029701e95b0d749b3f8de1931f593d0c55", "content_id": "38cc375f062a317a24f68d80f938879b77a71f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2572, "license_type": "no_license", "max_line_length": 67, "num_lines": 111, "path": "/golang-tour/concurrency/09_exercise_web_crawler.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"fmt\"\n)\n\ntype Fetcher interface {\n // Fetch returns the body of URL and\n // a slice of URLs found on that page.\n Fetch(url string) (body string, urls []string, err error)\n}\n\n// Crawl uses fetcher to recursively crawl\n// pages starting with url, to a maximum of depth.\nfunc Crawl(url string, depth int, fetcher Fetcher, signal chan int,\n visited *[]string) {\n // keep track of visited urls\n for _, s := range *visited {\n if url == s {\n fmt.Println(\"Already visited url \", url)\n signal <- -1\n return\n }\n }\n *visited = append(*visited, url)\n\n // check if max depth exceeded\n if depth <= 0 {\n signal <- -1\n return\n }\n\n // do the fetch\n body, urls, err := fetcher.Fetch(url)\n if err != nil {\n 
fmt.Println(err)\n signal <- -1\n return\n }\n fmt.Printf(\"found: %s %q\\n\", url, body)\n\n // crawl children\n for _, u := range urls {\n signal <- 1\n go Crawl(u, depth-1, fetcher, signal, visited)\n }\n\n signal <- -1\n return\n}\n\nfunc main() {\n signal := make(chan int)\n visited := make([]string, 0)\n go Crawl(\"http://golang.org/\", 4, fetcher, signal, &visited)\n for n := 1; n > 0; {\n select {\n case v := <-signal:\n n += v\n }\n }\n}\n\n// fakeFetcher is Fetcher that returns canned results.\ntype fakeFetcher map[string]*fakeResult\n\ntype fakeResult struct {\n body string\n urls []string\n}\n\nfunc (f fakeFetcher) Fetch(url string) (string, []string, error) {\n if res, ok := f[url]; ok {\n return res.body, res.urls, nil\n }\n return \"\", nil, fmt.Errorf(\"not found: %s\", url)\n}\n\n// fetcher is a populated fakeFetcher.\nvar fetcher = fakeFetcher{\n \"http://golang.org/\": &fakeResult{\n \"The Go Programming Language\",\n []string{\n \"http://golang.org/pkg/\",\n \"http://golang.org/cmd/\",\n },\n },\n \"http://golang.org/pkg/\": &fakeResult{\n \"Packages\",\n []string{\n \"http://golang.org/\",\n \"http://golang.org/cmd/\",\n \"http://golang.org/pkg/fmt/\",\n \"http://golang.org/pkg/os/\",\n },\n },\n \"http://golang.org/pkg/fmt/\": &fakeResult{\n \"Package fmt\",\n []string{\n \"http://golang.org/\",\n \"http://golang.org/pkg/\",\n },\n },\n \"http://golang.org/pkg/os/\": &fakeResult{\n \"Package os\",\n []string{\n \"http://golang.org/\",\n \"http://golang.org/pkg/\",\n },\n },\n}\n" }, { "alpha_fraction": 0.5525902509689331, "alphanum_fraction": 0.5671114325523376, "avg_line_length": 18.600000381469727, "blob_id": "ff80fbb5c579021b4215c6fd1467bbe33e63c437", "content_id": "a06b38cd323dc586d6fcca1f58d6248f138a6acc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2548, "license_type": "no_license", "max_line_length": 70, "num_lines": 130, "path": "/algorithms/str/transform.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package str\n\nimport (\n\t\"fmt\"\n\t\"log\"\n)\n\ntype Operation struct {\n\tType OperationType\n\tA byte\n\tB byte\n}\n\nfunc (o Operation) String() string {\n\tswitch o.Type {\n\tcase Copy, Delete, Insert:\n\t\treturn fmt.Sprintf(\"%s %s\", o.Type, string(o.A))\n\tcase Replace:\n\t\treturn fmt.Sprintf(\"%s %s by %s\", o.Type, string(o.A), string(o.B))\n\t}\n\treturn \"\"\n}\n\n//go:generate stringer -type OperationType\n\ntype OperationType int\n\nconst (\n\tNoOp OperationType = iota\n\tCopy\n\tReplace\n\tDelete\n\tInsert\n)\n\nvar operationCosts = map[OperationType]int{\n\tCopy: -1,\n\tReplace: 1,\n\tDelete: 2,\n\tInsert: 2,\n}\n\nfunc Transform(a, b string) []Operation {\n\t_, ops := computeTables(a, b)\n\treturn assembleTransformation(ops, len(a), len(b))\n}\n\nfunc assembleTransformation(ops [][]Operation, i, j int) []Operation {\n\tif i == 0 && j == 0 {\n\t\treturn nil\n\t}\n\n\top := ops[i][j]\n\tswitch op.Type {\n\tcase Replace, Copy:\n\t\treturn append(assembleTransformation(ops, i-1, j-1), ops[i][j])\n\tcase Delete:\n\t\treturn append(assembleTransformation(ops, i-1, j), ops[i][j])\n\tcase Insert:\n\t\treturn append(assembleTransformation(ops, i, j-1), ops[i][j])\n\t}\n\tlog.Panicf(\"Invalid operation type: %s (%#v)\", op.Type, op.Type)\n\treturn nil\n}\n\nfunc computeTables(a, b string) ([][]int, [][]Operation) {\n\tcosts := make([][]int, len(a)+1)\n\tops := make([][]Operation, len(a)+1)\n\tfor i := range costs {\n\t\tcosts[i] = make([]int, len(b)+1)\n\t\tops[i] 
= make([]Operation, len(b)+1)\n\n\t\tif i == 0 {\n\t\t\tfor j := range costs[i] {\n\t\t\t\tif j == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcosts[i][j] = j * operationCosts[Insert]\n\t\t\t\tops[i][j] = Operation{\n\t\t\t\t\tType: Insert,\n\t\t\t\t\tA: b[j-1],\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tcosts[i][0] = i * operationCosts[Delete]\n\t\tops[i][0] = Operation{\n\t\t\tType: Delete,\n\t\t\tA: (a[i-1]),\n\t\t}\n\t}\n\n\tfor i := 1; i < len(costs); i++ {\n\t\tfor j := 1; j < len(costs[i]); j++ {\n\t\t\tif a[i-1] == b[j-1] {\n\t\t\t\tcosts[i][j] = costs[i-1][j-1] + operationCosts[Copy]\n\t\t\t\tops[i][j] = Operation{\n\t\t\t\t\tType: Copy,\n\t\t\t\t\tA: a[i-1],\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcosts[i][j] = costs[i-1][j-1] + operationCosts[Replace]\n\t\t\t\tops[i][j] = Operation{\n\t\t\t\t\tType: Replace,\n\t\t\t\t\tA: a[i-1],\n\t\t\t\t\tB: b[j-1],\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif costs[i][j] > costs[i][j-1]+operationCosts[Insert] {\n\t\t\t\tcosts[i][j] = costs[i][j-1] + operationCosts[Insert]\n\t\t\t\tops[i][j] = Operation{\n\t\t\t\t\tType: Insert,\n\t\t\t\t\tA: b[j-1],\n\t\t\t\t}\n\t\t\t}\n\t\t\tif costs[i][j] > costs[i-1][j]+operationCosts[Delete] {\n\t\t\t\tcosts[i][j] = costs[i-1][j] + operationCosts[Delete]\n\t\t\t\tops[i][j] = Operation{\n\t\t\t\t\tType: Delete,\n\t\t\t\t\tA: a[i-1],\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn costs, ops\n}\n" }, { "alpha_fraction": 0.6098601818084717, "alphanum_fraction": 0.6157857179641724, "avg_line_length": 36.32743453979492, "blob_id": "0806b115ef35834f6502f1faa2430d1c5b6132c3", "content_id": "49a5d1404b8aaff6b6ae35113a319886a409ba7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4219, "license_type": "no_license", "max_line_length": 104, "num_lines": 113, "path": "/digital-fundamentals/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Digital Fundamentals](http://www.amazon.com/Digital-Fundamentals-Edition-Thomas-Floyd/dp/0132359235)\n\n - [ ] Chapter 1: Introductory Concepts\n    - [x] Digital and Analog Quantities\n    - [x] Binary Digits, Logic Levels, and Digital Waveforms\n    - [x] Basic Logic Operations\n    - [x] Introduction to the System Concept\n    - [ ] Digital Integrated Circuits\n - [ ] Chapter 2: Number Systems, Operations, and Codes\n    - [ ] Decimal Numbers\n    - [ ] Binary Numbers\n    - [ ] Decimal-to-Binary Conversion\n    - [ ] Binary Arithmetic\n    - [ ] 1's and 2's Complements of Binary Numbers\n    - [ ] Signed Numbers\n    - [ ] Arithmetic Operations with Signed Numbers\n    - [ ] Hexadecimal Numbers\n    - [ ] Octal Numbers\n    - [ ] Binary Coded Decimal (BCD)\n    - [ ] Digital Codes\n    - [ ] Error Detection Codes\n - [ ] Chapter 3: Logic Gates\n    - [ ] The Inverter\n    - [ ] The AND Gate\n    - [ ] The OR Gate\n    - [ ] The NAND Gate\n    - [ ] The NOR Gate\n    - [ ] The Exclusive-OR and Exclusive-NOR Gates\n    - [ ] Basics of Digital Integrated Circuits\n - [ ] Chapter 4: Boolean Algebra and Logic Simplification\n    - [ ] Boolean Operations and Expressions\n    - [ ] Laws and Rules of Boolean Algebra\n    - [ ] DeMorgan's Theorems\n    - [ ] Boolean Analysis of Logic Circuits\n    - [ ] Simplification Using Boolean Algebra\n    - [ ] Standard Forms of Boolean Expressions\n    - [ ] Boolean Expressions and Truth Tables\n    - [ ] The Karnaugh Map\n    - [ ] Karnaugh Map SOP Minimization\n    - [ ] Five-Variable Karnaugh Maps\n    - [ ] System Application\n - [ ] Chapter 5: Combinational Logic Analysis\n    - [ ] Basic Combinational Logic Circuits\n    - [ ] Implementing Combinational 
Logic\n    - [ ] The Universal Property of NAND and NOR Gates\n    - [ ] Combinational Logic Using NAND and NOR Gates\n    - [ ] Logic Circuit Operation with Pulse Waveform Inputs\n    - [ ] System Application\n - [ ] Chapter 6: Functions of Combinational Logic\n    - [ ] Basic Adders\n    - [ ] Parallel Binary Adders\n    - [ ] Ripple Carry versus Look-Ahead Carry Adders\n    - [ ] Comparators\n    - [ ] Decoders\n    - [ ] Encoders\n    - [ ] Code Converters\n    - [ ] Multiplexers (Data Selectors)\n    - [ ] Demultiplexers\n    - [ ] Parity Generators/Checkers\n    - [ ] Decoder Glitches\n    - [ ] System Application\n - [ ] Chapter 7: Latches and Flip-Flops\n    - [ ] Latches\n    - [ ] Edge-Triggered Flip-Flops\n    - [ ] Flip-Flop Operating Characteristics\n    - [ ] Flip-Flop Applications\n    - [ ] System Application\n - [ ] Chapter 8: Counters\n    - [ ] Asynchronous Counters\n    - [ ] Synchronous Counters\n    - [ ] Up/Down Synchronous Counters\n    - [ ] Design of Synchronous Counters\n    - [ ] Cascaded Counters\n    - [ ] Counter Decoding\n    - [ ] Counter Applications\n    - [ ] Logic Symbols with Dependency Notation\n    - [ ] System Application\n - [ ] Chapter 9: Shift Registers\n    - [ ] Basic Shift Register Operations\n    - [ ] Serial In/Serial Out Shift Registers\n    - [ ] Serial In/Parallel Out Shift Registers\n    - [ ] Parallel In/Serial Out Shift Registers\n    - [ ] Parallel In/Parallel Out Shift Registers\n    - [ ] Bidirectional Shift Registers\n    - [ ] Shift Register Counters\n    - [ ] Shift Register Applications\n    - [ ] Logic Symbols with Dependency Notation\n    - [ ] System Application\n - [ ] Chapter 10: Memory and Storage\n    - [ ] Memory Basics\n    - [ ] The Random-Access Memory (RAM)\n    - [ ] The Read-Only Memory (ROM)\n    - [ ] Programmable ROMs\n    - [ ] The Flash Memory\n    - [ ] Memory Expansion\n    - [ ] Special Types of Memories\n    - [ ] Magnetic and Optical Storage\n    - [ ] Testing Memory Chips\n    - [ ] System Application\n - [ ] Chapter 11: Programmable Logic and Software\n    - [ ] Introduction to Programmable Logic\n    - [ ] Programmable Logic\n    - [ ] Describing Logic with an HDL\n    - [ ] Combinational Logic with VHDL\n    - [ ] Programmable Logic: SPLDs and CPLDs\n    - [ ] Altera CPLDs\n    - [ ] Xilinx CPLDs\n    - [ ] Macrocells\n    - [ ] Programmable Logic: FPGAs\n    - [ ] Altera FPGAs\n    - [ ] Xilinx FPGAs\n    - [ ] Programmable Logic Software\n    - [ ] System Application\n" }, { "alpha_fraction": 0.773191511631012, "alphanum_fraction": 0.7774034142494202, "avg_line_length": 41.774776458740234, "blob_id": "b79470a295348eb2001720c9435696a830dbd2b2", "content_id": "806abf97fbcd99ae70ee7a782e3995c87870794d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9512, "license_type": "no_license", "max_line_length": 78, "num_lines": 222, "path": "/algorithms-unlocked/notes.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Chapter 1\n\nTwo desired properties of an algorithm:\n\n- Correctness\n- Resource Efficiency\n\nCorrectness is getting the right answer. Sometimes it is acceptable to have an\nalgorithm that gives wrong answers, such as the prime number check that is\npart of RSA which has an error rate of 1 in 2**50. Additionally, there is a class\nof algorithms called approximation algorithms that can give approximate\nanswers. 
An example of this is finding the fastest route for GPS navigation.\n\nResource efficiency primarily pertains to:\n\n- Execution Time\n- Memory Space\n\nbut can also pertain to things like:\n\n- Network I/O\n- Disk I/O\n- Use of Entropy\n\nSince measuring execution time depends on factors outside the algorithm, such\nas execution speed of the machine, we evaluate algorithms based on the size of\ntheir inputs and how increasing that size affects the rate of growth in\nexecution time.\n\n## Chapter 2\n\nSentinel linear search is an optimization that prevents you from checking if\nyou are out of bounds of the array.\n\nIn the definition of big theta notation he says:\n\n> The idea is that if we have two functions f(n) and g(n) we say that f(n) is\n> Θ(g(n)) if f(n) is within a constant factor of g(n) for sufficiently large\n> n.\n\nThis was a bit confusing at first but what he is saying is:\n\n- g(n) is just some expression parametrized by n, say n**2\n- f(n) is a function, in our situation it is a function that maps n to the\n  execution time, say something like 5n**2 + 3n + 20\n\nBig theta allows for an upper and lower bound that is of the form provided.\nThe actual upper and lower bound functions can be different but must have the\nsame form (same degree).\n\nBig O notation states the character of the upper bound only. Big omega\nnotation states the lower bound only. Therefore if f(n) is Θ(g(n)) then by\ndefinition it is also O(g(n)) and Ω(g(n)).\n\nLoop invariants are important for demonstrating that the loop does the right\nthing. They consist of:\n\n- Initialization: Statement that is true before the first iteration\n- Maintenance: If it is true before an iteration of the loop, it remains true\n  before the next iteration.\n- Termination: The loop terminates, and when it does, the loop invariant,\n  along with the reason that loop terminated, gives us a useful property.\n\nFor recursive algorithms to work you need a base case and the recursive call\nmust be a smaller set of the problem that will eventually terminate with the\nbase case.\n\n## Chapter 3\n\nBinary search works in O(lg n) time but only on sorted arrays. You need to\nsort the array before using binary search. If you are only doing a few\nsearches then linear searching makes more sense.\n\nThere are four sorting algorithms covered in this chapter:\n\n- Selection Sort\n- Insertion Sort\n- Merge Sort\n- Quick Sort\n\nThe sort key is the information in the value of the element that is used to\nsort the array. Satellite data is information associated with the sort key\nthat needs to travel with the key when it is moved.\n\nSorting in algorithms differs from what one might consider sorting such as\nplacing clothes into different groups. This is called bucketing or binning.\n\nFor binary search, to find the time complexity, ask how many iterations of the\nloop do we need to repeatedly halve a subarray to get to size 1. This is the\nsame as the amount of times we would have to double the size of the array from\n1 to reach n. This can be expressed as:\n\n    2**x = n or log2(n) = x\n\nThis demonstrates that the loop is dependent on the input n by log2(n). 
Since\nthe rest of the algorithm is O(1), this makes binary search O(log2(n)).\n\nIt is possible to beat Θ(lg n) worst-case time for searching, but only if we\norganize data in more elaborate ways and make certain assumptions about the\nkeys.\n\nRearranging the elements of the array is called permuting the array.\n\nThe summation of an arithmetic series is:\n\n    n(a1 + an)/2\n\nWith selection sort, since the algorithm swaps in the outer loop there are only\nΘ(n) swaps. This might be useful if swapping elements is costly.\n\nInsertion sort's best case is Θ(n) if the array is already sorted. Its worst\ncase is Θ(n**2) if it is reverse sorted.\n\nInsertion sort is great if the data is already almost sorted as it can\napproach Θ(n) time.\n\nMerge sort runs in Θ(n lg n) time in all cases. However, it takes up more\nspace as it has to copy the elements of the array into temporary memory in order\nto merge them. Merge sort uses the divide and conquer strategy. Divide and\nconquer splits a task into sub tasks, recursively solves those sub tasks, and\nthen joins the results.\n\nQuick sort uses divide and conquer as well. It is Θ(n**2) in the worst case\nbut in the average case it is Θ(n lg n). Quick sort is sorted in place so it\nuses less memory than merge sort and has better constant factors than merge\nsort. Quick sort's form is:\n\n- Choose the right most element as the pivot point.\n- Reorder the elements so that all elements that are lower than the value at\n  the pivot point are to its left, all elements that are greater than the\n  value at the pivot point are to its right.\n- Recursively apply sort to 0..pivot-1 and pivot+1..n-1.\n\nQuick sort's worst case is when it is already sorted or is reverse sorted. In\nthose situations it will take Θ(n**2). In order to get Θ(n lg n), before we\npartition we can simply swap the last element with a random element. This will\nmake getting the worst case quite difficult since it would rely on being really\nunlucky in your random swaps.\n\n## Chapter 4\n\nCounting sort and Radix sort run in Θ(n) time by \"bending the rules\" so to\nspeak. Both of these algorithms work by not comparing keys directly to one\nanother. Comparison sorting is where the elements of the array are compared\nwith each other.\n\nFor counting sort the keys need to be in a range and are numerically indexed,\ni.e. 0-500. The algorithm then counts the frequency of each of these values and\nuses this information to rearrange the values into a new array. This means it\ndoes not sort in place. This sort is stable and is Θ(m+n) and m is typically\nconstant.\n\nRadix sort uses a stable sort (such as a counting sort) to sort larger keys\nfrom right to left. The time complexity for this is dependent on the number of\ndigits in the keys that are being sorted, however this is typically constant.\n\n## Chapter 5\n\nOne method for getting a linear ordering from a DAG is to start with a\nnode that has nothing pointing to it, remove it from the graph and place it\nin the list. Continue this process until there are no nodes remaining. Placing\nnodes in order like this is called topological sorting. There can be multiple\ndifferent sortings of a DAG that are correct.\n\nThere are a few representations for the edges in a graph:\n\n1. adjacency matrix: A two dimensional array where each row and column\n   represents a vertex id. A 1 at matrix[u][v] represents that an\n   edge is present. This takes O(n**2) memory.\n2. unordered list: This is just an unordered list of (u, v) doubles.\n3. 
adjacency list: This is a hybrid of the other two representations. It is an\n   array of lists.\n\nLists used here can either be array backed or a linked list.\n\nTopological sort takes O(n + m) time where n is the number of vertices and m\nis the number of edges.\n\nA PERT chart is a project management tool that can be represented as a DAG.\nThe critical path of a PERT chart is the path that takes the most time to\ncomplete. This represents the minimum time to complete the project as a whole.\nIf we negate the time it takes to complete each task then we can use a shortest\npath algorithm to solve for the critical path.\n\nOne way to compute the shortest path is to compute the smallest weight between\na starting vertex and all other vertices. If you also record the previous\nvertex for each vertex along the shortest path you can then construct a\nsingle shortest path from u to v. To do this, first topological sort the DAG,\nthen relax each edge leaving each vertex in the topological order.\n\n## Chapter 6\n\nDAGs can rely on topological sorting to find the single-source shortest path.\nThis is not possible with graphs that have cycles. Finding the shortest path\nbetween two vertices in a graph is called the single-pair shortest path.\n\nThe Erdos/Bacon number is the sum of your Erdos and Bacon numbers which are\nyour shortest paths to Paul Erdos and Kevin Bacon.\n\nOne problem with cyclic weighted graphs is if you are trying to find the\nshortest path and the weights are allowed to be negative and a cycle has a\nnegative total weight you can get stuck in a forever decreasing cycle. Real\nworld problems such as satnav directions avoid negative weights (what would it\nmean to have negative travel time, this would break physics somehow I am\nsure). There are real world problems that involve graphs with cycles with\nnegative weights. One mentioned is determining if an arbitrage opportunity\nexists in currency trading.\n\nDijkstra's algorithm does not work on graphs that can have edges with negative\nweights but the graph can have cycles.\n\nRuntime analysis of Dijkstra's algorithm depends on the data structure used to\nstore and retrieve vertices that have not been processed. In particular the\noperations are:\n\n- Insert(Q, v): inserts a vertex into the set, called n times\n- Extract-Min(Q): removes the vertex with minimum shortest value, called\n  n times\n- Decrease-Key(Q, v): performs whatever bookkeeping is necessary to reorder\n  vertices when the shortest path for v is decreased, called m times\n\nThese operations describe a priority queue.\n" }, { "alpha_fraction": 0.7339449524879456, "alphanum_fraction": 0.7339449524879456, "avg_line_length": 35.25, "blob_id": "679d07d07afd9e796c6d53c13975b65990e09dd6", "content_id": "f71d787ccfd510d4a2abf72dcb538d9f58f49055", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 436, "license_type": "no_license", "max_line_length": 76, "num_lines": 12, "path": "/algorithms-unlocked/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Algorithms Unlocked](https://mitpress.mit.edu/books/algorithms-unlocked)\n\n- [x] What Are Algorithms and Why Should You Care?\n- [x] How to Describe and Evaluate Computer Algorithms\n- [x] Algorithms for Sorting and Searching\n- [x] A Lower Bound for Sorting and How to Beat It\n- [x] Directed Acyclic Graphs\n- [x] Shortest Paths\n- [x] Algorithms on Strings\n- [x] Foundations of Cryptography\n- [x] Data Compression\n- [x] Hard? 
Problems\n" }, { "alpha_fraction": 0.6670564413070679, "alphanum_fraction": 0.6679333448410034, "avg_line_length": 15.985980987548828, "blob_id": "96f1f072e6173eaedb4f33dbe32b0f1a386c44d9", "content_id": "89adebad37d07d67aeb5b0320a4efc4dd5d4b1fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3421, "license_type": "no_license", "max_line_length": 70, "num_lines": 214, "path": "/parsers/flbconfig/lex.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package flbconfig\n\nimport (\n\t\"fmt\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\ntype Token struct {\n\tType TokenType\n\tValue string\n}\n\n//go:generate stringer -type=TokenType\n\ntype TokenType int\n\nconst (\n\tTokenError TokenType = iota\n\tTokenEOF\n\tTokenNewLine\n\n\tTokenLeftBracket\n\tTokenRightBracket\n\n\tTokenSection\n\tTokenKey\n\tTokenValue\n)\n\nconst (\n\tRuneTab = '\\t'\n\tRuneSpace = ' '\n\tRuneLeftBracket = '['\n\tRuneRightBracket = ']'\n\tRuneNewLine = '\\n'\n)\n\ntype StateFunc func(*Lexer) StateFunc\n\ntype Lexer struct {\n\tInput string\n\tTokens []Token\n\tState StateFunc\n\n\tStart int\n\tPos int\n}\n\nfunc NewLexer(input string) *Lexer {\n\treturn &Lexer{\n\t\tInput: input,\n\t\tState: LexStart,\n\t}\n}\n\nfunc (l *Lexer) Run() {\n\tfor l.State != nil {\n\t\tl.State = l.State(l)\n\t}\n}\n\nfunc (l *Lexer) Emit(tokenType TokenType) {\n\tl.Tokens = append(l.Tokens, Token{\n\t\tType: tokenType,\n\t\tValue: l.Input[l.Start:l.Pos],\n\t})\n\tl.Start = l.Pos\n}\n\nfunc (l *Lexer) Errorf(format string, args ...interface{}) StateFunc {\n\tl.Tokens = append(l.Tokens, Token{\n\t\tType: TokenError,\n\t\tValue: fmt.Sprintf(format, args...),\n\t})\n\treturn nil\n}\n\nfunc (l *Lexer) Next() rune {\n\tresult, width := utf8.DecodeRuneInString(l.Input[l.Pos:])\n\tl.Pos += width\n\treturn result\n}\n\nfunc (l *Lexer) PeekNext() rune {\n\tresult, _ := utf8.DecodeRuneInString(l.Input[l.Pos:])\n\treturn result\n}\n\nfunc (l *Lexer) EOF() bool {\n\treturn l.Pos >= len(l.Input)\n}\n\nfunc LexStart(l *Lexer) StateFunc {\n\tif l.EOF() {\n\t\treturn LexEOF\n\t}\n\n\tswitch next := l.PeekNext(); next {\n\tcase RuneTab, RuneSpace:\n\t\treturn LexGlobalWhiteSpace\n\tcase RuneNewLine:\n\t\treturn LexNewLine\n\tcase RuneLeftBracket:\n\t\treturn LexLeftBracket\n\tdefault:\n\t\treturn LexKey\n\t}\n}\n\nfunc LexEOF(l *Lexer) StateFunc {\n\tl.Emit(TokenEOF)\n\treturn nil\n}\n\nfunc LexGlobalWhiteSpace(l *Lexer) StateFunc {\n\tl.Next()\n\tl.Start = l.Pos\n\treturn LexStart\n}\n\nfunc LexKeyWhiteSpace(l *Lexer) StateFunc {\n\tl.Next()\n\tl.Start = l.Pos\n\n\tnext := l.PeekNext()\n\n\tif unicode.IsLetter(next) || unicode.IsNumber(next) {\n\t\treturn LexValue\n\t}\n\n\tswitch next {\n\tcase RuneTab, RuneSpace:\n\t\treturn LexKeyWhiteSpace\n\tdefault:\n\t\treturn l.Errorf(\"invalid key/val delimiter\")\n\t}\n}\n\nfunc LexNewLine(l *Lexer) StateFunc {\n\tl.Pos += len(string(RuneNewLine))\n\tl.Emit(TokenNewLine)\n\treturn LexStart\n}\n\nfunc LexLeftBracket(l *Lexer) StateFunc {\n\tl.Pos += len(string(RuneLeftBracket))\n\tl.Emit(TokenLeftBracket)\n\treturn LexSection\n}\n\nfunc LexSection(l *Lexer) StateFunc {\n\tfor {\n\t\tif l.EOF() {\n\t\t\treturn l.Errorf(\"unexpected EOF\")\n\t\t}\n\n\t\tnext := l.PeekNext()\n\t\tif !unicode.IsLetter(next) && !unicode.IsNumber(next) {\n\t\t\tswitch next {\n\t\t\tcase RuneRightBracket:\n\t\t\t\tl.Emit(TokenSection)\n\t\t\t\treturn LexRightBracket\n\t\t\tdefault:\n\t\t\t\treturn l.Errorf(\"missing right 
bracket\")\n\t\t\t}\n\t\t}\n\n\t\tl.Next()\n\t}\n}\n\nfunc LexRightBracket(l *Lexer) StateFunc {\n\tl.Pos += len(string(RuneRightBracket))\n\tl.Emit(TokenRightBracket)\n\treturn LexStart\n}\n\nfunc LexKey(l *Lexer) StateFunc {\n\tfor {\n\t\tif l.EOF() {\n\t\t\treturn l.Errorf(\"unexpected EOF\")\n\t\t}\n\n\t\tnext := l.PeekNext()\n\t\tif !unicode.IsLetter(next) && !unicode.IsNumber(next) {\n\t\t\tswitch next {\n\t\t\tcase RuneTab, RuneSpace:\n\t\t\t\tl.Emit(TokenKey)\n\t\t\t\treturn LexKeyWhiteSpace\n\t\t\tdefault:\n\t\t\t\treturn l.Errorf(\"invalid key\")\n\t\t\t}\n\t\t}\n\n\t\tl.Next()\n\t}\n}\n\nfunc LexValue(l *Lexer) StateFunc {\n\tfor {\n\t\tif l.EOF() {\n\t\t\treturn l.Errorf(\"unexpected EOF\")\n\t\t}\n\n\t\tr := l.PeekNext()\n\t\tif r == RuneNewLine {\n\t\t\tl.Emit(TokenValue)\n\t\t\treturn LexStart\n\t\t}\n\n\t\tl.Next()\n\t}\n}\n" }, { "alpha_fraction": 0.5846154093742371, "alphanum_fraction": 0.6246153712272644, "avg_line_length": 13.1304349899292, "blob_id": "672124e14751b1de335579122871479d13535376", "content_id": "abbcc51300941d9a7b070d84df4fd7c8ad90de13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 325, "license_type": "no_license", "max_line_length": 35, "num_lines": 23, "path": "/golang-tour/basics/packages_variables_and_functions/16_numeric_constants.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nconst (\n Big = 1 << 63\n Small = Big >> 99\n)\n\nfunc needInt(x uint64) uint64 {\n return x\n}\n\nfunc needFloat(x float64) float64 {\n return x\n}\n\nfunc main() {\n fmt.Println(needInt(Small))\n fmt.Println(needInt(Big))\n fmt.Println(needFloat(Small))\n fmt.Println(needFloat(Big))\n}\n" }, { "alpha_fraction": 0.5840080976486206, "alphanum_fraction": 0.6285424828529358, "avg_line_length": 20.478260040283203, "blob_id": "a3694b5dc8f0979e04e24af3ed97bf1a9bb6e290", "content_id": "4b3f6379ef896bfc1e6f7f7a00d15c3845ece4f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 988, "license_type": "no_license", "max_line_length": 97, "num_lines": 46, "path": "/the-go-programming-language/ch1/src/lissajous/lissajous.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"image\"\n\t\"image/color/palette\"\n\t\"image/gif\"\n\t\"io\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n)\n\nfunc main() {\n\tlissajous(os.Stdout)\n}\n\nfunc lissajous(w io.Writer) {\n\tconst (\n\t\tnFrames = 64\n\t\tdelay = 4\n\t\tsize = 200\n\t\tcycles = 2\n\t\tresolution = 0.001\n\t)\n\tanim := gif.GIF{LoopCount: nFrames}\n\tfreq := rand.Float64() * 3.0\n\tphase := 0.0\n\tfor i := 0; i < nFrames; i++ {\n\t\tframe := lissajousFrame(size, cycles, resolution, freq, phase)\n\t\tphase += 0.1\n\t\tanim.Delay = append(anim.Delay, delay)\n\t\tanim.Image = append(anim.Image, frame)\n\t}\n\tgif.EncodeAll(w, &anim)\n}\n\nfunc lissajousFrame(size int, cycles, resolution, freq, phase float64) *image.Paletted {\n\trect := image.Rect(0, 0, size*2+1, size*2+1)\n\timg := image.NewPaletted(rect, palette.WebSafe)\n\tfor t := 0.0; t < cycles*2*math.Pi; t += resolution {\n\t\tx := math.Sin(t)\n\t\ty := math.Sin(t*freq + phase)\n\t\timg.SetColorIndex(size+int(x*float64(size)+0.5), size+int(y*float64(size)+0.5), uint8(t*5+150))\n\t}\n\treturn img\n}\n" }, { "alpha_fraction": 0.5733137726783752, "alphanum_fraction": 0.5777125954627991, "avg_line_length": 16.947368621826172, "blob_id": 
"62de9caf317c310aa6b0a7f54c18b6c0edc39eaf", "content_id": "97d8047ecd291ed1c240a7752eb4eb0866e80176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 72, "num_lines": 38, "path": "/pickle_rename/test_pickle.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport pickle\nimport sys\n\nfrom test_lib import Foo\n\n\nDBNAME = 'pickle_data'\nCOMMANDS = {}\n\n\ndef command(func):\n COMMANDS[func.__name__] = func\n return func\n\n\n@command\ndef write(*name):\n \"\"\"Write foo object from db file.\"\"\"\n foo = Foo(' '.join(name))\n with open(DBNAME, 'w') as f:\n f.write(pickle.dumps(foo))\n\n\n@command\ndef read():\n \"\"\"Read foo object from db file.\"\"\"\n with open(DBNAME) as f:\n foo = pickle.loads(f.read())\n print foo\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print \"must choose from commands: \" + ', '.join(COMMANDS.keys())\n else:\n COMMANDS[sys.argv[1]](*sys.argv[2:])\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 14, "blob_id": "c89dcd5295b95b02af1da48c1965612e8c60bb56", "content_id": "473ebefc95e0692966b4e5aa5059d5c2e804ef3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 15, "license_type": "no_license", "max_line_length": 14, "num_lines": 1, "path": "/zerorpc-streaming/requirements.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "zerorpc==0.4.4\n" }, { "alpha_fraction": 0.4816232919692993, "alphanum_fraction": 0.5137825608253479, "avg_line_length": 19.076923370361328, "blob_id": "3ac410816dc9787dcf38887aa3d779acb187682a", "content_id": "275ed0a36021a26c564c6202c40690ca9ec83598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1306, "license_type": "no_license", "max_line_length": 70, "num_lines": 65, "path": "/haskell-book/ch4/exercises.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Mood Swing\n\n1. `Mood`\n2. `Blah` or `Woot`\n3. It should be `changeMood :: Mood -> Mood`\n4. ```\n changeMood Blah = Woot\n changeMood _ = Blah\n ```\n5. Got it right eventually!\n\n## Find the Mistakes\n\n1. Nope, `true` is not in scope.\n2. Nope, `x = 6` is for defining the value x.\n3. Yea.\n4. No, `Merry` and `Happy` are out of scope and don't implement `Ord`.\n5. Nope, `[Num]` and `[Char]` can not be concatenated.\n\n## Chapter Exercises\n\n1. `[a] -> Int`\n2. a) 5\n b) 3\n c) 2\n d) 5\n3. The second one fails because you can't divide `Int`s\n4. `div 6 (length [1, 2, 3])`\n5. `Bool` and `True`\n6. `Bool` and `False`\n7. a) Yes, `True`.\n b) No, because lists require elements to be of the same type.\n c) Yes, `5`.\n d) Yes, `False`.\n e) No, because `9` is not of type `Bool`.\n8. ```\n isPalindrome :: (Eq a) => [a] -> Bool\n isPalindrome x = (reverse x) == x\n ```\n9. ```\n myAbs :: Integer -> Integer\n myAbs a = if a > 0 then a else -a\n ```\n10. ```\n f :: (a, b) -> (c, d) -> ((b, d), (a, c))\n f x y = (((snd x), (snd y)), ((fst x), (fst y)))\n ```\n\n### Correcting Syntax\n\n1. ```\n x = (+)\n f xs = w `x` 1\n where w = length xs\n ```\n2. `\\ x -> x`\n3. `\\ (x:xs) -> x`\n4. `f (a, b) = a`\n\n### Match the function name to their types\n\n1. c\n2. b\n3. a\n4. 
d\n" }, { "alpha_fraction": 0.5659472346305847, "alphanum_fraction": 0.5923261642456055, "avg_line_length": 14.379310607910156, "blob_id": "8dbd6700fb0508eaa6ef3f845c02cc4301b050dc", "content_id": "364a4343cfc08b03d9cb10e0eee0a96165a3292e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 417, "license_type": "no_license", "max_line_length": 41, "num_lines": 29, "path": "/the-go-programming-language/ch1/src/echo/echo_bench_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package echo_test\n\nimport (\n\t\"echo\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nvar fixture = func() []string {\n\tvar result []string\n\tfor i := 0; i < 1000; i++ {\n\t\tresult = append(result, \"asdfasdfasdf\")\n\t}\n\treturn result\n}()\n\nfunc BenchmarkEcho1(b *testing.B) {\n\tfmt.Println()\n\tfor i := 0; i < b.N; i++ {\n\t\techo.Echo1(fixture)\n\t}\n}\n\nfunc BenchmarkEcho2(b *testing.B) {\n\tfmt.Println()\n\tfor i := 0; i < b.N; i++ {\n\t\techo.Echo2(fixture)\n\t}\n}\n" }, { "alpha_fraction": 0.5823529362678528, "alphanum_fraction": 0.6117647290229797, "avg_line_length": 20.25, "blob_id": "2756f0d1fa50700c0e002bb71f47111d913da3e7", "content_id": "b17f434681b4dfc3dc62d551a3a675eca6fe2d88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 170, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/golang-tour/methods_and_interfaces/view_exercise_images.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/sh\nexport GOPATH=$(pwd)\ngo get\ngo run 16_exercise_images.go |\n    tr ':' ' ' |\n    awk '{print $2}' |\n    base64 -D > exercise_images.png\nopen exercise_images.png\n" }, { "alpha_fraction": 0.5812807679176331, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 15.538461685180664, "blob_id": "7e9b6743c5643ec330ddf99a549f7a96545cc667", "content_id": "a6c8d85c69a30a3d2bc86be13799658a95f52730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 203, "license_type": "no_license", "max_line_length": 72, "num_lines": 13, "path": "/frida/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nall: frida\n\nbin:\n\tmkdir -p bin\n\nfrida: bin\n\tcd tracee && go build -o ../bin/tracee -gcflags '-N -l'\n\nmeasure:\n\t./measure.py `nm bin/tracee | grep -E '[^\\.]main.f' | awk '{print $$1}'`\n\nclean:\n\trm -r bin\n" }, { "alpha_fraction": 0.642294704914093, "alphanum_fraction": 0.642294704914093, "avg_line_length": 19.204545974731445, "blob_id": "e1943dd174483aefe12856ffb3ba1334ccfd1bcf", "content_id": "a8f24386a94716df2a7f4a59a3d6d261901a33d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 889, "license_type": "no_license", "max_line_length": 80, "num_lines": 44, "path": "/elasticsearch-experiment/write.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n)\n\nvar (\n\turl = flag.String(\"url\", \"\", \"elastic search api url\")\n\ttype_ = flag.String(\"type\", \"\", \"the type of document to insert\")\n\tindex = flag.String(\"index\", \"\", \"the index to insert the document into\")\n\tmessage = flag.String(\"message\", \"\", \"message to insert into elastic search\")\n)\n\nconst urlFmt = \"%s/%s/%s\"\n\nfunc fatal(err error) {\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n}\n\nfunc Insert(document interface{}) {\n\tdocumentJSON, err := json.Marshal(document)\n\tfatal(err)\n\tpostURL := fmt.Sprintf(urlFmt, *url, *index, *type_)\n\trequest, err := http.NewRequest(\"POST\", postURL, bytes.NewReader(documentJSON))\n\tfatal(err)\n\t_, err = http.DefaultClient.Do(request)\n\tfatal(err)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tInsert(map[string]interface{}{\n\t\t\"time\": time.Now().UnixNano(),\n\t\t\"message\": message,\n\t})\n}\n" }, { "alpha_fraction": 0.6090909242630005, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 26.5, "blob_id": "0bf8b6e49a6dd22005a05bfd9e8ae3b93ba47efd", "content_id": "dd3ee814a77c3970a312ba2dc8624a35d9e776b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": "/leetcode/first-missing-positive/test_solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom solution import first_missing_positive\n\n\nclass DescribeFirstMissingPositive:\n @pytest.mark.parametrize(['data', 'result'], [\n ([1, 2, 0], 3),\n ([3, 4, -1, 1], 2),\n ])\n def it_finds_first_missing_positive_integer(self, data, result):\n assert first_missing_positive(data) == result\n" }, { "alpha_fraction": 0.5874999761581421, "alphanum_fraction": 0.6027777791023254, "avg_line_length": 13.693877220153809, "blob_id": "00e06f6b078135d2ec9d062f6a08830a83f1cab7", "content_id": "86000d9d3d249a6481ec1bbce1679b585ba2ff2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 720, "license_type": "no_license", "max_line_length": 57, "num_lines": 49, "path": "/algorithms/compression/huffman.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package compression\n\nimport (\n\t\"github.com/jasonkeene/playground/data-structures/queue\"\n)\n\ntype Node struct {\n\tChar byte\n\tValue int\n\tLeft *Node\n\tRight *Node\n}\n\nfunc HuffmanTree(chars []byte, freqs []int) *Node {\n\tq := queue.NewHeap()\n\n\tfor i := range chars {\n\t\tq.Insert(queue.Element{\n\t\t\tKey: float64(freqs[i]),\n\t\t\tValue: &Node{\n\t\t\t\tChar: chars[i],\n\t\t\t\tValue: freqs[i],\n\t\t\t},\n\t\t})\n\t}\n\n\tvar root *Node\n\tfor {\n\t\tnode1 := q.PopMin().Value.(*Node)\n\t\tif q.Empty() {\n\t\t\troot = node1\n\t\t\tbreak\n\t\t}\n\t\tnode2 := q.PopMin().Value.(*Node)\n\n\t\tnode := &Node{\n\t\t\tValue: node1.Value + node2.Value,\n\t\t\tLeft: node1,\n\t\t\tRight: node2,\n\t\t}\n\n\t\tq.Insert(queue.Element{\n\t\t\tKey: float64(node.Value),\n\t\t\tValue: node,\n\t\t})\n\t}\n\n\treturn root\n}\n" }, { "alpha_fraction": 0.5232892632484436, "alphanum_fraction": 0.5405405163764954, "avg_line_length": 19.220930099487305, "blob_id": "3c584687e4e13a3b13b8b5a1d3b6619c9984cb68", "content_id": "e838d24c9658150470d0232cf6b41a6596594c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 65, "num_lines": 86, "path": "/zeromq-the-guide/chapter2/zeroapi/utils.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"utils.h\"\n\n\nIntList *IntList_create()\n{\n IntList *int_list = malloc(sizeof(IntList));\n int_list->head = NULL;\n return int_list;\n}\n\n\nvoid IntList_push(IntList *int_list, int data)\n{\n IntNode *new_node = 
malloc(sizeof(IntNode));\n new_node->data = data;\n new_node->next = NULL;\n if (int_list->head == NULL) {\n int_list->head = new_node;\n } else {\n IntNode *node = int_list->head;\n while (node->next != NULL) {\n node = node->next;\n }\n node->next = new_node;\n }\n}\n\n\nvoid IntList_print(IntList *int_list)\n{\n IntNode *node = int_list->head;\n printf(\"[\");\n while (node != NULL) {\n printf(\"%d\", node->data);\n node = node->next;\n if (node != NULL) {\n printf(\", \");\n }\n }\n printf(\"]\\n\");\n}\n\n\nvoid IntList_destroy(IntList *int_list)\n{\n IntNode *node = int_list->head;\n while (node != NULL) {\n IntNode *next = node->next;\n free(node);\n node = next;\n }\n free(int_list);\n}\n\n\nIntList *get_ports(int argc, char *argv[])\n{\n IntList *int_list = IntList_create();\n for (int i = 1; i < argc; i++) {\n int port = atoi(argv[i]);\n port = port < 8000 ? arc4random() % 57536 + 8000 : port;\n IntList_push(int_list, port);\n }\n return int_list;\n}\n\n\nchar *connection_str(int port)\n{\n char buffer[40];\n snprintf(buffer, sizeof(buffer), \"tcp://127.0.0.1:%i\", port);\n return strndup(buffer, sizeof(buffer));\n}\n\n\nchar *generate_node_id() {\n char buffer[8];\n for (int i = 0; i < sizeof(buffer); i++) {\n buffer[i] = arc4random() % 25 + 97;\n }\n return strndup(buffer, sizeof(buffer));\n}\n" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 21, "blob_id": "83d5eec6a891cc7ea95137c064b5590bb251ed83", "content_id": "23e401e68b6c9a390398fce1332bd9643a928133", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 44, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/the-go-programming-language/ch1/src/echo/run_bench.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/bash\ngo test -bench . 
| grep -v asdf\n" }, { "alpha_fraction": 0.45783132314682007, "alphanum_fraction": 0.5421686768531799, "avg_line_length": 16.473684310913086, "blob_id": "6d88da4aeed68afcfdc8e39c39e20d6678801013", "content_id": "a909d617ca65d0369ab64e4b5704a0af221d75ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 664, "license_type": "no_license", "max_line_length": 65, "num_lines": 38, "path": "/the-go-programming-language/ch4/src/arrays/bits_different.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"crypto/sha256\"\n\t\"fmt\"\n)\n\nvar pc [256]byte\n\nfunc init() {\n\tfor i := range pc {\n\t\tpc[i] = pc[i/2] + byte(i&1)\n\t}\n}\n\nfunc PopCount(x uint64) int {\n\treturn int(pc[byte(x>>(0*8))] +\n\t\tpc[byte(x>>(1*8))] +\n\t\tpc[byte(x>>(2*8))] +\n\t\tpc[byte(x>>(3*8))] +\n\t\tpc[byte(x>>(4*8))] +\n\t\tpc[byte(x>>(5*8))] +\n\t\tpc[byte(x>>(6*8))] +\n\t\tpc[byte(x>>(7*8))])\n}\n\nfunc BitsDifferent(h1, h2 [sha256.Size]byte) int {\n\tdiff := 0\n\tfor i := 0; i < sha256.Size; i++ {\n\t\tdiff += PopCount(uint64(h1[i] ^ h2[i]))\n\t}\n\treturn diff\n}\n\nfunc main() {\n\th1, h2 := sha256.Sum256([]byte(\"a\")), sha256.Sum256([]byte(\"A\"))\n\tfmt.Printf(\"%d bits different\\n\", BitsDifferent(h1, h2))\n}\n" }, { "alpha_fraction": 0.46136102080345154, "alphanum_fraction": 0.5478662252426147, "avg_line_length": 20.674999237060547, "blob_id": "ac5bcbaac401441dd44836a66a1bf3442a4fc206", "content_id": "96f86f60ee0724a1e96a60a2ab3062ac641ba30a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/huffman-coding/decode.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\" Decode Huffman Coding\n\nExample coding defined here: https://www.youtube.com/watch?v=ZdooBTdW5bM\n\"\"\"\n\n\nCODES = {\n '00': 'I',\n '01': 'S',\n '100': 'P',\n '101': 'R',\n '1100': 'M',\n '1101': 'V',\n '1110': 'E',\n '1111': ' ',\n}\n\n\ndef decode(encoded, codes=CODES):\n message = ''\n part = ''\n max_code_len = max(len(c) for c in codes)\n\n while len(encoded):\n encoded, char = encoded[1:], encoded[0]\n part += char\n if part in codes:\n message += codes[part]\n part = ''\n elif len(part) > max_code_len:\n raise ValueError(\"Invalid code detected: {}\".format(part))\n if part:\n raise ValueError(\"Partial code remaining: {}\".format(part))\n\n return message\n\n\nif __name__ == '__main__':\n print decode('1100000101000101001001000011111010011011110101')\n" }, { "alpha_fraction": 0.6455331444740295, "alphanum_fraction": 0.6599423885345459, "avg_line_length": 22.133333206176758, "blob_id": "f27fee67f590fb6dc3fc4e06d67185218ba53464", "content_id": "fa3367faf1bbc38ab03dc126af530f0273b2893e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 347, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/digital-fundamentals/grey-code/grey.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package grey\n\n// Convert from binary representation to grey code. O(1)\nfunc ToGrey(x uint) uint {\n\treturn x>>1 ^ x\n}\n\n// Convert from grey code to binary representation. 
O(n) where n is the place\n// of the most significant binary digit.\nfunc FromGrey(x uint) uint {\n\tfor mask := x >> 1; mask != 0; mask = mask >> 1 {\n\t\tx = x ^ mask\n\t}\n\treturn x\n}\n" }, { "alpha_fraction": 0.5620608925819397, "alphanum_fraction": 0.5725995302200317, "avg_line_length": 16.79166603088379, "blob_id": "efc18356cd64f6c9979a27c44aa8036d40382c54", "content_id": "fcb626de3047fb02936f2b3e3006112cf25a073b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 854, "license_type": "no_license", "max_line_length": 68, "num_lines": 48, "path": "/algorithms/graph/dijkstra.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nimport (\n\t\"math\"\n\n\t\"github.com/jasonkeene/playground/data-structures/queue\"\n)\n\nfunc Dijkstra(s int, g Graph, q queue.Priority) ([]float64, []int) {\n\tfor i := range g.Nodes {\n\t\tkey := math.Inf(1)\n\t\tif i == s {\n\t\t\tkey = 0\n\t\t}\n\t\tq.Insert(queue.Element{\n\t\t\tKey: key,\n\t\t\tValue: i,\n\t\t})\n\t}\n\tshortest := make([]float64, len(g.Nodes))\n\tfor i := range shortest {\n\t\tsh := math.Inf(1)\n\t\tif i == s {\n\t\t\tsh = 0\n\t\t}\n\t\tshortest[i] = sh\n\t}\n\tprev := make([]int, len(g.Nodes))\n\tfor i := range prev {\n\t\tprev[i] = -1\n\t}\n\n\tfor !q.Empty() {\n\t\ti := q.PopMin().Value.(int)\n\t\tfor _, e := range g.Edges[i] {\n\t\t\ttmpShortest := shortest[e.Target]\n\t\t\trelax(i, e.Target, e.Weight, shortest, prev)\n\t\t\tif shortest[e.Target] < tmpShortest {\n\t\t\t\tq.Decrease(queue.Element{\n\t\t\t\t\tKey: shortest[e.Target],\n\t\t\t\t\tValue: e.Target,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn shortest, prev\n}\n" }, { "alpha_fraction": 0.5672131180763245, "alphanum_fraction": 0.6721311211585999, "avg_line_length": 15, "blob_id": "3e9ee928ba5f8d0fc9fbd3ce3801e1e7c313b749", "content_id": "e0a0534db6f5d715d3f3f1fc30ed725ff54fd604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 305, "license_type": "no_license", "max_line_length": 97, "num_lines": 19, "path": "/elasticsearch-experiment/README.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Example\n\nStarting elastic search:\n\n```bash\n./docker-up.sh\n```\n\nInserting document:\n\n```bash\ngo run write.go -url \"http://192.168.99.100:32773\" -index debug -type test -message \"hello world\"\n```\n\nReading documents:\n\n```bash\ngo run read.go -url \"http://192.168.99.100:32773\" -index debug -type test\n```\n" }, { "alpha_fraction": 0.5293551683425903, "alphanum_fraction": 0.5534167289733887, "avg_line_length": 22.08888816833496, "blob_id": "73ccab09be586b603e2dcd6999432d161a055463", "content_id": "e4ab3f9e76647b31a39b3d1ee5e28a66b4e4dfd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 67, "num_lines": 45, "path": "/cpp-how-to-program/chapter9/fig09_03.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <stdexcept>\n\n#include \"Time.h\"\n\nint main(int argc, char *argv[])\n{\n Time &t = *(new Time());\n\n std::cout << \"initial universal time: \";\n t.printUniversal();\n std::cout << std::endl;\n\n std::cout << \"initial standard time: \";\n t.printStandard();\n std::cout << std::endl;\n\n std::cout << \"trying to set to 13, 40, 30\" << std::endl;\n t.setTime(13, 40, 30);\n\n std::cout << \"new universal time: \";\n 
t.printUniversal();\n std::cout << std::endl;\n\n std::cout << \"new standard time: \";\n t.printStandard();\n std::cout << std::endl;\n\n std::cout << \"trying to set to 55, 40, 30\" << std::endl;\n try {\n t.setTime(55, 40, 30);\n } catch (std::invalid_argument &e) {\n std::cout << \"caught exception: \" << e.what() << std::endl;\n }\n\n std::cout << \"after exception universal time: \";\n t.printUniversal();\n std::cout << std::endl;\n\n std::cout << \"after exception standard time: \";\n t.printStandard();\n std::cout << std::endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.715133547782898, "alphanum_fraction": 0.721068263053894, "avg_line_length": 54.66666793823242, "blob_id": "74892d132fb48c6c8083c7c0aa3fabb01a0d6952", "content_id": "5c6e35b4be14088b29061f7d3e6b74724ed7a616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 337, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/haskell-book/ch4/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n - Type constructor is the name of the datatype\n - Data constructors are the valid values of a datatype?\n - Type classes are kind of like interfaces in golang\n - Term level is similar to run time and Type level is similar to compile time\n - Why does this not work? `(6 :: Int) / (2 :: Int)`\n - Is there a precedence table for Haskell?\n\n" }, { "alpha_fraction": 0.6123595237731934, "alphanum_fraction": 0.6441947817802429, "avg_line_length": 14.70588207244873, "blob_id": "26d3db52e4161042a3906941c058ff0533470b23", "content_id": "b6a8bdb895b174ee7f42ea76c1a0263d35ba0159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 37, "num_lines": 34, "path": "/frida/measure.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport frida\nimport sys\nimport time\n\nsession = frida.attach(\"tracee\")\nscript = session.create_script(\"\"\"\nvar count = 0;\n\nsetInterval(function () {\n send(count);\n count = 0;\n}, 1000);\n\nInterceptor.attach(ptr(\"%s\"), {\n onEnter: function(args) {\n count++;\n }\n});\n\"\"\" % int(sys.argv[1], 16))\n\ndef on_message(message, data):\n print(message)\n\nscript.on('message', on_message)\n\nscript.load()\n\ntry:\n time.sleep(99999999)\nexcept KeyboardInterrupt:\n pass\n" }, { "alpha_fraction": 0.5628238320350647, "alphanum_fraction": 0.5630667209625244, "avg_line_length": 29.877500534057617, "blob_id": "31936ad41d17d6a0474764f631ad2694e452fd85", "content_id": "6104aadd86ba93c3afed3a395eb0e770f8e60f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12354, "license_type": "no_license", "max_line_length": 81, "num_lines": 400, "path": "/haskell-book/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n[Haskell Programming from First Principles](http://haskellbook.com/progress.html)\n\n - [x] Contents\n - [x] License\n - [x] Authors' preface\n - [x] Acknowledgements\n - [x] Introduction\n - [x] Why This Book\n - [x] A few words to new programmers\n - [x] Haskevangelism\n - [x] What's in this book?\n - [x] Best practices for examples and exercises\n - [x] All You Need is Lambda\n - [x] All You Need is Lambda\n - [x] What is functional programming?\n - [x] What is a function?\n - [x] The structure of lambda terms\n 
- [x] Beta reduction\n - [x] Multiple arguments\n - [x] Evaluation is simplification\n - [x] Combinators\n - [x] Divergence\n - [x] Summary\n - [x] Chapter Exercises\n - [x] Answers\n - [x] Definitions\n - [x] Follow-up resources\n - [x] Hello, Haskell!\n - [x] Hello, Haskell\n - [x] Interacting with Haskell code\n - [x] Understanding expressions\n - [x] Functions\n - [x] Infix operators\n - [x] Declaring values\n - [x] Arithmetic functions in Haskell\n - [x] Negative numbers\n - [x] Parenthesizing infix functions\n - [x] Laws for quotients and remainders\n - [x] Evaluation\n - [x] Let and where\n - [x] Chapter Exercises\n - [x] Definitions\n - [x] Follow-up resources\n - [x] Strings\n - [x] Printing strings\n - [x] A first look at types\n - [x] Printing simple strings\n - [x] Types of concatenation functions\n - [x] Concatenation and scoping\n - [x] More list functions\n - [x] Chapter Exercises\n - [x] Definitions\n - [x] Basic datatypes\n - [x] Basic Datatypes\n - [x] Anatomy of a data declaration\n - [x] Numeric types\n - [x] Comparing values\n - [x] Tuples\n - [x] Lists\n - [x] Chapter Exercises\n - [x] Definitions\n - [x] Types\n - [x] Types\n - [x] What are types?\n - [x] Querying and Reading Types\n - [x] Typeclass constrained type variables\n - [x] Currying\n - [x] Polymorphism\n - [x] Type inference\n - [x] Asserting types for declarations\n - [x] Chapter Exercises\n - [x] Definitions\n - [x] Follow-up resources\n - [ ] Typeclasses\n - [x] Typeclasses\n - [x] What are typeclasses?\n - [x] Back to Bool\n - [x] Eq\n - [x] Num\n - [x] Type-defaulting typeclasses\n - [ ] Ord (left off on page 203, exercises)\n - [ ] Enum\n - [ ] Show\n - [ ] Read\n - [ ] Instances are dispatched by type\n - [ ] Writing typeclass instances\n - [ ] Gimme more operations\n - [ ] Chapter Exercises\n - [ ] Chapter Definitions\n - [ ] Typeclass inheritance, partial\n - [ ] Follow-up resources\n - [ ] More functional patterns\n - [ ] Make it func-y\n - [ ] Arguments and parameters\n - [ ] Anonymous functions\n - [ ] Pattern matching\n - [ ] Case expressions\n - [ ] Higher-order functions\n - [ ] Guards\n - [ ] Function composition\n - [ ] Pointfree style\n - [ ] Demonstrating composition\n - [ ] Chapter Exercises\n - [ ] Chapter Definitions\n - [ ] Follow-up resources\n - [ ] Recursion\n - [ ] Recursion\n - [ ] Factorial\n - [ ] Bottom\n - [ ] Fibonacci numbers\n - [ ] Integral division from scratch\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Lists\n - [ ] Lists\n - [ ] The list datatype\n - [ ] Pattern matching on lists\n - [ ] List's syntactic sugar\n - [ ] Using ranges to construct lists\n - [ ] Extracting portions of lists\n - [ ] List comprehensions\n - [ ] Spines and non-strict evaluation\n - [ ] Transforming lists of values\n - [ ] Filtering lists of values\n - [ ] Zipping lists\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Folding lists\n - [ ] Folds\n - [ ] Bringing you into the fold\n - [ ] Recursive patterns\n - [ ] Fold right\n - [ ] Fold left\n - [ ] How to write fold functions\n - [ ] Folding and evaluation\n - [ ] Summary\n - [ ] Scans\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Algebraic datatypes\n - [ ] Algebraic datatypes\n - [ ] Data declarations review\n - [ ] Data and type constructors\n - [ ] Data constructors and values\n - [ ] What's a type and what's data?\n - [ ] Data constructor arities\n - [ ] What makes these datatypes algebraic?\n - [ ] Sum types\n - [ ] Product types\n - [ ] Normal form\n - [ ] 
Constructing and deconstructing values\n - [ ] Function type is exponential\n - [ ] Higher-kinded datatypes\n - [ ] Lists are polymorphic\n - [ ] Binary Tree\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Signaling adversity\n - [ ] Signaling adversity\n - [ ] How I learned to stop worrying and love Nothing\n - [ ] Bleating either\n - [ ] Kinds, a thousand stars in your types\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Building projects\n - [ ] Modules\n - [ ] Making packages with Stack\n - [ ] Working with a basic project\n - [ ] Making our project a library\n - [ ] Module exports\n - [ ] More on importing modules\n - [ ] Making our program interactive\n - [ ] do syntax and IO\n - [ ] Hangman game\n - [ ] Step One: Importing modules\n - [ ] Step Two: Generating a word list\n - [ ] Step Three: Making a puzzle\n - [ ] Adding a newtype\n - [ ] Chapter exercises\n - [ ] Follow-up resources\n - [ ] Testing\n - [ ] Testing\n - [ ] A quick tour of testing for the uninitiated\n - [ ] Conventional testing\n - [ ] Enter QuickCheck\n - [ ] Morse code\n - [ ] Kicking around QuickCheck\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Monoid, Semigroup\n - [ ] Monoids and semigroups\n - [ ] What we talk about when we talk about algebras\n - [ ] Monoid\n - [ ] How Monoid is defined in Haskell\n - [ ] Examples of using Monoid\n - [ ] Why Integer doesn't have a Monoid\n - [ ] Why bother?\n - [ ] Laws\n - [ ] Different instance, same representation\n - [ ] Reusing algebras by asking for algebras\n - [ ] Madness\n - [ ] Better living through QuickCheck\n - [ ] Semigroup\n - [ ] Chapter exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Functor\n - [ ] Functor\n - [ ] What's a functor?\n - [ ] There's a whole lot of fmap going round\n - [ ] Let's talk about f, baby\n - [ ] Functor Laws\n - [ ] The Good, the Bad, and the Ugly\n - [ ] Commonly used functors\n - [ ] Transforming the unapplied type argument\n - [ ] QuickChecking Functor instances\n - [ ] Exercises: Instances of Func\n - [ ] Ignoring possibilities\n - [ ] A somewhat surprising functor\n - [ ] More structure, more functors\n - [ ] IO Functor\n - [ ] What if we want to do something different?\n - [ ] Functors are unique to a datatype\n - [ ] Chapter exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Applicative\n - [ ] Applicative\n - [ ] Defining Applicative\n - [ ] Functor vs. 
Applicative\n - [ ] Applicative functors are monoidal functors\n - [ ] Applicative in use\n - [ ] Applicative laws\n - [ ] You knew this was coming\n - [ ] ZipList Monoid\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Monad\n - [ ] Monad\n - [ ] Sorry — Monad is not a burrito\n - [ ] Do syntax and monads\n - [ ] Examples of Monad use\n - [ ] Monad laws\n - [ ] Application and composition\n - [ ] Chapter Exercises\n - [ ] Definition\n - [ ] Follow-up resources\n - [ ] Applying structure\n - [ ] Applied structure\n - [ ] Monoid\n - [ ] Functor\n - [ ] Applicative\n - [ ] Monad\n - [ ] An end-to-end example: URL shortener\n - [ ] That's a wrap!\n - [ ] Follow-up resources\n - [ ] Foldable\n - [ ] Foldable\n - [ ] The Foldable class\n - [ ] Revenge of the monoids\n - [ ] Demonstrating Foldable instances\n - [ ] Some basic derived operations\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] Traversable\n - [ ] Traversable\n - [ ] The Traversable typeclass definition\n - [ ] sequenceA\n - [ ] traverse\n - [ ] So, what's traversable for?\n - [ ] Morse code revisited\n - [ ] Axing tedious code\n - [ ] Do all the things\n - [ ] Traversable instances\n - [ ] Traversable Laws\n - [ ] Quality Control\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] Reader\n - [ ] Reader\n - [ ] A new beginning\n - [ ] This is Reader\n - [ ] Breaking down the Functor of functions\n - [ ] But uh, Reader?\n - [ ] Functions have an Applicative too\n - [ ] The Monad of functions\n - [ ] Reader Monad by itself is kinda boring\n - [ ] You can change what comes below, but not above\n - [ ] You tend to see ReaderT, not Reader\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] State\n - [ ] State\n - [ ] What is state?\n - [ ] Random numbers\n - [ ] The State newtype\n - [ ] Throw down\n - [ ] Write State for yourself\n - [ ] Get a coding job with one weird trick\n - [ ] Chapter exercises\n - [ ] Follow-up resources\n - [ ] Parser combinators\n - [ ] Parser combinators\n - [ ] A few more words of introduction\n - [ ] Understanding the parsing process\n - [ ] Parsing fractions\n - [ ] Haskell's parsing ecosystem\n - [ ] Alternative\n - [ ] Parsing configuration files\n - [ ] Character and token parsers\n - [ ] Polymorphic parsers\n - [ ] Marshalling from an AST to a datatype\n - [ ] Chapter Exercises\n - [ ] Definitions\n - [ ] Follow-up resources\n - [ ] Composing types\n - [ ] Composing types\n - [ ] Common functions as types\n - [ ] Two little functors sittin' in a tree, L-I-F-T-I-N-G\n - [ ] Twinplicative\n - [ ] Twonad?\n - [ ] Exercises: Compose Instances\n - [ ] Monad transformers\n - [ ] IdentityT\n - [ ] Finding a pattern\n - [ ] Monad transformers\n - [ ] Monad transformers\n - [ ] MaybeT\n - [ ] EitherT\n - [ ] ReaderT\n - [ ] StateT\n - [ ] Types you probably don't want to use\n - [ ] Recovering an ordinary type from a transformer\n - [ ] Lexically inner is structurally outer\n - [ ] MonadTrans\n - [ ] MonadIO aka zoom-zoom\n - [ ] Monad transformers in use\n - [ ] Monads do not commute\n - [ ] Transform if you want to\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] Non-strictness\n - [ ] Laziness\n - [ ] Observational Bottom Theory\n - [ ] Outside in, inside out\n - [ ] What does the other way look like?\n - [ ] Call by name, call by need\n - [ ] Non-strict evaluation changes what we can do\n - [ ] Thunk Life\n - [ ] Sharing is caring\n - [ ] Refutable and irrefutable patterns\n - [ ] Bang-patterns\n - [ ] Strict and StrictData\n - 
[ ] Adding strictness\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] Basic libraries\n - [ ] Basic libraries and data structures\n - [ ] Benchmarking with Criterion\n - [ ] Profiling your programs\n - [ ] Constant applicative forms\n - [ ] Map\n - [ ] Set\n - [ ] Sequence\n - [ ] Vector\n - [ ] String types\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] IO\n - [ ] IO\n - [ ] Where IO explanations go astray\n - [ ] The reason we need this type\n - [ ] Sharing\n - [ ] IO doesn't disable sharing for everything\n - [ ] Purity is losing meaning\n - [ ] IO's Functor, Applicative, and Monad\n - [ ] Well, then, how do we MVar?\n - [ ] Chapter Exercises\n - [ ] Follow-up resources\n - [ ] When things go wrong\n - [ ] Exceptions\n - [ ] The Exception class and methods\n - [ ] This machine kills programs\n - [ ] Want either? Try!\n - [ ] The unbearable imprecision of trying\n - [ ] Why throwIO?\n - [ ] Making our own exception types\n - [ ] Surprising interaction with bottom\n - [ ] Asynchronous Exceptions\n - [ ] Follow-up Reading\n - [ ] Final project\n - [ ] Final project\n - [ ] fingerd\n - [ ] Exploring finger\n - [ ] Slightly modernized fingerd\n - [ ] Chapter Exercises\n" }, { "alpha_fraction": 0.5173501372337341, "alphanum_fraction": 0.5621451139450073, "avg_line_length": 21.97101402282715, "blob_id": "dd9e21d4ea485dcec2dae5f252233ea6d747aaa5", "content_id": "af449add827b51a052dff486f6f1607945ab145d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1585, "license_type": "no_license", "max_line_length": 63, "num_lines": 69, "path": "/algorithms/search/binary_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package search_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\n\t\"github.com/jasonkeene/playground/algorithms/search\"\n)\n\nfunc TestBinarySearchCorrectness(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdata []int\n\t\ttcs map[string]struct {\n\t\t\tsearch int\n\t\t\texpected int\n\t\t}\n\t}{\n\t\t\"ordered slice of integers\": {\n\t\t\t[]int{0, 1, 3, 6, 9, 42, 53, 71, 90, 567, 998, 1000, 10001},\n\t\t\tmap[string]struct{ search, expected int }{\n\t\t\t\t\"element exists\": {90, 8},\n\t\t\t\t\"element does not exist\": {999, -1},\n\t\t\t\t\"element is smaller than 0th element\": {-20, -1},\n\t\t\t\t\"element is larger than nth element\": {99999, -1},\n\t\t\t},\n\t\t},\n\t\t\"ordered two element slice\": {\n\t\t\t[]int{55, 90},\n\t\t\tmap[string]struct{ search, expected int }{\n\t\t\t\t\"element exists\": {90, 1},\n\t\t\t\t\"element does not exist\": {999, -1},\n\t\t\t},\n\t\t},\n\t\t\"single element slice\": {\n\t\t\t[]int{90},\n\t\t\tmap[string]struct{ search, expected int }{\n\t\t\t\t\"element exists\": {90, 0},\n\t\t\t\t\"element does not exist\": {999, -1},\n\t\t\t},\n\t\t},\n\t\t\"empty slice\": {\n\t\t\t[]int{},\n\t\t\tmap[string]struct{ search, expected int }{\n\t\t\t\t\"element does not exist\": {90, -1},\n\t\t\t},\n\t\t},\n\t\t\"nil\": {\n\t\t\tnil,\n\t\t\tmap[string]struct{ search, expected int }{\n\t\t\t\t\"element does not exist\": {90, -1},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor tcName, tc := range test.tcs {\n\t\t\t\tt.Run(tcName, func(t *testing.T) {\n\t\t\t\t\tresult := search.BinarySearch(test.data, tc.search)\n\n\t\t\t\t\tif result != tc.expected {\n\t\t\t\t\t\tt.Fatal(cmp.Diff(result, tc.expected))\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { 
"alpha_fraction": 0.43913042545318604, "alphanum_fraction": 0.4434782564640045, "avg_line_length": 23.571428298950195, "blob_id": "d64d50b12514aced033c4d23d549e11c8b7e1424", "content_id": "5075e658c261acdc17a7e005a3419ca2297649d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/leetcode/word-ladder/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\ndef off_by_one(a, b):\n off = False\n for i in xrange(len(a)):\n if a[i] != b[i]:\n if off is False:\n off = True\n else:\n return False\n return off\n\n\ndef ladder_length(start, end, dictionary):\n dictionary |= {end}\n count = 1\n frame = {start}\n while True:\n if not frame:\n return 0\n dictionary -= frame\n count += 1\n related = set()\n for word in frame:\n for dword in dictionary:\n if off_by_one(word, dword):\n if dword == end:\n return count\n related.add(dword)\n frame = related\n" }, { "alpha_fraction": 0.5597387552261353, "alphanum_fraction": 0.5724164247512817, "avg_line_length": 18.28148078918457, "blob_id": "5f8b607d7b1e7531ad0dd4be3848f99b856fd372", "content_id": "70d3209e862ac022ee7f916b1c61e71c0fe61539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2603, "license_type": "no_license", "max_line_length": 97, "num_lines": 135, "path": "/algorithms/str/lcs.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package str\n\nfunc LCS(a, b string) string {\n\tcache := make(map[string]string)\n\treturn lcs(a, b, cache)\n}\n\nfunc lcs(a, b string, cache map[string]string) string {\n\tv, ok := cache[key(a, b)]\n\tif ok {\n\t\treturn v\n\t}\n\n\tif len(a) == 0 || len(b) == 0 {\n\t\tcache[key(a, b)] = \"\"\n\t\treturn \"\"\n\t}\n\n\tif a[len(a)-1] == b[len(b)-1] {\n\t\tv = lcs(a[:len(a)-1], b[:len(b)-1], cache) + string(b[len(b)-1])\n\t\tcache[key(a, b)] = v\n\t\treturn v\n\t}\n\tav := lcs(a[:len(a)-1], b, cache)\n\tbv := lcs(a, b[:len(b)-1], cache)\n\tif av > bv {\n\t\tcache[key(a, b)] = av\n\t\treturn av\n\t}\n\tcache[key(a, b)] = bv\n\treturn bv\n}\n\nfunc key(a, b string) string {\n\treturn a + \"|\" + b\n}\n\nfunc LCSTable(a, b string) string {\n\ttable := LCSLengthTable(a, b)\n\treturn assembleLCS(a, b, table, len(a)-1, len(b)-1)\n}\n\nfunc assembleLCS(a, b string, table [][]int, i, j int) string {\n\tif value(table, i, j) == 0 {\n\t\treturn \"\"\n\t}\n\n\tif a[i] == b[j] {\n\t\treturn assembleLCS(a, b, table, i-1, j-1) + string(a[i])\n\t}\n\n\tif above(table, i, j) > left(table, i, j) {\n\t\treturn assembleLCS(a, b, table, i-1, j)\n\t}\n\n\treturn assembleLCS(a, b, table, i, j-1)\n}\n\nfunc AllLCS(a, b string) map[string]struct{} {\n\tresults := make(map[string]struct{})\n\ttable := LCSLengthTable(a, b)\n\tassembleAllLCS(a, b, \"\", table, results, len(a)-1, len(b)-1)\n\treturn results\n}\n\nfunc assembleAllLCS(a, b, partial string, table [][]int, results map[string]struct{}, i, j int) {\n\tif value(table, i, j) == 0 {\n\t\tresults[partial] = struct{}{}\n\t\treturn\n\t}\n\n\tif a[i] == b[j] {\n\t\tassembleAllLCS(a, b, string(a[i])+partial, table, results, i-1, j-1)\n\t\treturn\n\t}\n\n\tu := above(table, i, j)\n\tl := left(table, i, j)\n\n\tif u > l {\n\t\tassembleAllLCS(a, b, partial, table, results, i-1, j)\n\t\treturn\n\t}\n\n\tif u < l {\n\t\tassembleAllLCS(a, b, partial, table, results, i, j-1)\n\t\treturn\n\t}\n\n\tassembleAllLCS(a, b, partial, table, results, i-1, 
j)\n\tassembleAllLCS(a, b, partial, table, results, i, j-1)\n}\n\nfunc LCSLengthTable(a, b string) [][]int {\n\ttable := make([][]int, len(a))\n\tfor i := range table {\n\t\ttable[i] = make([]int, len(b))\n\n\t\tfor j := range table[i] {\n\t\t\tif a[i] == b[j] {\n\t\t\t\ttable[i][j] = diagonal(table, i, j) + 1\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttable[i][j] = max(above(table, i, j), left(table, i, j))\n\t\t}\n\t}\n\treturn table\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc value(table [][]int, i, j int) int {\n\tif i < 0 || j < 0 {\n\t\treturn 0\n\t}\n\treturn table[i][j]\n}\n\nfunc diagonal(table [][]int, i, j int) int {\n\treturn value(table, i-1, j-1)\n}\n\nfunc above(table [][]int, i, j int) int {\n\treturn value(table, i-1, j)\n}\n\nfunc left(table [][]int, i, j int) int {\n\treturn value(table, i, j-1)\n}\n" }, { "alpha_fraction": 0.7109488844871521, "alphanum_fraction": 0.7167882919311523, "avg_line_length": 17.513513565063477, "blob_id": "55cab12c9a96e6eaa5e4753b38b7839f0875ca6b", "content_id": "149c5915316008ae81df7929a9ed6cc7c89f7df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 685, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/saltstack/prototypes/bin/bootstrap-master.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# update packages\n#apt-get update\n#apt-get upgrade --yes\n\n# install salt minion\n/vagrant/bin/install-minion.sh\n\n# link in salt states and pillar data\nln -s /vagrant/salt /srv/salt\nln -s /vagrant/pillar /srv/pillar\n\n# configure salt minion to bootstrap\ncat << EOF > /etc/salt/minion\nfile_client: local\nfile_roots:\n base:\n - /srv/salt\ngrains:\n roles:\n - master\nEOF\n\n# highstate!\nsalt-call --local state.highstate\n\n# set hostname\nsalt-call network.mod_hostname master\n\n# copy over test_key\nMINION_KEYS=$(echo /etc/salt/pki/master/minions/{proxy{1,2},app{1,2}})\ncat /vagrant/salt/minion.pub | tee $MINION_KEYS > /dev/null\n\necho\necho\necho \"Provision complete!\"\n" }, { "alpha_fraction": 0.534246563911438, "alphanum_fraction": 0.5677320957183838, "avg_line_length": 20.19354820251465, "blob_id": "c04b653e254bd7e9dd37ef1755f4019a3982f3a4", "content_id": "8515a1a4867da44082fc2de9ac9caa5d4bac795e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 657, "license_type": "no_license", "max_line_length": 49, "num_lines": 31, "path": "/zeromq-the-guide/chapter1/worker.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n // socket to receive jobs from\n void *puller = zmq_socket(context, ZMQ_PULL);\n zmq_connect(puller, \"tcp://127.0.0.1:5557\");\n\n // socket to send jobs to sink\n void *pusher = zmq_socket(context, ZMQ_PUSH);\n zmq_connect(pusher, \"tcp://127.0.0.1:5558\");\n\n while (1) {\n char *string = s_recv(puller);\n printf(\"%s.\", string);\n fflush(stdout);\n s_sleep(atoi(string));\n free(string);\n s_send(pusher, \"\");\n }\n\n zmq_close(puller);\n zmq_close(pusher);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.4603591859340668, "alphanum_fraction": 0.4883924722671509, "avg_line_length": 25.546510696411133, "blob_id": "aedd9108667e5ce1bf5ba96b3430ad501d112c65", "content_id": "d386dbb76380f37b57d9d9aa962303c0d48997ce", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 76, "num_lines": 86, "path": "/golang-tour/concurrency/08_exercise_equivalent_binary_trees.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"fmt\"\n \"sort\"\n \"code.google.com/p/go-tour/tree\"\n)\n\n// Walk walks the tree t sending all values\n// from the tree to the channel ch.\nfunc Walk(t *tree.Tree, ch chan int) {\n ch <- t.Value\n if t.Left != nil {\n Walk(t.Left, ch)\n }\n if t.Right != nil {\n Walk(t.Right, ch)\n }\n}\n\n// Same determines whether the trees\n// t1 and t2 contain the same values.\nfunc Same(t1, t2 *tree.Tree) bool {\n ch1, ch2, done := make(chan int), make(chan int), make(chan int)\n r1, r2 := make([]int, 0), make([]int, 0)\n\n go func () {\n Walk(t1, ch1)\n done <- 1\n }()\n go func () {\n Walk(t2, ch2)\n done <- 1\n }()\n for n := 2; n > 0; {\n select {\n // these two cases are almost identical, I wish I knew how to\n // write a case that reads when both channels are ready\n case v1 := <-ch1:\n // this read could block indefinitely if t1 and t2\n // have different sizes\n v2 := <-ch2\n if v1 != v2 {\n // push these values into a slice for comparison later\n r1 = append(r1, v1)\n r2 = append(r2, v2)\n }\n case v2 := <-ch2:\n // this read could block indefinitely if t1 and t2\n // have different sizes\n v1 := <-ch1\n if v1 != v2 {\n // push these values into a slice for comparison later\n r1 = append(r1, v1)\n r2 = append(r2, v2)\n }\n case <-done:\n n--\n }\n }\n\n // if there are any remaining values, make sure they are all the same\n if len(r1) != 0 || len(r2) != 0 {\n if len(r1) != len(r2) {\n // this will never get hit as it would block indefinitely in the\n // select above\n return false\n }\n // sort slices to make them easy to compare\n is1 := sort.IntSlice(r1)\n is2 := sort.IntSlice(r2)\n sort.Sort(is1)\n sort.Sort(is2)\n for i, x := range is1 {\n if x != is2[i] {\n return false\n }\n }\n }\n return true\n}\n\nfunc main() {\n fmt.Println(Same(tree.New(4), tree.New(4)))\n fmt.Println(Same(tree.New(3), tree.New(4)))\n}\n" }, { "alpha_fraction": 0.8636363744735718, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 43, "blob_id": "b1385d0b47971160c9b8857de046ff0b1ac89dc5", "content_id": "be27611dba80b832b34e03b23b20c3bedf49f8a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 44, "license_type": "no_license", "max_line_length": 43, "num_lines": 1, "path": "/lcs/go.mod", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "module github.com/jasonkeene/playground/lcs\n" }, { "alpha_fraction": 0.6319078803062439, "alphanum_fraction": 0.6325657963752747, "avg_line_length": 23.50806427001953, "blob_id": "4eb95b51b3bffadf82eda489c18f9286c950a7b0", "content_id": "a5d7d273ad7dc433cc43cc44ab0c12462521362e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3040, "license_type": "no_license", "max_line_length": 71, "num_lines": 124, "path": "/bosh/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Documentation](https://bosh.io/docs)\n\n### Introduction\n\n - [x] What is BOSH?\n\t - [x] What problems does BOSH solve?\n\t\t - [x] Stemcell\n\t\t - [x] Release\n\t\t - [x] Deployment\n\t - ~~Comparison to other tools~~\n - [x] Terminology\n\n### Install BOSH\n\n#### Install BOSH with bosh-init\n\n - [x] 
BOSH components\n - [x] Install bosh-init\n - [ ] Using bosh-init\n - [ ] Migrating to bosh-init from the micro CLI plugin\n - [x] Install BOSH CLI\n - [x] Bootstrapping an environment\n\t - [ ] On AWS\n\t - ~~On OpenStack~~\n\t - ~~On vSphere~~\n\t - ~~On vCloud~~\n\t - [x] On Local machine using BOSH Lite\n\n - [ ] Director SSL Certificate Configuration\n - [ ] Using bosh-init\n \t - [ ] Migrating to bosh-init from the micro CLI plugin\n - [ ] Install BOSH CLI\n\n#### Install BOSH with the micro CLI plugin [deprecated, but supported]\n\n - [ ] Bootstrapping an environment\n \t - [ ] On AWS\n \t - ~~On OpenStack~~\n \t - ~~On vSphere~~\n\n#### Advanced Director configuration\n\n - [ ] User management\n\t - [ ] UAA Integration\n - [ ] Configuring External Database\n - [ ] Configuring External Blobstore\n - [ ] Troubleshooting\n\n### Using BOSH to deploy software\n\n - [ ] Basic workflow\n\t - [ ] Deployment basics\n\t\t - [ ] Deployment manifest schema\n\t - [ ] Uploading stemcells\n\t - [ ] Uploading releases\n\t - [ ] Deploying\n\t - [ ] Running one off tasks\n\t - [ ] Updating deployment to deal with security vulnerabilities\n - [ ] Deploying step-by-step\n - [ ] CLI Commands\n - [ ] Director tasks\n\n#### Detailed Deployment Configuration\n\n - [ ] Deployment Jobs\n - [ ] Resource Pools\n\t - [ ] VM anti-affinity\n - [ ] Networks\n - [ ] Persistent disks\n - [ ] Trusted certificates\n - [ ] IaaS specifics\n\t - [ ] AWS\n\t\t - [ ] Using IAM instance profiles\n\t - [ ] Using instance storage\n\t - ~~OpenStack~~\n\t - ~~vSphere~~\n\t - ~~vCloud~~\n\t - ~~Azure [beta]~~\n\t - [ ] Warden/Garden\n\n#### Health Management of VMs and Processes\n\n - [ ] Monitoring\n\t - [ ] Configuring Health Monitor\n - [ ] Process monitoring with Monit\n - [ ] Manual repair with Cloud Check\n - [ ] Automatic repair with Resurrector\n - [ ] Persistent disk snapshotting\n\n#### VM Configuration (Looking inside a Deployment)\n\n - [ ] Structure of a BOSH VM\n\t - [ ] VM Configuration Locations\n - [ ] Location and use of logs\n - [ ] Debugging issues with jobs\n\n### Using BOSH to package and distribute software\n\n - [ ] What is a release?\n \t - [ ] Creating a release\n\t - [ ] Testing with dev releases\n\t - [ ] Cutting final releases\n\t\t - [ ] Versioning of releases\n - [ ] What is a job?\n\t - [ ] Creating a job\n\t - [ ] Job properties\n\t - [ ] Job lifecycle\n\t\t - [ ] Pre-start script\n\t\t - [ ] Drain script\n - [ ] What is a package?\n\t - [ ] Creating a package\n\t - [ ] Relationship to release blobs\n - [ ] How do releases, jobs, and packages interact?\n - [ ] Managing release repository\n\t - [ ] Release blobstore\n\t\t - [ ] Configuring S3 release blobstore\n\n### Extending BOSH to support other IaaSs\n\n - [ ] What is a CPI?\n - [ ] Building a CPI\n\t - [ ] CPI API v1\n\t - [ ] Interactions between CPIs and BOSH Agent\n - [ ] Building a stemcell\n" }, { "alpha_fraction": 0.6607629656791687, "alphanum_fraction": 0.692098081111908, "avg_line_length": 37.578948974609375, "blob_id": "3f7389be938d2631003aa341ce5fd00257fd4609", "content_id": "c3511cdb8766dab4b2fb3127c20c9a7bbe9be09b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 734, "license_type": "no_license", "max_line_length": 68, "num_lines": 19, "path": "/java-concurrency-in-practice/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Java Concurrency in Practice](http://jcip.net)\n\n - [x] Preface\n - [x] Chapter 1 - Introduction\n - [x] 
Chapter 2 - Thread Safety\n - [x] Chapter 3 - Sharing Objects\n - [x] Chapter 4 - Composing Objects\n - [x] Chapter 5 - Building Blocks\n - [x] Chapter 6 - Task Execution\n - [x] Chapter 7 - Cancellation and Shutdown\n - [x] Chapter 8 - Applying Thread Pools\n - [x] Chapter 9 - GUI Applications\n - [x] Chapter 10 - Avoiding Liveness Hazards\n - [x] Chapter 11 - Performance and Scalability\n - [x] Chapter 12 - Testing Concurrent Programs\n - [x] Chapter 13 - Explicit Locks\n - [x] Chapter 14 - Building Custom Synchronizers\n - [x] Chapter 15 - Atomic Variables and Nonblocking Synchronization\n - [x] Chapter 16 - The Java Memory Model\n" }, { "alpha_fraction": 0.6929460763931274, "alphanum_fraction": 0.6929460763931274, "avg_line_length": 18.200000762939453, "blob_id": "eef33939dc69eb89c2839f4ae54337bc332e826d", "content_id": "b51ca91bd7b068048a325b7f5a69f2f975528150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 482, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/zeromq-the-guide/chapter2/zeroapi/utils.h", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\ntypedef struct IntNode\n{\n int data;\n struct IntNode *next;\n} IntNode;\n\n\ntypedef struct IntList\n{\n IntNode *head;\n} IntList;\n\n\nIntList *IntList_create();\nvoid IntList_push(IntList *int_list, int data);\n/* int IntList_pop(IntList *int_list); */\n/* int IntList_lpop(IntList *int_list); */\nvoid IntList_print(IntList *int_list);\nvoid IntList_destroy(IntList *int_list);\n\n\nIntList *get_ports(int argc, char *argv[]);\n\nchar *connection_str(int port);\nchar *generate_node_id();\n" }, { "alpha_fraction": 0.5728882551193237, "alphanum_fraction": 0.5933242440223694, "avg_line_length": 26.185184478759766, "blob_id": "c247fe1e411fe03aedd7cb4f33fcf1fab3c49b0d", "content_id": "2f79f9ae8f45de38214bf22eb8f8601154438a21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1468, "license_type": "no_license", "max_line_length": 86, "num_lines": 54, "path": "/zeromq-the-guide/chapter1/weather_client.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <zmq.h>\n#include <assert.h>\n#include \"zhelpers.h\"\n\n\n#define PORT \"5556\"\n#define DEFAULT_ZIP \"10001\"\n\n\nchar *make_filter(int argc, char *argv[])\n{\n if (argc > 1) {\n char buffer[15] = {\"Weather{\\0\"};\n strncat(buffer, argv[1], 5);\n return strndup(buffer, sizeof(buffer));\n } else {\n return \"Weather{\" DEFAULT_ZIP;\n }\n}\n\n\nint main(int argc, char *argv[])\n{\n printf(\"Starting weather client on port \"PORT\"\\n\");\n char *filter = make_filter(argc, argv);\n printf(\"Applying filter for zipcode starting with: '%s'\\n\", filter + 8);\n void *context = zmq_ctx_new();\n void *subscriber = zmq_socket(context, ZMQ_SUB);\n int rc = zmq_connect(subscriber, \"tcp://localhost:\"PORT);\n assert(rc == 0);\n\n // subscribe to weather updates from zipcode, default is NYC, 10001\n rc = zmq_setsockopt(subscriber, ZMQ_SUBSCRIBE, filter, strlen(filter));\n assert(rc == 0);\n\n int i;\n long total_temp = 0;\n for (i = 0; i < 100; i++) {\n char *weather_str = s_recv(subscriber);\n printf(\"received: %s\\n\", weather_str);\n int zip, temp, humidity;\n sscanf(weather_str, \"Weather{%d, %d, %d}\", &zip, &temp, &humidity);\n total_temp += temp;\n\n free(weather_str);\n }\n printf(\"Average temperature for zipcode starting with '%s' was %dF\\n\", filter + 8,\n (int) (total_temp 
/ i));\n\n zmq_close(subscriber);\n zmq_ctx_destroy(context);\n return 0;\n}\n" }, { "alpha_fraction": 0.5396584272384644, "alphanum_fraction": 0.622390866279602, "avg_line_length": 25.089109420776367, "blob_id": "d9b794595503df819b1f7d7fc621fe93647c019c", "content_id": "72f9b38b40c7c141ea5f04777f4f88539a9cecb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2635, "license_type": "no_license", "max_line_length": 72, "num_lines": 101, "path": "/algorithms/sort/radix_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"reflect\"\n\tgosort \"sort\"\n\t\"testing\"\n\n\tsort \"github.com/jasonkeene/playground/algorithms/sort\"\n)\n\nfunc TestRadixCorrectness(t *testing.T) {\n\tcases := map[string][]string{\n\t\t\"empty\": []string{},\n\t\t\"single item\": []string{\"ABC123\"},\n\t\t\"multiple items\": []string{\"ABC123\", \"QAD613\", \"ABC612\"},\n\t\t\"duplicate items\": []string{\"ABC123\", \"WAB614\", \"ABC123\"},\n\t\t\"all the same items\": []string{\"ABC123\", \"ABC123\", \"ABC123\"},\n\t\t\"already sorted\": []string{\"ABC123\", \"ABC612\", \"QAD613\"},\n\t\t\"reversed sorted\": []string{\"QAD613\", \"ABC612\", \"ABC123\"},\n\n\t\t\"rand_8\": randomString(8),\n\t\t\"rand_64\": randomString(64),\n\t\t\"rand_1024\": randomString(1024),\n\t}\n\n\tfor k, v := range cases {\n\t\tt.Run(k, func(t *testing.T) {\n\t\t\texpected := make([]string, len(v))\n\t\t\tcopy(expected, v)\n\t\t\tgosort.Strings(expected)\n\n\t\t\tsort.Radix(v, 6)\n\n\t\t\tif !reflect.DeepEqual(expected, v) {\n\t\t\t\tt.Errorf(`%q failed to sort:\n\tExpected: %v\n\tActual: %v`, k, expected, v)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc randomString(s int) []string {\n\ta := make([]string, s)\n\tfor i := 0; i < s; i++ {\n\t\ta[i] = fmt.Sprintf(\n\t\t\t\"%s%s%s%d%d%d\",\n\t\t\tstring(rand.Intn(26)+1|0x40),\n\t\t\tstring(rand.Intn(26)+1|0x40),\n\t\t\tstring(rand.Intn(26)+1|0x40),\n\t\t\trand.Intn(9),\n\t\t\trand.Intn(9),\n\t\t\trand.Intn(9),\n\t\t)\n\t}\n\treturn a\n}\n\nfunc BenchmarkRadix_64(b *testing.B) { benchRadix(b, 64) }\nfunc BenchmarkRadix_1024(b *testing.B) { benchRadix(b, 1024) }\nfunc BenchmarkRadix_16384(b *testing.B) { benchRadix(b, 16384) }\nfunc BenchmarkRadix_262144(b *testing.B) { benchRadix(b, 262144) }\nfunc BenchmarkRadix_1048576(b *testing.B) { benchRadix(b, 1048576) }\nfunc BenchmarkRadix_16777216(b *testing.B) { benchRadix(b, 16777216) }\n\nfunc benchRadix(b *testing.B, size int) {\n\tb.StopTimer()\n\n\tfixtures := make([][]string, b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tfixtures[i] = randomString(size)\n\t}\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tsort.Radix(fixtures[i], 6)\n\t}\n}\n\nfunc BenchmarkStdlib_64(b *testing.B) { benchStdlib(b, 64) }\nfunc BenchmarkStdlib_1024(b *testing.B) { benchStdlib(b, 1024) }\nfunc BenchmarkStdlib_16384(b *testing.B) { benchStdlib(b, 16384) }\nfunc BenchmarkStdlib_262144(b *testing.B) { benchStdlib(b, 262144) }\nfunc BenchmarkStdlib_1048576(b *testing.B) { benchStdlib(b, 1048576) }\nfunc BenchmarkStdlib_16777216(b *testing.B) { benchStdlib(b, 16777216) }\n\nfunc benchStdlib(b *testing.B, size int) {\n\tb.StopTimer()\n\n\tfixtures := make([][]string, b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tfixtures[i] = randomString(size)\n\t}\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tgosort.Strings(fixtures[i])\n\t}\n}\n" }, { "alpha_fraction": 0.6145833134651184, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 
18.636363983154297, "blob_id": "3dc0e7f005436f6b50b2793f762f64711845c771", "content_id": "795dea82b8d974d5ec5844b4d5136133375a87fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 864, "license_type": "no_license", "max_line_length": 65, "num_lines": 44, "path": "/algorithms/sort/counting_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"reflect\"\n\tgosort \"sort\"\n\t\"testing\"\n\n\tsort \"github.com/jasonkeene/playground/algorithms/sort\"\n)\n\nfunc TestCountingCorrectness(t *testing.T) {\n\ttestCorrectnessRange(t, 5, sort.Counting(5))\n}\n\nfunc TestReallySimpleSortCorrectness(t *testing.T) {\n\ttestCorrectnessRange(t, 2, sort.ReallySimpleSort)\n}\n\nfunc testCorrectnessRange(t *testing.T, max int, f func([]int)) {\n\tcases := map[string][]int{\n\t\t\"empty\": []int{},\n\t\t\"single item\": []int{max - 1},\n\n\t\t\"rand_8\": random(max, 8),\n\t\t\"rand_64\": random(max, 64),\n\t\t\"rand_1024\": random(max, 1024),\n\t}\n\n\tfor k, v := range cases {\n\t\tt.Run(k, func(t *testing.T) {\n\t\t\texpected := make([]int, len(v))\n\t\t\tcopy(expected, v)\n\t\t\tgosort.Ints(expected)\n\n\t\t\tf(v)\n\n\t\t\tif !reflect.DeepEqual(expected, v) {\n\t\t\t\tt.Errorf(`%q failed to sort:\n\tExpected: %v\n\tActual: %v`, k, expected, v)\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5782312750816345, "avg_line_length": 12.363636016845703, "blob_id": "6167e200d726eebaf6c8918069076326a6d03517", "content_id": "bfebde6de1027873d8e9150244e67801a21fd72c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 148, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/algorithms/search/linear.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package search\n\n// Θ(n)\nfunc Linear(data []int, number int) int {\n\tfor i, num := range data {\n\t\tif num == number {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n" }, { "alpha_fraction": 0.5508981943130493, "alphanum_fraction": 0.553892195224762, "avg_line_length": 11.84615421295166, "blob_id": "e1296e419795effea04389923427c98d4b2fde18", "content_id": "2b83debf785355033409f6288620d06193c8d0fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 334, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/cyclical-pipe/child/main.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tin := bufio.NewReader(os.Stdin)\n\n\tfor {\n\t\ttext, err := in.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tn, err := strconv.Atoi(strings.TrimRight(text, \"\\n\"))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Printf(\"%d\\n\", n+2)\n\t}\n}\n" }, { "alpha_fraction": 0.45406824350357056, "alphanum_fraction": 0.49868765473365784, "avg_line_length": 22.8125, "blob_id": "681e15ef9ac7eaf8ba9687a55c8facafa92727a2", "content_id": "65b7273dbfe0a0661f4d9c137fa1f77afbf87303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 381, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/cpp-how-to-program/chapter8/cstrings.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": 
"#include <iostream>\n\nint main()\n{\n char str1[] = \"test1\";\n char str2[] = \"test2\";\n const char *str3 = \"test3\";\n char str4[] = \"test4\";\n\n std::cout << \"str1: \" << (long)str1 << std::endl;\n std::cout << \"str2: \" << (long)str2 << std::endl;\n std::cout << \"str3: \" << (long)str3 << std::endl;\n std::cout << \"str4: \" << (long)str4 << std::endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.47863247990608215, "alphanum_fraction": 0.4991452991962433, "avg_line_length": 19.89285659790039, "blob_id": "bc97ed7ea9637ea66e2e62acbd5174aa95bd770a", "content_id": "9c92b4e4f9caf7ec423d42bcd36bd8ce92db074f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 585, "license_type": "no_license", "max_line_length": 55, "num_lines": 28, "path": "/cpp-how-to-program/chapter8/arrays.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nvoid double_it(int * const arr, const size_t len)\n{\n for (size_t i = 0; i < len; i++) {\n arr[i] += arr[i];\n }\n}\n\nvoid print_it(const int * const arr, const size_t len)\n{\n\n for (size_t i = 0; i < len; i++) {\n std::cout << arr[i] << (i < len-1 ? \" \" : \"\");\n }\n std::cout << std::endl;\n}\n\nint main()\n{\n const size_t my_len = 6;\n int my_arr[my_len] = {4, 1, 16, 0, 8, 2};\n double_it(my_arr, my_len);\n print_it(my_arr, my_len);\n std::sort(std::begin(my_arr), std::end(my_arr));\n print_it(my_arr, my_len);\n return 0;\n}\n" }, { "alpha_fraction": 0.5691347122192383, "alphanum_fraction": 0.5731489658355713, "avg_line_length": 23.63736343383789, "blob_id": "e5ce39323196e9d313c29d60c8fd9ce5a6a133f2", "content_id": "bfe878f99b05e7ab37508ca440a9e9ae0250c86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 72, "num_lines": 91, "path": "/zeromq-the-guide/chapter2/zeroapi/server.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <assert.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include <zmq.h>\n\n#include \"utils.h\"\n\n\nchar *node_id;\n\n\nint main(int argc, char *argv[])\n{\n // verify args\n assert(argc > 2);\n\n // set node id\n node_id = generate_node_id();\n printf(\"Node ID: %s\\n\", node_id);\n\n // create context and sockets\n void *context = zmq_ctx_new();\n void *puller = zmq_socket(context, ZMQ_PULL);\n void *publisher = zmq_socket(context, ZMQ_PUB);\n\n // get ports list\n IntList *ports = get_ports(argc, argv);\n\n // connect sockets\n printf(\"Starting puller on port %i.\\n\", ports->head->data);\n char *connect_str = connection_str(ports->head->data);\n zmq_bind(puller, connect_str);\n free(connect_str);\n\n printf(\"Starting publisher on port %i.\\n\", ports->head->next->data);\n connect_str = connection_str(ports->head->next->data);\n zmq_bind(publisher, connect_str);\n free(connect_str);\n\n while (1) {\n // loop variables\n zmq_msg_t recv_msg;\n zmq_msg_t send_msg;\n char *msg_str;\n int msg_len;\n char *source_node_id;\n\n // setup pull message\n zmq_msg_init(&recv_msg);\n zmq_msg_recv(&recv_msg, puller, 0);\n msg_len = zmq_msg_size(&recv_msg);\n msg_str = malloc(msg_len + 1);\n memcpy(msg_str, zmq_msg_data(&recv_msg), msg_len);\n msg_str[msg_len] = 0;\n source_node_id = malloc(8);\n memcpy(source_node_id, msg_str, 8);\n\n // notify console that you got a message\n printf(\"got %s\\n\", msg_str);\n\n // cleanup\n zmq_msg_close(&recv_msg);\n\n // setup publish 
message\n        zmq_msg_init_size(&send_msg, msg_len);\n        memcpy(zmq_msg_data(&send_msg), msg_str, msg_len);\n\n        // notify console that you are about to send message\n        printf(\"sending: %s (len: %i)\\n\", msg_str, msg_len);\n\n        // send message\n        zmq_msg_send(&send_msg, publisher, 0);\n\n        // cleanup\n        free(msg_str);\n        free(source_node_id);\n        zmq_msg_close(&send_msg);\n    }\n\n    // cleanup\n    IntList_destroy(ports);\n    zmq_close(puller);\n    zmq_close(publisher);\n    zmq_ctx_destroy(context);\n    free(node_id);\n\n    printf(\"Stopping server.\\n\");\n    return 0;\n}\n" }, { "alpha_fraction": 0.6021599769592285, "alphanum_fraction": 0.6163650751113892, "avg_line_length": 19.68008041381836, "blob_id": "a3e6bff565daaad50314335a2ab4c4ab99a7c536", "content_id": "80c0074d38bbd5cec44b4d5136133375a87fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 10278, "license_type": "no_license", "max_line_length": 95, "num_lines": 497, "path": "/data-structures/bst/bst_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package bst_test\n\nimport (\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\n\t\"github.com/jasonkeene/playground/data-structures/bst\"\n)\n\nvar _ bst.SearchTree = &bst.Node{}\n\nfunc TestBST(t *testing.T) {\n\tt.Run(\"search\", func(t *testing.T) {\n\t\tnodeToFind := &bst.Node{\n\t\t\tKey:   25,\n\t\t\tValue: \"test-value\",\n\t\t}\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey:  30,\n\t\t\t\tLeft: nodeToFind,\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 35,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tfound := tree.Search(nodeToFind.Key)\n\n\t\tif !cmp.Equal(found, nodeToFind, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(found, nodeToFind), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"search for value that does not exist\", func(t *testing.T) {\n\t\tnodeToFind := &bst.Node{\n\t\t\tKey:   25,\n\t\t\tValue: \"test-value\",\n\t\t}\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 30,\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 35,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tfound := tree.Search(nodeToFind.Key)\n\n\t\tvar emptyNode *bst.Node\n\t\tif !cmp.Equal(found, emptyNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(found, emptyNode), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"search empty node\", func(t *testing.T) {\n\t\tvar emptyNode *bst.Node\n\n\t\tfound := emptyNode.Search(25)\n\n\t\tif !cmp.Equal(found, emptyNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(found, emptyNode), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"insert\", func(t *testing.T) {\n\t\tvar tree *bst.Node\n\n\t\ttree = tree.Insert(&bst.Node{\n\t\t\tKey: 20,\n\t\t})\n\t\ttree = tree.Insert(&bst.Node{\n\t\t\tKey: 30,\n\t\t})\n\t\ttree = tree.Insert(&bst.Node{\n\t\t\tKey: 10,\n\t\t})\n\n\t\texpectedTree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 30,\n\t\t\t},\n\t\t})\n\n\t\tif !cmp.Equal(tree, expectedTree, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree, expectedTree), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"inserts new value when key already exists\", func(t *testing.T) {\n\t\tnewNode := &bst.Node{\n\t\t\tKey:   20,\n\t\t\tValue: \"new-value\",\n\t\t}\n\t\ttree := 
ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tValue: \"old-value\",\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 30,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 25,\n\t\t\t\t},\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 35,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\ttree = tree.Insert(newNode)\n\n\t\tif !cmp.Equal(tree.Value, newNode.Value) {\n\t\t\tt.Fatal(cmp.Diff(tree.Value, newNode.Value))\n\t\t}\n\t})\n\n\tt.Run(\"minimum\", func(t *testing.T) {\n\t\texpectedMinNode := &bst.Node{\n\t\t\tKey: 5,\n\t\t\tValue: \"min-node\",\n\t\t}\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t\tLeft: expectedMinNode,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 30,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 25,\n\t\t\t\t},\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 35,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tminNode := tree.Minimum()\n\n\t\tif !cmp.Equal(minNode, expectedMinNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(minNode, expectedMinNode), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"maximum\", func(t *testing.T) {\n\t\texpectedMaxNode := &bst.Node{\n\t\t\tKey: 50,\n\t\t\tValue: \"max-node\",\n\t\t}\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 20,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 10,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 30,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 25,\n\t\t\t\t},\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 35,\n\t\t\t\t\tRight: expectedMaxNode,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tmaxNode := tree.Maximum()\n\n\t\tif !cmp.Equal(maxNode, expectedMaxNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(maxNode, expectedMaxNode), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"successor in subtree\", func(t *testing.T) {\n\t\texpectedSuccessor := &bst.Node{\n\t\t\tKey: 9,\n\t\t\tValue: \"successor\",\n\t\t}\n\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 7,\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 13,\n\t\t\t\tLeft: expectedSuccessor,\n\t\t\t},\n\t\t})\n\n\t\tsuccessor := tree.Successor()\n\n\t\tif !cmp.Equal(successor, expectedSuccessor, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(successor, expectedSuccessor), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"successor in ancestry\", func(t *testing.T) {\n\t\tnode := &bst.Node{\n\t\t\tKey: 13,\n\t\t}\n\t\texpectedSuccessor := &bst.Node{\n\t\t\tKey: 15,\n\t\t\tValue: \"successor\",\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 6,\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 7,\n\t\t\t\t\tRight: node,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tensureCorrectTree(&bst.Node{\n\t\t\tKey: 27,\n\t\t\tLeft: expectedSuccessor,\n\t\t})\n\n\t\tsuccessor := node.Successor()\n\n\t\tif !cmp.Equal(successor, expectedSuccessor, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(successor, expectedSuccessor, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"predecessor in subtree\", func(t *testing.T) {\n\t\texpectedPredecessor := &bst.Node{\n\t\t\tKey: 4,\n\t\t\tValue: \"predecessor\",\n\t\t}\n\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 6,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 3,\n\t\t\t\tRight: expectedPredecessor,\n\t\t\t},\n\t\t})\n\n\t\tpredecessor := tree.Predecessor()\n\n\t\tif !cmp.Equal(predecessor, expectedPredecessor, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(predecessor, expectedPredecessor), cmp.Comparer(EqualNodes))\n\t\t}\n\t})\n\n\tt.Run(\"predecessor in ancestry\", func(t *testing.T) {\n\t\tnode := &bst.Node{\n\t\t\tKey: 42,\n\t\t}\n\t\texpectedPredecessor 
:= &bst.Node{\n\t\t\tKey: 40,\n\t\t\tValue: \"predecessor\",\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 50,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 45,\n\t\t\t\t\tLeft: node,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tensureCorrectTree(&bst.Node{\n\t\t\tKey: 30,\n\t\t\tRight: expectedPredecessor,\n\t\t})\n\n\t\tpredecessor := node.Predecessor()\n\n\t\tif !cmp.Equal(predecessor, expectedPredecessor, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(predecessor, expectedPredecessor, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete isolated\", func(t *testing.T) {\n\t\ttree := &bst.Node{\n\t\t\tKey: 20,\n\t\t}\n\n\t\ttree = tree.Delete()\n\n\t\tvar emptyNode *bst.Node\n\t\tif !cmp.Equal(tree, emptyNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree, emptyNode, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete with no children\", func(t *testing.T) {\n\t\ttoDelete := &bst.Node{\n\t\t\tKey: 20,\n\t\t}\n\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: toDelete,\n\t\t})\n\n\t\treplacedBy := toDelete.Delete()\n\n\t\tvar emptyNode *bst.Node\n\t\tif !cmp.Equal(replacedBy, emptyNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(replacedBy, emptyNode, cmp.Comparer(EqualNodes)))\n\t\t}\n\t\tif !cmp.Equal(tree.Right, emptyNode, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree.Right, emptyNode, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete with left child\", func(t *testing.T) {\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 20,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 15,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\texpectedTree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 15,\n\t\t\t},\n\t\t})\n\n\t\treplacedBy := tree.Right.Delete()\n\n\t\tif !cmp.Equal(replacedBy, expectedTree.Right, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(replacedBy, expectedTree.Right, cmp.Comparer(EqualNodes)))\n\t\t}\n\t\tif !cmp.Equal(tree, expectedTree, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree, expectedTree, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete with right child\", func(t *testing.T) {\n\t\texpectedReplacedBy := &bst.Node{\n\t\t\tKey: 30,\n\t\t}\n\t\ttoDelete := &bst.Node{\n\t\t\tKey: 20,\n\t\t\tRight: expectedReplacedBy,\n\t\t}\n\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: toDelete,\n\t\t})\n\n\t\treplacedBy := toDelete.Delete()\n\n\t\tif !cmp.Equal(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)))\n\t\t}\n\t\tif !cmp.Equal(tree.Right, expectedReplacedBy, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree.Right, expectedReplacedBy, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete with both children and successor is a child\", func(t *testing.T) {\n\t\texpectedReplacedBy := &bst.Node{\n\t\t\tKey: 30,\n\t\t}\n\t\ttoDelete := &bst.Node{\n\t\t\tKey: 20,\n\t\t\tRight: expectedReplacedBy,\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 15,\n\t\t\t},\n\t\t}\n\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: toDelete,\n\t\t})\n\n\t\treplacedBy := toDelete.Delete()\n\n\t\tif !cmp.Equal(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)))\n\t\t}\n\t\tif !cmp.Equal(tree.Right, expectedReplacedBy, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree.Right, expectedReplacedBy, 
cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n\n\tt.Run(\"delete with both children and successor is not a child\", func(t *testing.T) {\n\t\ttree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 20,\n\t\t\t\tValue: \"getting-deleted\",\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 15,\n\t\t\t\t},\n\t\t\t\tRight: &bst.Node{\n\t\t\t\t\tKey: 50,\n\t\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\t\tKey: 30,\n\t\t\t\t\t\tValue: \"replacing\",\n\t\t\t\t\t\tRight: &bst.Node{\n\t\t\t\t\t\t\tKey: 40,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\texpectedReplacedBy := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 30,\n\t\t\tValue: \"replacing\",\n\t\t\tLeft: &bst.Node{\n\t\t\t\tKey: 15,\n\t\t\t},\n\t\t\tRight: &bst.Node{\n\t\t\t\tKey: 50,\n\t\t\t\tLeft: &bst.Node{\n\t\t\t\t\tKey: 40,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\texpectedTree := ensureCorrectTree(&bst.Node{\n\t\t\tKey: 10,\n\t\t\tRight: expectedReplacedBy,\n\t\t})\n\n\t\treplacedBy := tree.Right.Delete()\n\n\t\tif !cmp.Equal(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(replacedBy, expectedReplacedBy, cmp.Comparer(EqualNodes)))\n\t\t}\n\t\tif !cmp.Equal(tree, expectedTree, cmp.Comparer(EqualNodes)) {\n\t\t\tt.Fatal(cmp.Diff(tree, expectedTree, cmp.Comparer(EqualNodes)))\n\t\t}\n\t})\n}\n\nfunc ensureCorrectTree(n *bst.Node) *bst.Node {\n\tif n.Left != nil {\n\t\tn.Left.Parent = n\n\t\tif n.Left.Key >= n.Key {\n\t\t\tlog.Fatal(\"incorrect tree\")\n\t\t}\n\t\tensureCorrectTree(n.Left)\n\t}\n\tif n.Right != nil {\n\t\tn.Right.Parent = n\n\t\tif n.Right.Key <= n.Key {\n\t\t\tlog.Fatal(\"incorrect tree\")\n\t\t}\n\t\tensureCorrectTree(n.Right)\n\t}\n\treturn n\n}\n\nfunc EqualNodes(x, y *bst.Node) bool {\n\tif x == nil {\n\t\treturn y == nil\n\t}\n\tif y == nil {\n\t\treturn x == nil\n\t}\n\teq := cmp.Equal(x, y, cmpopts.IgnoreFields(*x, \"Parent\", \"Left\", \"Right\"))\n\tif !eq {\n\t\treturn false\n\t}\n\n\tif x.Parent == nil && y.Parent != nil {\n\t\treturn false\n\t}\n\tif x.Parent != nil && y.Parent == nil {\n\t\treturn false\n\t}\n\tparentEq := cmp.Equal(x.Parent, y.Parent, cmpopts.IgnoreFields(*x, \"Parent\", \"Left\", \"Right\"))\n\tif !parentEq {\n\t\treturn false\n\t}\n\n\tif !EqualNodes(x.Left, y.Left) {\n\t\treturn false\n\t}\n\treturn EqualNodes(x.Right, y.Right)\n}\n" }, { "alpha_fraction": 0.6102564334869385, "alphanum_fraction": 0.6871795058250427, "avg_line_length": 96.5, "blob_id": "e57170fe63b5e1a790ed1d3d1477d8526bbfe4b4", "content_id": "bca93d85d42886c0a167bfc1fa7c0ba711eebe3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 390, "license_type": "no_license", "max_line_length": 125, "num_lines": 4, "path": "/coreos/sync.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrsync -avzci -e \"ssh -p 2222 -o StrictHostKeyChecking=no -i $HOME/.vagrant.d/insecure_private_key\" ./**/* [email protected]:/tmp\nrsync -avzci -e \"ssh -p 2200 -o StrictHostKeyChecking=no -i $HOME/.vagrant.d/insecure_private_key\" ./**/* [email protected]:/tmp\nrsync -avzci -e \"ssh -p 2201 -o StrictHostKeyChecking=no -i $HOME/.vagrant.d/insecure_private_key\" ./**/* [email protected]:/tmp\n" }, { "alpha_fraction": 0.7488151788711548, "alphanum_fraction": 0.7488151788711548, "avg_line_length": 34.16666793823242, "blob_id": "c37ca75a2899c6bb809fcfc69d6437fe27a5f16f", "content_id": "80d2a0ef422e767253c47ca0ea1698d64cf60602", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/python-logging/basic-logging-tutorial/to_file.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import logging\n\nlogging.basicConfig(filename=\"to_file.log\", filemode='w', level=logging.DEBUG)\nlogging.debug(\"In depth debug info here!\")\nlogging.info(\"Just some handy info :)\")\nlogging.warning(\"Danger Will Robinson\")\n" }, { "alpha_fraction": 0.4893162250518799, "alphanum_fraction": 0.5213675498962402, "avg_line_length": 20.272727966308594, "blob_id": "7cca8ad03a8090f1a994fc35aa273575a192917f", "content_id": "bc809f230f202c22e3120a2dc569399d563355b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/zerorpc-streaming/client.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport zerorpc\nimport time\n\n\ndef main():\n client = zerorpc.Client()\n client.connect(\"tcp://127.0.0.1:12345\")\n for i, item in enumerate(client.stream_randomness()):\n print \"got\", item\n if i == 3:\n print \"simulating network outage\"\n time.sleep(5)\n elif i == 9:\n print \"done, got\", i + 1, \"messages\"\n client.close()\n break\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5897920727729797, "alphanum_fraction": 0.6427221298217773, "avg_line_length": 45.64706039428711, "blob_id": "938edcb9ee805276ebf1881144ab833e330f1cd7", "content_id": "4f0dc693eec0742d7396e491cf77d0e0dd2506d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1587, "license_type": "no_license", "max_line_length": 111, "num_lines": 34, "path": "/cpp-how-to-program/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [C++ How to Program](http://www.amazon.com/dp/0133378713)\n\n - [x] 8. Pointers\n - [x] 8.1 Introduction\n - [x] 8.2 Pointer Varible Declarations and Initialization\n - [x] 8.3 Pointer Operators\n - [x] 8.4 Pass-by-Reference with Pointers\n - [x] 8.5 Built-In Arrays\n - [x] 8.6 Using const with Pointers\n - [x] 8.6.1 Nonconstant Pointer to Nonconstant Data\n - [x] 8.6.2 Nonconstant Pointer to Constant Data\n - [x] 8.6.3 Constant Pointer to Nonconstant Data\n - [x] 8.6.4 Constant Pointer to Constant Data\n - [x] 8.7 sizeof Operator\n - [x] 8.8 Pointer Expressions and Pointer Arithmetic\n - [x] 8.9 Relationship Between Pointers and Built-In Arrays\n - [x] 8.10 Pointer-Based Strings\n - [x] 8.11 Wrap-Up\n - [x] 9. 
Classes: A Deeper Look; Throwing Exceptions\n - [x] 9.1 Introduction\n - [x] 9.2 Time Class Case Study\n - [x] 9.3 Class Scope and Accessing Class Members\n - [x] 9.4 Access Functions and Utility Functions\n - [x] 9.5 Time Class Case Study: Constructors with Default Arguments\n - [x] 9.6 Destructors\n - [x] 9.7 When Constructors and Destructors Are Called\n - [x] 9.8 Time Class Case Study: A subtle Trap-Returning a Reference or a Pointer to a private Data member\n - [x] 9.9 Default Memberwise Assignemnt\n - [x] 9.10 const Objects and const Member Functions\n - [x] 9.11 Composition: Objects as Members of Classes\n - [x] 9.12 friend Functions and friend Classes\n - [x] 9.13 Using the this Pointer\n - [x] 9.14 static Class Members\n - [x] 9.15 Wrap-Up\n" }, { "alpha_fraction": 0.44921875, "alphanum_fraction": 0.4895833432674408, "avg_line_length": 13.49056625366211, "blob_id": "a282b2e253fd26450abccf70895730ae5691a4ea", "content_id": "df3b1f4ec3823e048b501cafc256e9b43e5cecea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 768, "license_type": "no_license", "max_line_length": 45, "num_lines": 53, "path": "/algorithms/sort/radix.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nfunc Radix(a []string, size int) {\n\tfor i := size - 1; i >= 0; i-- {\n\t\tcounting(a, i)\n\t}\n}\n\nfunc counting(a []string, p int) {\n\t// equal\n\tvar bucket [36]int\n\tfor _, v := range a {\n\t\tbucket[bv[v[p]]]++\n\t}\n\n\t// less\n\ttmp := bucket[0]\n\tbucket[0] = 0\n\tfor i := 1; i < len(bucket); i++ {\n\t\ttmp, bucket[i] = bucket[i], tmp+bucket[i-1]\n\t}\n\n\t// sort\n\tsorted := make([]string, len(a))\n\tfor _, v := range a {\n\t\ti := bv[v[p]]\n\t\tsorted[bucket[i]] = v\n\t\tbucket[i]++\n\t}\n\n\t// replace\n\tfor i, v := range sorted {\n\t\ta[i] = v\n\t}\n}\n\nvar bv = func() [256]int {\n\tvar bv [256]int\n\n\tfor r := 0; r < 256; r++ {\n\t\t// digit\n\t\tif r >= 48 && r < 58 {\n\t\t\tbv[r] = r - 48\n\t\t\tcontinue\n\t\t}\n\t\t// capital letter\n\t\tif r >= 65 && r < 91 {\n\t\t\tbv[r] = r - 55\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn bv\n}()\n" }, { "alpha_fraction": 0.5595105886459351, "alphanum_fraction": 0.5672969818115234, "avg_line_length": 15.962264060974121, "blob_id": "9cae4a85416061c07a06957d121e5b8afaa0eb84", "content_id": "9a282b71540468ecf6fa85cc6ad5270da95cfb33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 899, "license_type": "no_license", "max_line_length": 55, "num_lines": 53, "path": "/data-structures/queue/array_priority_queue.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package queue\n\nimport \"log\"\n\ntype Array struct {\n\tvalues []Element\n}\n\nfunc NewArray(qes ...Element) *Array {\n\treturn &Array{\n\t\tvalues: qes,\n\t}\n}\n\nfunc (a *Array) Empty() bool {\n\treturn len(a.values) == 0\n}\n\nfunc (a *Array) Insert(qe Element) {\n\tfor i, v := range a.values {\n\t\tif v.Key >= qe.Key {\n\t\t\ta.values = append(a.values, Element{})\n\t\t\tcopy(a.values[i+1:], a.values[i:])\n\t\t\ta.values[i] = qe\n\t\t\treturn\n\t\t}\n\t}\n\ta.values = append(a.values, qe)\n}\n\nfunc (a *Array) PopMin() Element {\n\tmin := a.values[0]\n\ta.values = a.values[1:]\n\treturn min\n}\n\nfunc (a *Array) Decrease(qe Element) {\n\tfor i, v := range a.values {\n\t\tif v.Value == qe.Value {\n\t\t\ta.values[i].Key = qe.Key\n\t\t\tj := i - 1\n\t\t\tfor {\n\t\t\t\tif j < 0 || a.values[j].Key <= a.values[i].Key 
{\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ta.values[i], a.values[j] = a.values[j], a.values[i]\n\t\t\t\ti, j = j, j-1\n\t\t\t}\n\t\t}\n\t}\n\tlog.Fatalf(\"value was not found for: %v\", qe)\n}\n" }, { "alpha_fraction": 0.8208954930305481, "alphanum_fraction": 0.8208954930305481, "avg_line_length": 65, "blob_id": "c64a257dbc2091e8af2e2c737e68b14effbec42f", "content_id": "f4a03a472cb6f144cc4a6f15858ea039f689c49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 67, "license_type": "no_license", "max_line_length": 65, "num_lines": 1, "path": "/saltstack/prototypes/todo.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": " - Get application servers announcing to the reverse proxy servers\n" }, { "alpha_fraction": 0.5773913264274597, "alphanum_fraction": 0.5852174162864685, "avg_line_length": 22, "blob_id": "784e319288da0f5749468372b65aa964e450b2cc", "content_id": "2be81081253d80607ffd15eeb784aac2af652aaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 74, "num_lines": 50, "path": "/algorithms/graph/floyd_warshall.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nimport \"math\"\n\nfunc FloydWarshall(g Graph) ([][]float64, [][]int) {\n\t// init shortest\n\tshortest := make([][]float64, len(g.Nodes))\n\tfor u := range shortest {\n\t\tshortest[u] = make([]float64, len(g.Nodes))\n\t\tfor v := range shortest[u] {\n\t\t\tshortest[u][v] = math.Inf(1)\n\t\t}\n\t}\n\n\t// init prev\n\tprev := make([][]int, len(g.Nodes))\n\tfor u := range prev {\n\t\tprev[u] = make([]int, len(g.Nodes))\n\t\tfor v := range prev[u] {\n\t\t\tprev[u][v] = -1\n\t\t}\n\t}\n\n\t// populate existing values for shortest and prev based on edges\n\tfor u := range g.Nodes {\n\t\tshortest[u][u] = 0\n\t\tfor _, e := range g.Edges[u] {\n\t\t\tshortest[u][e.Target] = e.Weight\n\t\t\tprev[u][e.Target] = u\n\t\t}\n\t}\n\n\t// consider every path of u->v, if it can be improved by visiting x do so\n\tfor x := range g.Nodes {\n\t\tfor u := range g.Nodes {\n\t\t\tfor v := range g.Nodes {\n\t\t\t\t// see if going from u->v via x is shorter\n\t\t\t\tshortestViaX := shortest[u][x] + shortest[x][v]\n\t\t\t\tif shortestViaX < shortest[u][v] {\n\t\t\t\t\t// update the shortest val\n\t\t\t\t\tshortest[u][v] = shortestViaX\n\t\t\t\t\t// prev should now be prev between x->v\n\t\t\t\t\tprev[u][v] = prev[x][v]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn shortest, prev\n}\n" }, { "alpha_fraction": 0.5393053293228149, "alphanum_fraction": 0.5557586550712585, "avg_line_length": 19.259260177612305, "blob_id": "18ca2e0e0f3044ebb771e3eec1a5cbc9dd537d65", "content_id": "2bb49b2828ce66cc5ca61622ec62e1f25f769828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 547, "license_type": "no_license", "max_line_length": 58, "num_lines": 27, "path": "/zeromq-the-guide/chapter2/rrclient.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n#include <zmq.h>\n\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n void *requester = zmq_socket(context, ZMQ_REQ);\n zmq_connect(requester, \"tcp://localhost:11111\");\n\n for (int i = 0; i < 10; i++) {\n printf(\"Sending request %i \\\"%s\\\"\\n\", i, \"Hello\");\n s_send(requester, \"Hello\");\n char *string = s_recv(requester);\n printf(\"Got reply %i \\\"%s\\\"\\n\", 
i, string);\n free(string);\n }\n\n zmq_close(requester);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.4585764408111572, "alphanum_fraction": 0.46791133284568787, "avg_line_length": 14.16814136505127, "blob_id": "9ae4aa8137763b7b79286ebaf3bfb86102624e93", "content_id": "f516a4fc173b6d9928a82ec5ae336eb8c0323d8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1714, "license_type": "no_license", "max_line_length": 53, "num_lines": 113, "path": "/parsers/ini/parse/parse_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package parse_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/parsers/ini/parse\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tinput string\n\t\texpected parse.File\n\t}{\n\t\t\"empty\": {\n\t\t\texpected: parse.File{\n\t\t\t\tName: \"test.ini\",\n\t\t\t\tSections: []parse.Section{\n\t\t\t\t\t{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"extra whitespace\": {\n\t\t\tinput: `\n\t\t\t\t[section]\n\t\t\t\tkey = val\n\t\t\t`,\n\t\t\texpected: parse.File{\n\t\t\t\tName: \"test.ini\",\n\t\t\t\tSections: []parse.Section{\n\t\t\t\t\t{},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"section\",\n\t\t\t\t\t\tKeyValues: []parse.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"key\",\n\t\t\t\t\t\t\t\tValue: \"val\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"normal\": {\n\t\t\tinput: `\n\nbareKey=bareValue\n\n[sectionA]\nkeyA1=valA1\nkeyA2=valA2\n\n[sectionB]\nkeyB1=valB1\nkeyB2=valB2\n\n\t\t`,\n\t\t\texpected: parse.File{\n\t\t\t\tName: \"test.ini\",\n\t\t\t\tSections: []parse.Section{\n\t\t\t\t\t{\n\t\t\t\t\t\tKeyValues: []parse.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"bareKey\",\n\t\t\t\t\t\t\t\tValue: \"bareValue\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sectionA\",\n\t\t\t\t\t\tKeyValues: []parse.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyA1\",\n\t\t\t\t\t\t\t\tValue: \"valA1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyA2\",\n\t\t\t\t\t\t\t\tValue: \"valA2\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sectionB\",\n\t\t\t\t\t\tKeyValues: []parse.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyB1\",\n\t\t\t\t\t\t\t\tValue: \"valB1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyB2\",\n\t\t\t\t\t\t\t\tValue: \"valB2\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tf, err := parse.Parse(\"test.ini\", tc.input)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !cmp.Equal(f, tc.expected) {\n\t\t\t\tt.Error(cmp.Diff(f, tc.expected))\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.6479549407958984, "alphanum_fraction": 0.7151502370834351, "avg_line_length": 80.2203369140625, "blob_id": "cd70549686a42df461f543b0a0d3f4c8ce862a18", "content_id": "4fdae98fee48cc0782864baaabf7ff1fba1f6229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19180, "license_type": "no_license", "max_line_length": 211, "num_lines": 236, "path": "/golang/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [Documentation](http://golang.org/doc/)\n\n - [x] [How to write Go 
code](http://golang.org/doc/code.html)\n - [ ] [Language Specification](http://golang.org/ref/spec)\n - [ ] [Writing Web Applications](http://golang.org/doc/articles/wiki/)\n - [ ] [Effective Go](http://golang.org/doc/effective_go.html)\n - [ ] [Command go](http://golang.org/cmd/go/)\n - [ ] [Package testing](http://golang.org/pkg/testing/)\n - [ ] [Package json](http://golang.org/pkg/encoding/json/)\n\n\n## Blogs\n\n#### [The Go Blog](http://blog.golang.org/)\n\n - [ ] [Go's Declaration Syntax](http://blog.golang.org/gos-declaration-syntax)\n - [ ] [Defer, Panic, and Recover](http://blog.golang.org/defer-panic-and-recover)\n - [ ] [Go Slices: usage and internals](http://blog.golang.org/go-slices-usage-and-internals)\n - [ ] [Strings, bytes, runes and characters in Go](http://blog.golang.org/strings)\n - [ ] [Concurrency is not parallelism](http://blog.golang.org/concurrency-is-not-parallelism)\n - [x] [JSON and Go](http://blog.golang.org/json-and-go)\n - [ ] [Error handling and Go](http://blog.golang.org/error-handling-and-go)\n - [x] [Defer, Panic, and Recover](http://blog.golang.org/defer-panic-and-recover)\n\n#### [Dave Cheney](http://dave.cheney.net/category/golang)\n\n<!--\n// use this function to pull links out of Dave's blog\n(function () {\n var out = \"\";\n var elements = document.querySelectorAll('.post .entry-title a');\n for (var i = 0; i < elements.length; i++) {\n var e = elements[i];\n out += \" - [ ] [\" + e.text + \"](\" + e.href + \")\\n\";\n }\n console.log(out);\n})();\n-->\n\n - [ ] [Investigate your package dependencies with prdeps and Unix](http://dave.cheney.net/2016/02/29/investigate-your-package-dependencies-with-prdeps-and-unix)\n - [ ] [Unhelpful abstractions](http://dave.cheney.net/2016/02/06/unhelpful-abstractions)\n - [ ] [cgo is not Go](http://dave.cheney.net/2016/01/18/cgo-is-not-go)\n - [ ] [Are Go maps sensitive to data races ?](http://dave.cheney.net/2015/12/07/are-go-maps-sensitive-to-data-races)\n - [ ] [A whirlwind tour of Go’s runtime environment variables](http://dave.cheney.net/2015/11/29/a-whirlwind-tour-of-gos-runtime-environment-variables)\n - [ ] [Wednesday pop quiz: spot the race](http://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race)\n - [ ] [The Legacy of Go](http://dave.cheney.net/2015/11/15/the-legacy-of-go)\n - [ ] [Let’s talk about logging](http://dave.cheney.net/2015/11/05/lets-talk-about-logging)\n - [ ] [Bootstrapping Go 1.5 on non Intel platforms](http://dave.cheney.net/2015/10/16/bootstrapping-go-1-5-on-non-intel-platforms)\n - [x] [Padding is hard](http://dave.cheney.net/2015/10/09/padding-is-hard)\n - [ ] [Building Go 1.5 on the Raspberry Pi](http://dave.cheney.net/2015/09/04/building-go-1-5-on-the-raspberry-pi)\n - [x] [Cross compilation with Go 1.5](http://dave.cheney.net/2015/08/22/cross-compilation-with-go-1-5)\n - [x] [Using gb as my every day build tool](http://dave.cheney.net/2015/08/19/using-gb-as-my-every-day-build-tool)\n - [x] [Performance without the event loop](http://dave.cheney.net/2015/08/08/performance-without-the-event-loop)\n - [x] [Why Go and Rust are not competitors](http://dave.cheney.net/2015/07/02/why-go-and-rust-are-not-competitors)\n - [x] [gb, a project based build tool for the Go programming language](http://dave.cheney.net/2015/06/09/gb-a-project-based-build-tool-for-the-go-programming-language)\n - [x] [Friday pop quiz: the smallest buffer](http://dave.cheney.net/2015/06/05/friday-pop-quiz-the-smallest-buffer)\n - [x] [Hear me speak about Go performance at 
OSCON](http://dave.cheney.net/2015/05/31/hear-me-speak-about-go-performance-at-oscon)\n - [x] [Struct composition with Go](http://dave.cheney.net/2015/05/22/struct-composition-with-go)\n - [x] [Introducing gb, a project based build tool for the Go programming language](http://dave.cheney.net/2015/05/12/introducing-gb)\n - [ ] [2015 is going to be the year of Go](http://dave.cheney.net/2015/03/28/2015-is-going-to-be-the-year-of-go)\n - [ ] [A parable about practice](http://dave.cheney.net/2015/03/26/a-parable-about-practice)\n - [ ] [Simplicity and collaboration](http://dave.cheney.net/2015/03/08/simplicity-and-collaboration)\n - [ ] [Cross compilation just got a whole lot better in Go 1.5](http://dave.cheney.net/2015/03/03/cross-compilation-just-got-a-whole-lot-better-in-go-1-5)\n - [ ] [Unofficial Go 1.4.2 tarballs now available](http://dave.cheney.net/2015/02/26/unofficial-go-1-4-2-tarballs-now-available)\n - [ ] [Lost in translation](http://dave.cheney.net/2015/02/25/lost-in-translation)\n - [ ] [Thanks Brainman](http://dave.cheney.net/2015/02/13/thanks-brainman)\n - [ ] [Errors and Exceptions, redux](http://dave.cheney.net/2015/01/26/errors-and-exceptions-redux)\n - [ ] [Inspecting errors](http://dave.cheney.net/2014/12/24/inspecting-errors)\n - [ ] [Unofficial Go 1.4 tarballs now available](http://dave.cheney.net/2014/12/14/unofficial-go-1-4-tarballs-now-available)\n - [ ] [Friday pop quiz: the size of things](http://dave.cheney.net/2014/12/12/friday-pop-quiz-the-size-of-things)\n - [ ] [Minimum one liner followup](http://dave.cheney.net/2014/12/05/minimum-one-liner-followup)\n - [ ] [Friday pop quiz: minimum one liner](http://dave.cheney.net/2014/12/05/friday-pop-quiz-minimum-one-liner)\n - [ ] [Five suggestions for setting up a Go project](http://dave.cheney.net/2014/12/01/five-suggestions-for-setting-up-a-go-project)\n - [ ] [Visualising dependencies](http://dave.cheney.net/2014/11/21/visualising-dependencies)\n - [ ] [Error handling vs. 
exceptions redux](http://dave.cheney.net/2014/11/04/error-handling-vs-exceptions-redux)\n - [ ] [Go, frameworks, and Ludditry](http://dave.cheney.net/2014/10/26/go-frameworks-and-ludditry)\n - [ ] [Simple profiling package moved, updated](http://dave.cheney.net/2014/10/22/simple-profiling-package-moved-updated)\n - [ ] [Functional options for friendly APIs](http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)\n - [ ] [That trailing comma](http://dave.cheney.net/2014/10/04/that-trailing-comma)\n - [ ] [Unofficial Go 1.3.3 ARM tarballs now available](http://dave.cheney.net/2014/10/03/unofficial-go-1-3-3-arm-tarballs-now-available)\n - [ ] [Using // +build to switch between debug and release builds](http://dave.cheney.net/2014/09/28/using-build-to-switch-between-debug-and-release)\n - [ ] [go list, your Swiss army knife](http://dave.cheney.net/2014/09/14/go-list-your-swiss-army-knife)\n - [ ] [How to install multiple versions of Go](http://dave.cheney.net/2014/09/13/how-to-install-multiple-versions)\n - [ ] [Go’s runtime C to Go rewrite, by the numbers](http://dave.cheney.net/2014/09/01/gos-runtime-c-to-go-rewrite-by-the-numbers)\n - [ ] [Go has both make and new functions, what gives ?](http://dave.cheney.net/2014/08/17/go-has-both-make-and-new-functions-what-gives)\n - [ ] [Tinyterm: A silly terminal emulator written in Go](http://dave.cheney.net/2014/08/03/tinyterm-a-silly-terminal-emulator-written-in-go)\n - [ ] [Visualising the Go garbage collector](http://dave.cheney.net/2014/07/11/visualising-the-go-garbage-collector)\n - [ ] [Unofficial Go 1.3 ARM tarballs now available](http://dave.cheney.net/2014/07/08/unofficial-go-1-3-arm-tarballs-now-available)\n - [ ] [Ice cream makers and data races](http://dave.cheney.net/2014/06/27/ice-cream-makers-and-data-races)\n - [ ] [Five things that make Go fast](http://dave.cheney.net/2014/06/07/five-things-that-make-go-fast)\n - [ ] [What does go build build ?](http://dave.cheney.net/2014/06/04/what-does-go-build-build)\n - [ ] [On declaring variables](http://dave.cheney.net/2014/05/24/on-declaring-variables)\n - [ ] [Go 1.3 linker improvements](http://dave.cheney.net/2014/05/22/go-1-3-linker-improvements)\n - [ ] [Accidental method value](http://dave.cheney.net/2014/05/19/accidental-method-value)\n - [ ] [Unofficial Go 1.2.2 and 1.3beta1 tarballs for ARM now available](http://dave.cheney.net/2014/05/09/unofficial-go-1-2-2-and-1-3beta1-tarballs-for-arm-now-available)\n - [ ] [term: low level serial with a high level interface](http://dave.cheney.net/2014/05/08/term-low-level-serial-with-a-high-level-interface)\n - [ ] [autobench-next updated for Go 1.3](http://dave.cheney.net/2014/05/04/autobench-next-updated-for-go-1-3)\n - [ ] [How to install multiple versions of Go](http://dave.cheney.net/2014/04/20/how-to-install-multiple-versions-of-go)\n - [ ] [Associative commentary follow up](http://dave.cheney.net/2014/03/30/associative-commentary-follow-up)\n - [ ] [Associative commentary](http://dave.cheney.net/2014/03/28/associative-commentary)\n - [ ] [The empty struct](http://dave.cheney.net/2014/03/25/the-empty-struct)\n - [ ] [Thoughts on Go package management six months on](http://dave.cheney.net/2014/03/22/thoughts-on-go-package-management-six-months-on)\n - [ ] [Channel Axioms](http://dave.cheney.net/2014/03/19/channel-axioms)\n - [ ] [Pointers in Go](http://dave.cheney.net/2014/03/17/pointers-in-go)\n - [ ] [pdp11 presentation slides](http://dave.cheney.net/2014/03/08/pdp11-presentation-slides)\n - [ ] [Using go test, build and 
install](http://dave.cheney.net/2014/01/21/using-go-test-build-and-install)\n - [ ] [Unofficial Go 1.2 tarballs for ARM now available](http://dave.cheney.net/2013/12/29/unofficial-go-1-2-tarballs-for-arm-now-available)\n - [ ] [Go 1.2 performance improvements](http://dave.cheney.net/2013/12/02/go-12-performance-improvements)\n - [ ] [A Go client for Joyent Manta](http://dave.cheney.net/2013/11/21/a-go-client-for-joyent-manta)\n - [ ] [Benchmarking Go 1.2rc5 vs gccgo](http://dave.cheney.net/2013/11/19/benchmarking-go-1-2rc5-vs-gccgo)\n - [ ] [Evaluation order oddity](http://dave.cheney.net/2013/11/15/evaluation-order-oddity)\n - [ ] [More simple test coverage in Go 1.2](http://dave.cheney.net/2013/11/14/more-simple-test-coverage-in-go-1-2)\n - [ ] [Stupid Go declaration tricks](http://dave.cheney.net/2013/11/14/stupid-go-declaration-tricks)\n - [ ] [Subcommand handling in Go](http://dave.cheney.net/2013/11/07/subcommand-handling-in-go)\n - [ ] [Calling for autobench contributions for Go 1.2](http://dave.cheney.net/2013/11/04/calling-for-autobench-contributions-for-go-1-2)\n - [ ] [How does the go build command work ?](http://dave.cheney.net/2013/10/15/how-does-the-go-build-command-work)\n - [ ] [How to use conditional compilation with the go build tool](http://dave.cheney.net/2013/10/12/how-to-use-conditional-compilation-with-the-go-build-tool)\n - [ ] [Why I think Go package management is important](http://dave.cheney.net/2013/10/10/why-i-think-go-package-management-is-important)\n - [ ] [Simple test coverage with Go 1.2](http://dave.cheney.net/2013/10/07/simple-test-coverage-with-go-1-2)\n - [ ] [#golang tweet popularity](http://dave.cheney.net/2013/09/21/golang-tweet-popularity)\n - [ ] [Release candidate 1 tarballs for ARM now available](http://dave.cheney.net/2013/09/21/release-candidate-1-tarballs-for-arm-now-available)\n - [ ] [Unofficial Go 1.1.2 tarballs for ARM now available](http://dave.cheney.net/2013/09/19/unofficial-go-1-1-2-tarballs-for-arm-now-available)\n - [ ] [How to include C code in your Go package](http://dave.cheney.net/2013/09/07/how-to-include-c-code-in-your-go-package)\n - [ ] [Introducing autobench-next](http://dave.cheney.net/2013/08/26/introducing-autobench-next)\n - [ ] [Go 1.1 on the Cubieboard 2](http://dave.cheney.net/2013/08/06/go-1-1-on-the-cubieboard-2)\n - [ ] [An introduction to cross compilation with Go 1.1](http://dave.cheney.net/2013/07/09/an-introduction-to-cross-compilation-with-go-1-1)\n - [ ] [Introducing profile, super simple profiling for Go programs](http://dave.cheney.net/2013/07/07/introducing-profile-super-simple-profiling-for-go-programs)\n - [ ] [Unofficial Go 1.1.1 tarballs for ARM now available](http://dave.cheney.net/2013/07/02/unofficial-go-1-1-1-tarballs-for-arm-now-available)\n - [ ] [How to write benchmarks in Go](http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go)\n - [ ] [Stress test your Go packages](http://dave.cheney.net/2013/06/19/stress-test-your-go-packages)\n - [ ] [How to install Go 1.1 on CentOS 5.9](http://dave.cheney.net/2013/06/18/how-to-install-go-1-1-on-centos-5)\n - [ ] [You don’t need to set GOROOT, really](http://dave.cheney.net/2013/06/14/you-dont-need-to-set-goroot-really)\n - [ ] [Writing table driven tests in Go](http://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)\n - [ ] [How Go uses Go to build itself](http://dave.cheney.net/2013/06/04/how-go-uses-go-to-build-itself)\n - [ ] [Why is a Goroutine’s stack infinite 
?](http://dave.cheney.net/2013/06/02/why-is-a-goroutines-stack-infinite)\n - [ ] [Go 1.1 performance improvements, part 3](http://dave.cheney.net/2013/05/28/go-11-performance-improvements-part-3)\n - [ ] [Go 1.1 performance improvements, part 2](http://dave.cheney.net/2013/05/25/go-11-performance-improvements-part-2)\n - [ ] [Go 1.1 performance improvements](http://dave.cheney.net/2013/05/21/go-11-performance-improvements)\n - [ ] [Go 1.1 tarballs for linux/arm](http://dave.cheney.net/2013/05/20/go-11-tarballs-for-linux-arm)\n - [ ] [Go and Juju at Canonical slides posted](http://dave.cheney.net/2013/05/11/go-and-juju-at-canonical-slides-posted)\n - [ ] [Curious Channels](http://dave.cheney.net/2013/04/30/curious-channels)\n - [ ] [What is the zero value, and why is it useful ?](http://dave.cheney.net/2013/01/19/what-is-the-zero-value-and-why-is-it-useful)\n - [ ] [Go, the language for emulators](http://dave.cheney.net/2013/01/09/go-the-language-for-emulators)\n - [ ] [Testing Go on the Raspberry Pi running FreeBSD](http://dave.cheney.net/2012/12/31/testing-go-on-the-raspberry-pi-running-freebsd)\n - [ ] [The Go Programming Language (2009)](http://dave.cheney.net/2012/11/18/the-go-programming-language-2009)\n - [ ] [Notes on exploring the compiler flags in the Go compiler suite](http://dave.cheney.net/2012/10/07/notes-on-exploring-the-compiler-flags-in-the-go-compiler-suite)\n - [ ] [Mikio Hara’s ipv4 package](http://dave.cheney.net/2012/09/27/mikio-haras-ipv4-package)\n - [ ] [Installing Go on the Raspberry Pi](http://dave.cheney.net/2012/09/25/installing-go-on-the-raspberry-pi)\n - [ ] [An introduction to cross compilation with Go](http://dave.cheney.net/2012/09/08/an-introduction-to-cross-compilation-with-go)\n - [ ] [Another go at the Next Big Language](http://dave.cheney.net/2012/09/03/another-go-at-the-next-big-language)\n - [ ] [August Go meetup slides](http://dave.cheney.net/2012/08/19/august-go-meetup-slides)\n - [ ] [How the Go language improves expressiveness without sacrificing runtime performance](http://dave.cheney.net/2012/02/11/how-the-go-language-improves-expressiveness-without-sacrificing-runtime-performance)\n - [ ] [Introducing gmx, runtime instrumentation for Go applications](http://dave.cheney.net/2012/02/05/introducing-gmx-runtime-instrumentation-for-go-applications)\n - [ ] [Why Go gets exceptions right](http://dave.cheney.net/2012/01/18/why-go-gets-exceptions-right)\n - [ ] [Three new SSH client features in Go weekly.2011-11-18](http://dave.cheney.net/2011/11/21/three-new-ssh-client-features-in-go-weekly-2011-11-18)\n - [ ] [Scratching my own itch, or how to publish multicast DNS records in Go](http://dave.cheney.net/2011/10/15/scratching-my-own-itch-or-how-to-publish-multicast-dns-records-in-go)\n - [ ] [Simple extended attribute support for Go](http://dave.cheney.net/2011/07/31/simple-extended-attribute-support-for-go)\n - [ ] [Using Clang 2.9 to build Google Go](http://dave.cheney.net/2011/06/07/using-clang-2-9-to-build-google-go)\n - [ ] [Netgear Stora as an ARM development platform](http://dave.cheney.net/2011/03/31/netgear-stora-as-an-arm-development-platform)\n - [ ] [Using Multicast UDP in Go](http://dave.cheney.net/2011/02/19/using-multicast-udp-in-go)\n - [ ] [ack! 
and Go source files](http://dave.cheney.net/2011/02/11/ack-and-go-source-files)\n - [ ] [How to run godoc under launchd on OS X](http://dave.cheney.net/2010/11/21/how-to-run-godoc-under-launchd-on-os-x)\n - [ ] [How to dial remote SSL/TLS services in Go](http://dave.cheney.net/2010/10/05/how-to-dial-remote-ssltls-services-in-go)\n\n#### Others\n\n - [ ] [How to use interfaces in Go](http://jordanorelli.com/post/32665860244/how-to-use-interfaces-in-go)\n\n\n## Wiki\n\n - [ ] [Learn Concurrency](https://github.com/golang/go/wiki/LearnConcurrency)\n\n\n## [Talks](https://talks.golang.org/)\n\n#### 2007\n\n - [ ] [Advanced Topics in Programming Languages: Concurrency/message passing Newsqueak](https://www.youtube.com/watch?v=hB05UFqOtFA)\n\n#### 2009\n\n - [x] [The Go Programming Language](https://www.youtube.com/watch?v=rKnDgT73v8s)\n\n#### 2010\n\n - [x] [Another Go at Language Design](https://www.youtube.com/watch?v=7VcArS4Wpqk)\n - [x] [Public Static Void](https://www.youtube.com/watch?v=5kj5ApnhPAE)\n - [ ] [Origins of Go Concurrency Style](https://www.youtube.com/watch?v=3DtUzH3zoFo)\n\n#### 2011\n\n - [x] [Rob Pike on Google Go: Concurrency, Type System, Memory Management and GC](http://www.infoq.com/interviews/pike-google-go)\n - [ ] [Rob Pike on Parallelism and Concurrency in Programming Languages](http://www.infoq.com/interviews/pike-concurrency)\n - [ ] [Another Go at Language Design](https://www.youtube.com/watch?v=aIgyp5nvdqc)\n - [x] [Writing Web Apps in Go](https://www.youtube.com/watch?v=-i0hat7pdpk)\n - [ ] [Practical Go Programming](https://www.youtube.com/watch?v=2-pPAvqyluI)\n\n#### 2012\n\n - [ ] [Go Concurrency Patterns](https://www.youtube.com/watch?v=f6kdp27TYZs) ([slides](http://talks.golang.org/2012/concurrency.slide))\n - [ ] [Go: a simple programming environment](http://vimeo.com/53221558) ([slides](http://talks.golang.org/2012/simple.slide))\n - [ ] [Concurrency Is Not Parallelism](http://vimeo.com/49718712)\n - [x] [Going Go](https://www.youtube.com/watch?v=on5DeUyWDqI)\n - [x] [The path to Go 1](https://www.youtube.com/watch?v=bj9T2c2Xk_s)\n - [x] [Meet the Go Team](https://www.youtube.com/watch?v=sln-gJaURzk)\n\n#### 2013\n\n - [ ] [Advanced Go Concurrency Patterns](https://www.youtube.com/watch?v=QDDwwePbDtw) ([slides](http://talks.golang.org/2013/advconc.slide))\n - [ ] [Go at Google](http://www.infoq.com/presentations/Go-Google)\n - [x] [Fireside Chat with the Go Team](https://www.youtube.com/watch?v=p9VUCp98ay4)\n\n#### 2014\n\n - [x] [Toward Go 1.3, and beyond](https://www.youtube.com/watch?v=mQ4hwLgSvUs)\n - [x] [Opening Keynote by Rob Pike](https://www.youtube.com/watch?v=VoS7DsT1rdM)\n - [x] [Implementing a bignum calculator](https://www.youtube.com/watch?v=PXoG0WX0r_E)\n\n#### 2015\n\n - [x] [GopherFest 2015](https://www.youtube.com/watch?v=Fx304EfqtMo)\n - [ ] [Go and the Modern Enterprise](https://www.youtube.com/watch?v=iFR_7AKkJFU)\n - [x] [Gophercon - Prometheus: Designing and Implementing a Modern Monitoring Solution in Go](https://www.youtube.com/watch?v=1V7eJ0jN8-E)\n - [x] [GopherCon - Uptime: Building Resilient Services with Go](https://www.youtube.com/watch?v=PyBJQA4clfc)\n\n\n## Screencasts\n\n - [x] [Writing, building, installing, and testing Go code](https://www.youtube.com/watch?v=XCsL89YtqCs)\n\n\n## [Code Walks](http://golang.org/doc/codewalk/)\n\n - [ ] [Share Memory By Communicating](http://golang.org/doc/codewalk/sharemem/)\n - [ ] [First-Class Functions in Go](http://golang.org/doc/codewalk/functions/)\n" }, { "alpha_fraction": 
0.5611729025840759, "alphanum_fraction": 0.5642062425613403, "avg_line_length": 15.483333587646484, "blob_id": "aca095a83821eb6a6a721e52b10cd35d45ae56af", "content_id": "a95437578e4df5147a93c9ee09464b027bc6dfdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 989, "license_type": "no_license", "max_line_length": 79, "num_lines": 60, "path": "/algorithms/graph/dfs.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\ntype dfsData struct {\n\tparent int\n\tstart int\n\tend int\n}\n\nfunc DFS(g Graph, s int, f func(Node)) {\n\tvar time int\n\n\tdata := make(map[int]*dfsData)\n\n\tdata[s] = &dfsData{\n\t\tparent: -1,\n\t}\n\tdfsVisit(g, s, f, data, &time)\n\n\tfor i := range g.Nodes {\n\t\tif _, ok := data[i]; !ok {\n\t\t\tdata[i] = &dfsData{\n\t\t\t\tparent: -1,\n\t\t\t}\n\t\t\tdfsVisit(g, i, f, data, &time)\n\t\t}\n\t}\n}\n\nfunc dfsVisit(g Graph, s int, f func(Node), data map[int]*dfsData, time *int) {\n\t*time++\n\tdata[s].start = *time\n\tdefer func() {\n\t\tf(g.Nodes[s])\n\t\t*time++\n\t\tdata[s].end = *time\n\t}()\n\n\tfor i, e := range g.Edges[s] {\n\t\tif _, ok := data[e.Target]; !ok {\n\t\t\tdata[e.Target] = &dfsData{\n\t\t\t\tparent: s,\n\t\t\t}\n\t\t\tdfsVisit(g, e.Target, f, data, time)\n\t\t\tg.Edges[s][i].Type = TreeEdge\n\t\t\tcontinue\n\t\t}\n\n\t\tif data[e.Target].end == 0 {\n\t\t\tg.Edges[s][i].Type = BackwardEdge\n\t\t\tcontinue\n\t\t}\n\n\t\tif data[e.Target].start > data[s].start {\n\t\t\tg.Edges[s][i].Type = ForwardEdge\n\t\t\tcontinue\n\t\t}\n\n\t\tg.Edges[s][i].Type = CrossEdge\n\t}\n}\n" }, { "alpha_fraction": 0.6854838728904724, "alphanum_fraction": 0.7016128897666931, "avg_line_length": 30, "blob_id": "0bf6da84b3606a92d14d986c7b8bdd1307ec5d54", "content_id": "a2c9843dffa3e26c633a73b37714d9ccf5ea3848", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 124, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/elasticsearch-experiment/docker-up.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/bash\ncontainer=$(docker run -Pd elasticsearch)\necho $container\ndocker port $container | head -n 1 | awk '{print $3}'\n" }, { "alpha_fraction": 0.6022304892539978, "alphanum_fraction": 0.613382875919342, "avg_line_length": 19.69230842590332, "blob_id": "84294d2b0277c5143cc8c224921e0e98df05f5a9", "content_id": "f9f07665fecdabe67725be3bdce408121ccc90e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 269, "license_type": "no_license", "max_line_length": 63, "num_lines": 13, "path": "/algorithms/sort/heap.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nimport \"github.com/jasonkeene/playground/data-structures/queue\"\n\nfunc Heap(a []int) {\n\tq := queue.NewHeap()\n\tfor _, v := range a {\n\t\tq.Insert(queue.Element{Key: float64(v), Value: v})\n\t}\n\tfor i := 0; !q.Empty(); i++ {\n\t\ta[i] = q.PopMin().Value.(int)\n\t}\n}\n" }, { "alpha_fraction": 0.7663043737411499, "alphanum_fraction": 0.7663043737411499, "avg_line_length": 15.727272987365723, "blob_id": "45ad7bea2bca186ed575a79ce9836ad983e5036a", "content_id": "aff9b5752e803268c446809c6ec1adc03683feb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 184, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/algorithms/sort/selection_test.go", 
"repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"testing\"\n\n\tsort \"github.com/jasonkeene/playground/algorithms/sort\"\n)\n\nfunc TestSelectionCorrectness(t *testing.T) {\n\ttestCorrectness(t, sort.Selection)\n}\n" }, { "alpha_fraction": 0.5566878914833069, "alphanum_fraction": 0.5643312335014343, "avg_line_length": 13.811320304870605, "blob_id": "687c26a0585fb54f62efd3eb754ae5eb270ebbbb", "content_id": "530dac39d4ab84ca4bf96f700ed1c452a835b5c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 785, "license_type": "no_license", "max_line_length": 49, "num_lines": 53, "path": "/algorithms/graph/negative_weight_cycle.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\nimport \"sort\"\n\nfunc DetectNegativeWeightCycle(\n\ts int,\n\tg Graph,\n\tshortest []float64,\n\tprev []int,\n) []int {\n\tvar (\n\t\tpresent bool\n\t\tv int\n\t)\n\tfor u, es := range g.Edges {\n\t\tfor _, e := range es {\n\t\t\tif shortest[u]+e.Weight < shortest[e.Target] {\n\t\t\t\tpresent = true\n\t\t\t\tv = e.Target\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif !present {\n\t\treturn nil\n\t}\n\n\tvisited := make([]bool, len(shortest))\n\tfor !visited[v] {\n\t\tvisited[v] = true\n\t\tv = prev[v]\n\t}\n\n\tvisited = make([]bool, len(shortest))\n\tinvalid := make([]int, 0)\n\tq := []int{v}\n\tfor len(q) > 0 {\n\t\tvar x int\n\t\tx, q = q[0], q[1:len(q)]\n\t\tif visited[x] {\n\t\t\tcontinue\n\t\t}\n\t\tvisited[x] = true\n\t\tinvalid = append(invalid, x)\n\n\t\tfor _, e := range g.Edges[x] {\n\t\t\tq = append(q, e.Target)\n\t\t}\n\t}\n\n\tsort.Ints(invalid)\n\treturn invalid\n}\n" }, { "alpha_fraction": 0.644957959651947, "alphanum_fraction": 0.651260495185852, "avg_line_length": 18.79166603088379, "blob_id": "8231ade9d8879f3f9b06774d1806fe8bcfcea7ad", "content_id": "e5c064ffe592d049bb9e5be1e9033922557a859f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 476, "license_type": "no_license", "max_line_length": 39, "num_lines": 24, "path": "/cpp-how-to-program/chapter9/Time.h", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n#ifndef TIME_H\n#define TIME_H\n\nclass Time\n{\npublic:\n explicit Time(int=0, int=0, int=0);\n ~Time();\n void setTime(int, int, int);\n void setHour(int);\n unsigned int getHour() const;\n void setMinute(int);\n unsigned int getMinute() const;\n void setSecond(int);\n unsigned int getSecond() const;\n void printUniversal() const;\n void printStandard() const;\nprivate:\n unsigned int hour;\n unsigned int minute;\n unsigned int second;\n};\n\n#endif\n" }, { "alpha_fraction": 0.7064056992530823, "alphanum_fraction": 0.7117437720298767, "avg_line_length": 15.529411315917969, "blob_id": "2b341c7607f7fec81351fcf34402f23ce9112d4f", "content_id": "a96a0436e1a0eb8345b8f72df994ed04c3396646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 562, "license_type": "no_license", "max_line_length": 55, "num_lines": 34, "path": "/saltstack/prototypes/bin/bootstrap-app.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# update packages\n#apt-get update\n#apt-get upgrade --yes\n\n# install salt minion\n/vagrant/bin/install-minion.sh\n\n# configure salt minion to pull from master\ncat << EOF > /etc/salt/minion\nmaster: $1\ngrains:\n roles:\n - app\n access:\n - operations\n - 
developers\nEOF\necho $2 > /etc/salt/minion_id\n\n# copy over test_key\nmkdir -p /etc/salt/pki/minion\ncp /vagrant/salt/minion.{pem,pub} /etc/salt/pki/minion/\n\n# set hostname\nsalt-call network.mod_hostname $2\n\n# start miniond\nservice salt-minion start\n\necho\necho\necho \"Provision complete!\"\n" }, { "alpha_fraction": 0.5644699335098267, "alphanum_fraction": 0.5845271944999695, "avg_line_length": 23.34883689880371, "blob_id": "1fb994be2aa42971d7f76de2cd8c8867a7685fe7", "content_id": "70991e9ff785b4615ddbb631518dbe52f9188836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 57, "num_lines": 43, "path": "/zeromq-the-guide/chapter1/ventilator.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n // socket to send messages on\n void *sender = zmq_socket(context, ZMQ_PUSH);\n zmq_bind(sender, \"tcp://*:5557\");\n\n // socket to send start of batch message on\n void *sink = zmq_socket(context, ZMQ_PUSH);\n zmq_connect(sink, \"tcp://localhost:5558\");\n\n printf(\"Press enter to start messages...\\n\");\n getchar();\n printf(\"Sending tasks to workers.\\n\");\n\n // seed random number generator\n srandom((unsigned)time(NULL));\n\n // signal sink to start counting\n s_send(sink, \"0\");\n\n int i;\n int total_msec = 0;\n for (i = 0; i < 100; i++) {\n int workload;\n workload = randof(100) + 1;\n total_msec += workload;\n printf(\"%d\\n\", total_msec);\n char string[10];\n snprintf(string, sizeof(string), \"%d\", workload);\n s_send(sender, string);\n }\n printf(\"Total expected cost: %d msec\\n\", total_msec);\n\n zmq_close(sender);\n zmq_ctx_destroy(context);\n return 0;\n}\n" }, { "alpha_fraction": 0.3992805778980255, "alphanum_fraction": 0.4568345248699188, "avg_line_length": 22.16666603088379, "blob_id": "cc64feaa02a3ea14381a1319448bac40287bb94a", "content_id": "0644b0039c2cf38e817354000b8c3ff9d569c641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/leetcode/pascals-triangle/test_solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "from solution import pascals_triangle\n\n\nclass DescribePascalsTriangle:\n def it_builds_a_valid_triangle(self):\n assert pascals_triangle(5) == [\n [1],\n [1, 1],\n [1, 2, 1],\n [1, 3, 3, 1],\n [1, 4, 6, 4, 1],\n ]\n" }, { "alpha_fraction": 0.6338329911231995, "alphanum_fraction": 0.6445395946502686, "avg_line_length": 19.30434799194336, "blob_id": "d0926d1ade332f8168179c691448d2cc0028eb2d", "content_id": "761f4af8a4b81417a242219092dee559b2d0610b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 65, "num_lines": 23, "path": "/zerorpc-streaming/rando.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "import hashlib\nimport itertools\nfrom random import SystemRandom\nimport time\n\nimport gevent\nimport zerorpc\n\n\ndef random_hash():\n return hashlib.sha1(str(SystemRandom().random())).hexdigest()\n\n\[email protected]\ndef stream_randomness():\n for i in itertools.count():\n rando = random_hash()\n print \"yielding\", rando\n yield rando\n if i == 7:\n print \"simulating network outage\"\n time.sleep(5)\n 
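The chapter 1 ventilator above fans workloads out to a pool of workers over a ZeroMQ PUSH socket and signals a sink. For readers without libzmq handy, the same fan-out/fan-in shape in plain Go channels (my analogy, not a repo file):

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
)

func main() {
	tasks := make(chan int)   // stands in for the ventilator's PUSH socket
	results := make(chan int) // stands in for the sink's PULL socket

	var wg sync.WaitGroup
	for w := 0; w < 4; w++ { // the worker pool
		wg.Add(1)
		go func() {
			defer wg.Done()
			for msec := range tasks {
				results <- msec // pretend to work, report the cost
			}
		}()
	}
	go func() { wg.Wait(); close(results) }()

	go func() {
		for i := 0; i < 100; i++ {
			tasks <- rand.Intn(100) + 1 // same workload range as ventilator.c
		}
		close(tasks)
	}()

	total := 0
	for msec := range results {
		total += msec
	}
	fmt.Println("total expected cost:", total, "msec")
}
```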
gevent.sleep(0.5)\n" }, { "alpha_fraction": 0.5444444417953491, "alphanum_fraction": 0.5592592358589172, "avg_line_length": 17.620689392089844, "blob_id": "f04c0b54af8718137b86c367e3f8361d93248264", "content_id": "734108a6d586020ff4854d27c96c4670a8d47472", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 540, "license_type": "no_license", "max_line_length": 52, "num_lines": 29, "path": "/zeromq-the-guide/chapter2/rrworker.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n\n#include <zmq.h>\n\n#include \"zhelpers.h\"\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n void *responder = zmq_socket(context, ZMQ_REP);\n zmq_connect(responder, \"tcp://localhost:22222\");\n\n while (1) {\n char *string = s_recv(responder);\n printf(\"Got \\\"%s\\\"\\n\", string);\n free(string);\n sleep(1);\n printf(\"Sending \\\"%s\\\"\\n\", \"World\");\n s_send(responder, \"World\");\n }\n\n zmq_close(responder);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6811926364898682, "alphanum_fraction": 0.6811926364898682, "avg_line_length": 20.75, "blob_id": "0314b79cc1fe185b85c534359dafc0621328a1af", "content_id": "7355449b2a58100c08e02bb4117afe54bd917f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 436, "license_type": "no_license", "max_line_length": 112, "num_lines": 20, "path": "/zeromq-the-guide/chapter2/zeroapi/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nall: client server\n.PHONY: all\n\nbin:\n\t[ ! -d bin ] && mkdir bin\n\nlib:\n\t[ ! -d lib ] && mkdir lib\n\nutils: lib\n\tclang -Wall -c utils.c -o lib/utils.o\n\nclient: bin utils\n\tclang -Wall -Wno-unused-function lib/utils.o client.c -o bin/client -L/opt/local/lib -I/opt/local/include -lzmq\n\nserver: bin utils\n\tclang -Wall -Wno-unused-function lib/utils.o server.c -o bin/server -L/opt/local/lib -I/opt/local/include -lzmq\n\nclean:\n\trm -r bin lib\n" }, { "alpha_fraction": 0.7167701721191406, "alphanum_fraction": 0.7167701721191406, "avg_line_length": 31.15999984741211, "blob_id": "65654ba1a4831b811757c8c8c0e021cf657eebc1", "content_id": "e1abe9ed784c2dda31ff1800572e01128a20c207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 805, "license_type": "no_license", "max_line_length": 108, "num_lines": 25, "path": "/zeromq-the-guide/chapter2/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nall: ventilator worker sink rr\n.PHONY: all\n\nbin:\n\t[ ! -d bin ] && mkdir bin\n\nlib:\n\t[ ! 
-d lib ] && mkdir lib\n\nventilator: bin\n\tclang -Wall -Wno-unused-function ventilator.c -o bin/ventilator -L/opt/local/lib -I/opt/local/include -lzmq\n\nworker: bin\n\tclang -Wall -Wno-unused-function worker.c -o bin/worker -L/opt/local/lib -I/opt/local/include -lzmq\n\nsink: bin\n\tclang -Wall -Wno-unused-function sink.c -o bin/sink -L/opt/local/lib -I/opt/local/include -lzmq\n\nrr: bin\n\tclang -Wall -Wno-unused-function rrclient.c -o bin/rrclient -L/opt/local/lib -I/opt/local/include -lzmq\n\tclang -Wall -Wno-unused-function rrbroker.c -o bin/rrbroker -L/opt/local/lib -I/opt/local/include -lzmq\n\tclang -Wall -Wno-unused-function rrworker.c -o bin/rrworker -L/opt/local/lib -I/opt/local/include -lzmq\n\nclean:\n\trm -r bin lib\n" }, { "alpha_fraction": 0.3781512677669525, "alphanum_fraction": 0.6092436909675598, "avg_line_length": 14.800000190734863, "blob_id": "efb89648aaacdbc4f9a8fcfbd6ee0af9c9e46d50", "content_id": "faa9a926030d92df03160cef6f62b8d730abdf36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 238, "license_type": "no_license", "max_line_length": 26, "num_lines": 15, "path": "/x86-64-assembly-language-programming-with-ubuntu/exercises/ch-4-quiz.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n1. yasm\n2. ;\n3. .data\n4. .bss\n5. .text\n6. 1. bNum db 10\n 2. wNum dw 10291\n 3. dwNum dd 2126010\n 4. qwNum dq 10000000000\n7. 1. bArr resb 100\n 2. wArr resw 3000\n 3. dwArr resd 200\n 4. qArr resq 5000\n8. global _start\n _start:\n" }, { "alpha_fraction": 0.6692913174629211, "alphanum_fraction": 0.6929134130477905, "avg_line_length": 49.599998474121094, "blob_id": "4272dc44cf8d2c8c4d9d1538d40a7570616f12e1", "content_id": "8ea302b6dc2c2cbde8aa0f3a2084d27d0be9bcb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "no_license", "max_line_length": 99, "num_lines": 5, "path": "/css-grid/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## Guides\n\n- [x] [CSS Grid – Table layout is back](https://developers.google.com/web/updates/2017/01/css-grid)\n- [ ] [A Complete Guide to Grid](https://css-tricks.com/snippets/css/complete-guide-grid/)\n- [ ] [Grid by Example](http://gridbyexample.com/)\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7185184955596924, "avg_line_length": 9.384614944458008, "blob_id": "a06dbc3045e0d4af8735a882af87f39e7d789c67", "content_id": "f86173c01f131d0f8f2417a0a533160ecde1b475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 270, "license_type": "no_license", "max_line_length": 21, "num_lines": 26, "path": "/algorithms/graph/graph.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package graph\n\ntype Graph struct {\n\tNodes []Node\n\tEdges [][]Edge\n}\n\ntype Node struct {\n\tLabel string\n}\n\ntype Edge struct {\n\tTarget int\n\tWeight float64\n\tType EdgeType\n}\n\ntype EdgeType int\n\nconst (\n\tNone EdgeType = iota\n\tTreeEdge\n\tForwardEdge\n\tBackwardEdge\n\tCrossEdge\n)\n" }, { "alpha_fraction": 0.5963541865348816, "alphanum_fraction": 0.6223958134651184, "avg_line_length": 11, "blob_id": "ca5f32bffafeca26b9b3660a2ae766fcaa2d9c15", "content_id": "8d3ee643f98ad81321e733ad4829f404110c95fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 384, "license_type": "no_license", "max_line_length": 56, 
"num_lines": 32, "path": "/frida/tracee/main.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"golang.org/x/text/message\"\n)\n\nvar (\n\tcount uint64\n\tp = message.NewPrinter(message.MatchLanguage(\"en\"))\n)\n\nfunc f() {\n\tatomic.AddUint64(&count, 1)\n}\n\nfunc reportCounters() {\n\tfor {\n\t\tc := atomic.SwapUint64(&count, 0)\n\t\tp.Printf(\"%15d ops/s\\n\", c)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc main() {\n\tgo reportCounters()\n\tfor {\n\t\tf()\n\t}\n}\n" }, { "alpha_fraction": 0.7228915691375732, "alphanum_fraction": 0.7550200819969177, "avg_line_length": 19.75, "blob_id": "48b5c775f73cc2f5ba199c65df5d61606853172a", "content_id": "2cf30c038c79cfcefacc61ca1565a66a2e855dca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 249, "license_type": "no_license", "max_line_length": 64, "num_lines": 12, "path": "/the-go-programming-language/ch3/src/surface/README.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "To run the server:\n\n`go run main.go`\n\nTo view the surface:\n\n`http://localhost:8080/`\n\nTo view the surface animated (must be in a browser that supports\nmultipart/x-mixed-replace which Chrome unfortunately does not):\n\n`http://localhost:8080/animated`\n" }, { "alpha_fraction": 0.6058449149131775, "alphanum_fraction": 0.6346946358680725, "avg_line_length": 17.9290771484375, "blob_id": "ee8ccea2610fb96a231be9e394ea1f2e21d2a39f", "content_id": "a9fb7559cfe0ba3807a203f21069e5cd12ac8ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2669, "license_type": "no_license", "max_line_length": 77, "num_lines": 141, "path": "/data-structures/queue/priority_queue_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package queue_test\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/data-structures/queue\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc TestHeapConstructor(t *testing.T) {\n\ttestConstructor(t, func(qes ...queue.Element) queue.Priority {\n\t\treturn queue.NewHeap(qes...)\n\t})\n}\n\nfunc TestArrayConstructor(t *testing.T) {\n\ttestConstructor(t, func(qes ...queue.Element) queue.Priority {\n\t\treturn queue.NewArray(qes...)\n\t})\n}\n\nfunc testConstructor(t *testing.T, f func(...queue.Element) queue.Priority) {\n\th := f([]queue.Element{\n\t\t{Key: 0},\n\t\t{Key: 1},\n\t\t{Key: 2},\n\t\t{Key: 3},\n\t\t{Key: math.Inf(1)},\n\t}...)\n\th.Insert(queue.Element{Key: 1.5})\n\th.Insert(queue.Element{Key: 2.5})\n\th.Insert(queue.Element{Key: 9})\n\th.Insert(queue.Element{Key: 2.1})\n\n\tresult := make([]float64, 0, 6)\n\tfor !h.Empty() {\n\t\tresult = append(result, h.PopMin().Key)\n\t}\n\n\texpected := []float64{0, 1, 1.5, 2, 2.1, 2.5, 3, 9, math.Inf(1)}\n\tif !cmp.Equal(result, expected) {\n\t\tt.Fatal(cmp.Diff(result, expected))\n\t}\n}\n\nfunc TestHeapReturnsMins(t *testing.T) {\n\ttestReturnsMins(t, queue.NewHeap(), 100000)\n}\n\nfunc TestArrayReturnsMins(t *testing.T) {\n\ttestReturnsMins(t, queue.NewArray(), 10000)\n}\n\nfunc testReturnsMins(t *testing.T, q queue.Priority, count int) {\n\tnums := make([]float64, count)\n\tfor i := 0; i < count; i++ {\n\t\tnums[i] = rand.Float64()\n\t}\n\n\tfor _, v := range nums {\n\t\tq.Insert(queue.Element{Key: 
v})\n\t}\n\n\tsort.Float64s(nums)\n\n\tresult := make([]float64, 0, count)\n\tfor !q.Empty() {\n\t\tresult = append(result, q.PopMin().Key)\n\t}\n\n\tif !reflect.DeepEqual(result, nums) {\n\t\tt.Fatalf(\"Priority did not return mins in the correct order\")\n\t}\n}\n\nfunc TestHeapDecrease(t *testing.T) {\n\ttestDecrease(t, queue.NewHeap())\n}\n\nfunc TestArrayDecrease(t *testing.T) {\n\ttestDecrease(t, queue.NewArray())\n}\n\nfunc testDecrease(t *testing.T, q queue.Priority) {\n\tq.Insert(queue.Element{\n\t\tKey: 1,\n\t\tValue: 1,\n\t})\n\tq.Insert(queue.Element{\n\t\tKey: 3,\n\t\tValue: 3,\n\t})\n\tq.Insert(queue.Element{\n\t\tKey: 6,\n\t\tValue: 6,\n\t})\n\tq.Insert(queue.Element{\n\t\tKey: 7,\n\t\tValue: 7,\n\t})\n\tq.Insert(queue.Element{\n\t\tKey: 10,\n\t\tValue: 10,\n\t})\n\tq.Insert(queue.Element{\n\t\tKey: 11,\n\t\tValue: 11,\n\t})\n\n\tconst v = \"some unique value\"\n\tq.Insert(queue.Element{\n\t\tKey: 10,\n\t\tValue: v,\n\t})\n\tq.Decrease(queue.Element{\n\t\tKey: 7,\n\t\tValue: v,\n\t})\n\tq.Decrease(queue.Element{\n\t\tKey: 4,\n\t\tValue: v,\n\t})\n\n\tresult := make([]queue.Element, 0, 7)\n\tfor !q.Empty() {\n\t\tresult = append(result, q.PopMin())\n\t}\n\n\tif result[2].Key != 4 || result[2].Value.(string) != v {\n\t\tt.Fatalf(\"Priority did not return correct element: %v\", result[2])\n\t}\n}\n" }, { "alpha_fraction": 0.371335506439209, "alphanum_fraction": 0.371335506439209, "avg_line_length": 37.125, "blob_id": "7002e8fc2261edd3f3e1849c480f9124d23e0945", "content_id": "da024d28a2fe81d07a6cd365cd9a4049a9caf05d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/pickle_rename/test_lib.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n\nclass Foo(object):\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"%s at %s instance of %s.%s\" % (self.name, id(self),\n type(self).__module__,\n type(self).__name__)\n" }, { "alpha_fraction": 0.5916370153427124, "alphanum_fraction": 0.6129893064498901, "avg_line_length": 21.019607543945312, "blob_id": "bd53bf2a7a26e8cfdb4bd5690e530cb6849dc870", "content_id": "2073e4dedd467a47c13fde7517f8ee33510dbc31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/zeromq-the-guide/chapter1/weather.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"weather.h\"\n\n\n#define WEATHER_FMT \"Weather{%05d, %.1f, %.2f}\"\n\n\nWeather *Weather_create(int zip, float temp, float humidity)\n{\n Weather *weather = malloc(sizeof(Weather));\n weather->zip = zip;\n weather->temp = temp;\n weather->humidity = humidity;\n weather->str = NULL;\n Weather_str(weather);\n return weather;\n}\n\n\nWeather *Weather_create_fake()\n{\n return Weather_create(arc4random() % 99999,\n (int)(arc4random() % 215) - 80,\n (int)(arc4random() % 50) + 10);\n}\n\n\nvoid Weather_destroy(Weather *weather)\n{\n if (weather->str != NULL) free(weather->str);\n free(weather);\n}\n\n\nvoid Weather_print(Weather *weather)\n{\n printf(WEATHER_FMT\"\\n\", weather->zip, weather->temp,\n weather->humidity);\n}\n\n\nvoid Weather_str(Weather *weather)\n{\n if (weather->str != NULL) free(weather->str);\n char buffer[256];\n snprintf(buffer, sizeof(buffer), WEATHER_FMT, 
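The priority-queue tests above drive `Insert`, `Decrease`, `PopMin`, and `Empty` against two implementations, but the interface itself is not included in this dump. A plausible shape inferred from the call sites (my reconstruction, not the repo's actual source):

```go
package queue

// Element pairs a float64 priority with an arbitrary payload, matching
// the queue.Element literals in the tests above.
type Element struct {
	Key   float64
	Value interface{}
}

// Priority is inferred from the call sites: NewHeap and NewArray both
// return implementations of it.
type Priority interface {
	Insert(Element)
	// Decrease lowers the key of the element whose Value matches.
	Decrease(Element)
	PopMin() Element
	Empty() bool
}
```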
weather->zip, weather->temp,\n weather->humidity);\n weather->str = strndup(buffer, sizeof(buffer));\n}\n\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 17.47058868408203, "blob_id": "e28fa4000483e99485bdda0f4bc31cfa779f4a93", "content_id": "1d88b7c62be9b88cb3303572fc9c653e634328f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 315, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, "path": "/zeromq-the-guide/chapter1/weather.h", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\ntypedef struct Weather {\n int zip;\n float temp;\n float humidity;\n char *str;\n} Weather;\n\n\nWeather *Weather_create(int zip, float temp, float humidity);\n\nWeather *Weather_create_fake();\n\nvoid Weather_destroy(Weather *weather);\n\nvoid Weather_print(Weather *weather);\n\nvoid Weather_str(Weather *weather);\n" }, { "alpha_fraction": 0.3996763825416565, "alphanum_fraction": 0.4530744254589081, "avg_line_length": 13.714285850524902, "blob_id": "24c606b1fa9af76203a590c2e31a41915f8b162c", "content_id": "7d6e69e2f9723022a14914f0abb3c877bfe90d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 618, "license_type": "no_license", "max_line_length": 45, "num_lines": 42, "path": "/algorithms/sort/merge.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nimport \"math\"\n\nfunc Merge(a []int) {\n\tm(a, 0, len(a))\n}\n\nfunc m(a []int, q, r int) {\n\tif q >= r-1 {\n\t\treturn\n\t}\n\tmid := (q + r) / 2\n\tm(a, q, mid)\n\tm(a, mid, r)\n\tmerge(a, q, mid, r)\n}\n\nfunc merge(a []int, q, mid, r int) {\n\ta1 := make([]int, mid-q+1)\n\tfor i, j := q, 0; i < mid; i, j = i+1, j+1 {\n\t\ta1[j] = a[i]\n\t}\n\ta1[mid-q] = math.MaxInt64\n\n\ta2 := make([]int, r-mid+1)\n\tfor i, j := mid, 0; i < r; i, j = i+1, j+1 {\n\t\ta2[j] = a[i]\n\t}\n\ta2[r-mid] = math.MaxInt64\n\n\tvar a1c, a2c int\n\tfor i := q; i < r; i++ {\n\t\tif a1[a1c] < a2[a2c] {\n\t\t\ta[i] = a1[a1c]\n\t\t\ta1c++\n\t\t} else {\n\t\t\ta[i] = a2[a2c]\n\t\t\ta2c++\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.551948070526123, "alphanum_fraction": 0.5670995712280273, "avg_line_length": 13.4375, "blob_id": "a352cf27dbd7b52a7a686b926b0b49ab1fa5d06d", "content_id": "a5d060f0c5375ac7db6f0ac901a7ab4392c03de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 462, "license_type": "no_license", "max_line_length": 37, "num_lines": 32, "path": "/golang/another_go_at_language_design/type_switch.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\ntype Thingy struct{}\n\nfunc main() {\n\tch := make(chan interface{})\n\tgo func() {\n\t\tch <- 1\n\t\tch <- 1.1\n\t\tch <- \"test\"\n\t\tch <- true\n\t\tch <- new(Thingy)\n\t\tch <- Thingy{}\n\t\tclose(ch)\n\t}()\n\tfor x := range ch {\n\t\tswitch x.(type) {\n\t\tcase bool:\n\t\t\tfmt.Println(\"bool\")\n\t\tcase string:\n\t\t\tfmt.Println(\"string\")\n\t\tcase int:\n\t\t\tfmt.Println(\"int\")\n\t\tcase float64:\n\t\t\tfmt.Println(\"float64\")\n\t\tdefault:\n\t\t\tfmt.Printf(\"unknown type %T\\n\", x)\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5119255185127258, "alphanum_fraction": 0.5206515192985535, "avg_line_length": 37.20000076293945, "blob_id": "1918cef3076cb60fe463fd3a1f99b045e06ed7d9", "content_id": "87b39f0a2cdaa86e8423e4c1bed4e5c536c1328b", "detected_licenses": [], 
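merge.go above pads each half with a `math.MaxInt64` sentinel so the merge loop needs no bounds checks: exactly `r-q` elements are consumed, and a sentinel can never be chosen ahead of a real value from the other half. A quick usage sketch (mine; the import alias mirrors the one selection_test.go uses):

```go
package main

import (
	"fmt"

	sort "github.com/jasonkeene/playground/algorithms/sort"
)

func main() {
	a := []int{5, 2, 4, 7, 1, 3, 2, 6}
	sort.Merge(a)  // sorts in place
	fmt.Println(a) // [1 2 2 3 4 5 6 7]
}
```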
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1719, "license_type": "no_license", "max_line_length": 99, "num_lines": 45, "path": "/cpp-how-to-program/chapter8/sizeof.cpp", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nint main()\n{\n int x = 12;\n int my_arr[] = {1, 2, 3, 4, 5, 6};\n\n std::cout << \"sizeof x: \" << sizeof x << std::endl;\n std::cout << \"sizeof my_arr: \" << sizeof my_arr << std::endl;\n std::cout << \"sizeof my_arr[0]: \" << sizeof my_arr[0] << std::endl;\n std::cout << \"sizeof my_arr/sizeof my_arr[0]: \" << sizeof my_arr/sizeof my_arr[0] << std::endl;\n\n char c;\n short s;\n int i;\n long l;\n long long ll;\n float f;\n double d;\n long double ld;\n int array[20];\n int *ptr = array;\n\n std::cout << std::endl;\n std::cout << \"sizeof c: \" << sizeof c << std::endl;\n std::cout << \"sizeof (char): \" << sizeof (char) << std::endl;\n std::cout << \"sizeof s: \" << sizeof s << std::endl;\n std::cout << \"sizeof (short): \" << sizeof (short) << std::endl;\n std::cout << \"sizeof i: \" << sizeof i << std::endl;\n std::cout << \"sizeof (int): \" << sizeof (int) << std::endl;\n std::cout << \"sizeof l: \" << sizeof l << std::endl;\n std::cout << \"sizeof (long): \" << sizeof (long) << std::endl;\n std::cout << \"sizeof ll: \" << sizeof ll << std::endl;\n std::cout << \"sizeof (long long): \" << sizeof (long long) << std::endl;\n std::cout << \"sizeof f: \" << sizeof f << std::endl;\n std::cout << \"sizeof (float): \" << sizeof (float) << std::endl;\n std::cout << \"sizeof d: \" << sizeof d << std::endl;\n std::cout << \"sizeof (double): \" << sizeof (double) << std::endl;\n std::cout << \"sizeof ld: \" << sizeof ld << std::endl;\n std::cout << \"sizeof (long double): \" << sizeof (long double) << std::endl;\n std::cout << \"sizeof array: \" << sizeof array << std::endl;\n std::cout << \"sizeof ptr: \" << sizeof ptr << std::endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.558516800403595, "alphanum_fraction": 0.5666280388832092, "avg_line_length": 13.627119064331055, "blob_id": "a6ad4f8cbb0fe263a407b9c40e3ec98da5053e53", "content_id": "9b6e4c67e6f16a505c4c721789f7843912de01db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 863, "license_type": "no_license", "max_line_length": 58, "num_lines": 59, "path": "/golang/defer_panic_and_recover/defer_panic_and_recover.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n// This function's defer can mutate returned variables\nfunc a() (x int) {\n\tx = 0\n\tdefer func() {\n\t\tx++\n\t}()\n\treturn x\n}\n\n// This function's defer can not mutate returned variables\nfunc b() int {\n\tx := 0\n\tdefer func() {\n\t\tx++\n\t}()\n\treturn x\n}\n\nfunc safe_call(work func()) (x int, err error) {\n\tx = 42\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tx = 0\n\t\t\terr = errors.New(e.(string))\n\t\t}\n\t}()\n\twork()\n\treturn\n}\n\nfunc main() {\n\tfmt.Println(a()) // outputs 1\n\tfmt.Println(b()) // outputs 0\n\n\tval, err := safe_call(func() {\n\t\tfmt.Println(\"Doing normal work\")\n\t})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Printf(\"val was: %d\\n\", val)\n\t}\n\n\tval, err = safe_call(func() {\n\t\tpanic(\"OMG!\")\n\t})\n\tif err != nil {\n\t\tfmt.Println(err) // outputs OMG! 
but doesn't halt\n\t} else {\n\t\tfmt.Printf(\"val was: %d\\n\", val)\n\t}\n}\n" }, { "alpha_fraction": 0.7151051759719849, "alphanum_fraction": 0.7284894585609436, "avg_line_length": 51.099998474121094, "blob_id": "071b883fb8c524424e0cec82e28d2fcXX", "content_id": "4f033534109a82e21d792d0020251c6bd4027920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 523, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/haskell-book/ch5/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n - If haskell can not determine types at compile time does it just defer that\n to runtime? If there is not sufficient type information at compile time to\n infer all types does the program fail to compile?\n - (Ord a, Num a) => a -> Ordering\n In the text this says a must have a type that compiles with the\n intersection of Ord and Num but I suspect it is the union of those two\n typeclasses. (p. 153)\n - Ask someone about question 7 (p. 163)\n\n - There are two forms of polymorphism (parametric and constrained)\n" }, { "alpha_fraction": 0.466292142868042, "alphanum_fraction": 0.5056179761886597, "avg_line_length": 19.77777862548828, "blob_id": "2e4ce33a7164972d2fc8fd998afc132fa316912d", "content_id": "0adb1995d475c5afd592...", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 356, "license_type": "no_license", "max_line_length": 79, "num_lines": 18, "path": "/golang/rob_pike_talk_2009/untyped_channels.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\n// it occurred to me at 36:59 that it would be possible to have untyped channels\nfunc main() {\n ch := make(chan interface {})\n go func () {\n ch <- 1234\n ch <- \"asdf\"\n ch <- 1.3\n ch <- struct {x, y int}{22, 33}\n close(ch)\n }()\n for val := range ch {\n fmt.Println(val)\n }\n}\n" }, { "alpha_fraction": 0.5312402844429016, "alphanum_fraction": 0.5435871481895447, "avg_line_length": 32.266666412353516, "blob_id": "ac47902fc11db860c9995597174ad89029d9c197", "content_id": "ad467158701fb179a0dd801609bdfff1efab2f56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1996, "license_type": "no_license", "max_line_length": 72, "num_lines": 60, "path": "/leetcode/spiral-matrix/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nclass Solution:\n directions = ['right', 'down', 'left', 'up']\n\n def spiralOrder(self, matrix):\n if not matrix:\n return []\n self.matrix = matrix\n self.result = []\n self.position = (0, 0)\n self.direction = 0\n self.width, self.height = len(matrix[0]), len(matrix)\n self.visited = set()\n self.go()\n return self.result\n\n def go(self):\n # continue until you can't move in all four directions\n cant_move = 0\n while cant_move < 4:\n # append current position if it hasn't already been appended\n if self.position not in self.visited:\n self.append_element()\n\n if self.next_element_is_edge():\n cant_move += 1\n self.change_direction()\n else:\n cant_move = 0\n self.position = self.next_position()\n\n def change_direction(self):\n self.direction = (self.direction + 1) % len(self.directions)\n\n def append_element(self):\n self.result.append(self.lookup())\n self.visited.add(self.position)\n\n def next_position(self):\n return {\n 'right': (self.position[0] + 1, 
self.position[1]),\n 'down': (self.position[0], self.position[1] + 1),\n 'left': (self.position[0] - 1, self.position[1]),\n 'up': (self.position[0], self.position[1] - 1),\n }.get(self.directions[self.direction])\n\n def lookup(self, position=None):\n position = position or self.position\n return self.matrix[position[1]][position[0]]\n\n def next_element_is_edge(self):\n on_edge = {\n 'right': self.position[0] + 1 == self.width,\n 'down': self.position[1] + 1 == self.height,\n 'left': self.position[0] - 1 == -1,\n 'up': self.position[1] - 1 == -1,\n }.get(self.directions[self.direction])\n return on_edge or self.next_position() in self.visited\n" }, { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.7763158082962036, "avg_line_length": 62.33333206176758, "blob_id": "2659afcf5c84201846202cfb553fe49a3ee632c4", "content_id": "f2ca72a0b9c6f725f33d45fe63065a3b2747ec5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 380, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/haskell-book/introduction/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "- The human mind lacks short term memory and keeping concepts in your head can\n be difficult.\n- Haskell's strong typing allows the computer to check metadata so you don't\n have to keep it in your own limited memory.\n- It is proposed that conceptually starting from a blank slate is a better way\n to learning Haskell since it is quite different than imperitive languages.\n" }, { "alpha_fraction": 0.506375253200531, "alphanum_fraction": 0.5707346796989441, "avg_line_length": 18.60714340209961, "blob_id": "d361e72c5c2a2aa421b83d01a08e25a21f18c2de", "content_id": "d5c5984193cda09b9bb9f3b20221fac9f4ee6beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 55, "num_lines": 84, "path": "/the-go-programming-language/ch3/src/mandelbrot/main.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"image\"\n\t\"image/color\"\n\t\"image/png\"\n\t\"math\"\n\t\"math/cmplx\"\n\t\"os\"\n)\n\nfunc main() {\n\tconst (\n\t\txmin, ymin, xmax, ymax = -2, -2, 2, 2\n\t\twidth, height = 1024, 1024\n\t)\n\timg := image.NewRGBA(image.Rect(0, 0, width, height))\n\n\tfor py := 0; py < height; py++ {\n\t\tfor px := 0; px < width; px++ {\n\t\t\tc1 := mandelbrot(complex(\n\t\t\t\tfloat64(px)/width*(xmax-xmin)+xmin,\n\t\t\t\tfloat64(py)/height*(ymax-ymin)+ymin,\n\t\t\t))\n\t\t\tc2 := mandelbrot(complex(\n\t\t\t\t(float64(px)+0.25)/width*(xmax-xmin)+xmin,\n\t\t\t\t(float64(py)+0.25)/height*(ymax-ymin)+ymin,\n\t\t\t))\n\t\t\tc3 := mandelbrot(complex(\n\t\t\t\t(float64(px)+0.5)/width*(xmax-xmin)+xmin,\n\t\t\t\t(float64(py)+0.5)/height*(ymax-ymin)+ymin,\n\t\t\t))\n\t\t\tc4 := mandelbrot(complex(\n\t\t\t\t(float64(px)+0.75)/width*(xmax-xmin)+xmin,\n\t\t\t\t(float64(py)+0.75)/height*(ymax-ymin)+ymin,\n\t\t\t))\n\n\t\t\timg.Set(px, py, averageColors(c1, c2, c3, c4))\n\t\t}\n\t}\n\tpng.Encode(os.Stdout, img)\n}\n\nfunc mandelbrot(z complex128) color.Color {\n\tconst (\n\t\titerations = 255\n\t\tcontrast = 15\n\t)\n\n\tvar v complex128\n\tfor n := uint8(0); n < iterations; n++ {\n\t\tv = v*v + z\n\t\tif cmplx.Abs(v) > 2 {\n\t\t\treturn color.RGBA{\n\t\t\t\t255 - contrast*n,\n\t\t\t\t255 - contrast*(uint8(math.Abs(float64(n-200)))),\n\t\t\t\t255 - contrast*(-n),\n\t\t\t\t255,\n\t\t\t}\n\t\t}\n\t}\n\treturn 
color.Black\n}\n\nfunc averageColors(colors ...color.Color) color.Color {\n\tvar r, g, b, a uint32\n\tfor _, c := range colors {\n\t\tlr, lg, lb, la := c.RGBA()\n\t\tr += lr\n\t\tg += lg\n\t\tb += lb\n\t\ta += la\n\t}\n\tr /= uint32(len(colors))\n\tg /= uint32(len(colors))\n\tb /= uint32(len(colors))\n\ta /= uint32(len(colors))\n\treturn color.RGBA{\n\t\tuint8(r >> 8),\n\t\tuint8(g >> 8),\n\t\tuint8(b >> 8),\n\t\tuint8(a >> 8),\n\t}\n}\n" }, { "alpha_fraction": 0.5512979030609131, "alphanum_fraction": 0.5710753798484802, "avg_line_length": 22.114286422729492, "blob_id": "8b682a71b23577d27063ba50a5abd5b3c405ec71", "content_id": "453e5aefbb785cbc9229e8121dc4c260da944dee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 809, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/zeromq-the-guide/chapter2/ventilator.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include \"zhelpers.h\"\n\n\nint main(int argc, char *argv[])\n{\n void *context = zmq_ctx_new();\n\n // socket to send messages on\n void *pusher = zmq_socket(context, ZMQ_PUSH);\n char buffer[30];\n snprintf(buffer, sizeof(buffer), \"tcp://*:%s\", argv[1]);\n printf(\"Binding to: %s\\n\", buffer);\n zmq_bind(pusher, buffer);\n\n printf(\"Sending tasks to workers.\\n\");\n\n // seed random number generator\n srandom((unsigned)time(NULL));\n\n for (int i = 0; i < 10000; i++) {\n printf(\"Sending %i\\n\", i);\n int workload;\n workload = randof(10) + 1;\n char string[10];\n snprintf(string, sizeof(string), \"%d\", workload);\n s_send(pusher, string);\n s_sleep(2);\n }\n\n zmq_close(pusher);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.707442045211792, "alphanum_fraction": 0.7604723572731018, "avg_line_length": 178.52000427246094, "blob_id": "95f5ab1efa67d952ff5ec718299da51686a61be1", "content_id": "43fe680122e1f3ca54b0dad1105fb16b8c8c66f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4488, "license_type": "no_license", "max_line_length": 289, "num_lines": 25, "path": "/README.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "A place for experiments, learning, and random pieces of code.\n\n[![algorithms-unlocked](http://img.shields.io/badge/algorithms--unlocked-100%25_(10/10)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/algorithms-unlocked/progress.md) \n[![x86-64-assembly-language-programming-with-ubuntu](http://img.shields.io/badge/x86--64--assembly--language--programming--with--ubuntu-25%25_(5/20)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/x86-64-assembly-language-programming-with-ubuntu/progress.md) \n[![haskell-book](http://img.shields.io/badge/haskell--book-19.34%25_(77/398)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/haskell-book/progress.md) \n[![java-concurrency-in-practice](http://img.shields.io/badge/java--concurrency--in--practice-100%25_(17/17)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/java-concurrency-in-practice/progress.md) \n[![css-grid](http://img.shields.io/badge/css--grid-33.33%25_(1/3)-yellow.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/css-grid/progress.md) 
\n[![golang](http://img.shields.io/badge/golang-16.95%25_(29/171)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/golang/progress.md) \n[![rfc5424](http://img.shields.io/badge/rfc5424-100%25_(39/39)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/rfc5424/progress.md) \n[![the-go-programming-language](http://img.shields.io/badge/the--go--programming--language-23.27%25_(27/116)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/the-go-programming-language/progress.md) \n[![bosh](http://img.shields.io/badge/bosh-13.75%25_(11/80)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/bosh/progress.md) \n[![coreos](http://img.shields.io/badge/coreos-70.58%25_(12/17)-green.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/coreos/progress.md) \n[![cpp-how-to-program](http://img.shields.io/badge/cpp--how--to--program-100%25_(32/32)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/cpp-how-to-program/progress.md) \n[![saltstack](http://img.shields.io/badge/saltstack-28.1%25_(52/185)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/saltstack/progress.md) \n[![terraform](http://img.shields.io/badge/terraform-78.26%25_(18/23)-green.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/terraform/progress.md) \n[![docker](http://img.shields.io/badge/docker-7.14%25_(1/14)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/docker/progress.md) \n[![go-by-example](http://img.shields.io/badge/go--by--example-50.76%25_(33/65)-yellowgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/go-by-example/progress.md) \n[![golang-tour](http://img.shields.io/badge/golang--tour-100%25_(95/95)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/golang-tour/progress.md) \n[![digital-fundamentals](http://img.shields.io/badge/digital--fundamentals-3.6%25_(4/111)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/digital-fundamentals/progress.md) \n[![practical-cryptography-with-go](http://img.shields.io/badge/practical--cryptography--with--go-85%25_(17/20)-brightgreen.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/practical-cryptography-with-go/progress.md) \n[![zeromq-the-guide](http://img.shields.io/badge/zeromq--the--guide-14.8%25_(41/277)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/zeromq-the-guide/progress.md) \n[![openstack-installation-guide](http://img.shields.io/badge/openstack--installation--guide-8.6%25_(8/93)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/openstack-installation-guide/progress.md) \n[![python-logging](http://img.shields.io/badge/python--logging-48.31%25_(43/89)-yellow.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/python-logging/progress.md) \n[![swift-wwdc](http://img.shields.io/badge/swift--wwdc-0%25_(0/5)-red.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/swift-wwdc/progress.md) \n[![bitfountain-ios7](http://img.shields.io/badge/bitfountain--ios7-19.59%25_(106/541)-orange.svg?style=flat)](https://github.com/jasonkeene/playground/blob/master/bitfountain-ios7/progress.md)\n" }, { "alpha_fraction": 0.49035292863845825, "alphanum_fraction": 0.5096470713615417, "avg_line_length": 22.87640380859375, "blob_id": "4d7b071b420642738c099d25b4382516a3eda229", "content_id": 
"dde86679c5eacc30e25ab3806826a0e5e24825f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2125, "license_type": "no_license", "max_line_length": 75, "num_lines": 89, "path": "/object-counter/counter.js", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "function Counter() {\n this.elements = {};\n}\nCounter.prototype.lookup = function (element) {\n var el, i;\n el = this.elements[element] || [];\n for (i = 0; i < el.length; i++) {\n if (el[i][0] === element) {\n return el[i][1];\n }\n }\n};\nCounter.prototype.add = function (element) {\n var el, found, i;\n el = this.elements[element] || [];\n found = false;\n for (i = 0; i < el.length; i++) {\n if (el[i][0] === element) {\n el[i][1]++;\n found = true;\n }\n }\n if (!found) {\n el.push([element, 1]);\n }\n this.elements[element] = el;\n};\nCounter.prototype.consume = function (elements_list) {\n elements_list.map(function (elements) {\n elements.map(this.add, this);\n }, this);\n};\nCounter.prototype.filter = function (filter_func) {\n var result, key;\n result = [];\n for (key in this.elements) {\n if (this.elements.hasOwnProperty(key)) {\n result = result.concat(this.elements[key].filter(filter_func));\n }\n }\n return result;\n};\nCounter.prototype.gt = function (num) {\n return this.filter(function (tuple) {\n return tuple[1] > num;\n }).map(function (tuple) {\n return tuple[0];\n });\n};\nCounter.prototype.lt = function (num) {\n return this.filter(function (tuple) {\n return tuple[1] < num;\n }).map(function (tuple) {\n return tuple[0];\n });\n};\nCounter.prototype.eq = function (num) {\n return this.filter(function (tuple) {\n return tuple[1] == num;\n }).map(function (tuple) {\n return tuple[0];\n });\n};\n\nfunction uniq(array) {\n var result, i;\n result = [];\n for (i = 0; i < array.length; i++) {\n if (result.indexOf(array[i]) === -1) {\n result.push(array[i]);\n }\n }\n return result;\n}\n\n(function () {\n var sets, counter;\n sets = [\n [1, 2, 3],\n [4, 3, 5],\n [99, \"asdf\", 20],\n [1, 7, 3],\n [55, 55, 55, \"99\"],\n [98, \"asdf\", 21],\n ];\n counter = new Counter();\n counter.consume(sets.map(uniq));\n console.log(counter.gt(1));\n})();\n" }, { "alpha_fraction": 0.5312402844429016, "alphanum_fraction": 0.5713397860527039, "avg_line_length": 26.033613204956055, "blob_id": "009a3dc6b8884e6d02847e2a0f32293198c35cec", "content_id": "7336810b441c12db04cf5faf034b829d69619d17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3217, "license_type": "no_license", "max_line_length": 80, "num_lines": 119, "path": "/the-go-programming-language/ch3/src/surface/main.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"net/http\"\n\t\"time\"\n)\n\nconst (\n\twidth, height = 1024, 768\n\tcells = 100\n\txyrange = 30.0\n\txyscale = width / 2 / xyrange\n\tzscale = height * 0.4\n\tangle = math.Pi / 6\n)\n\nvar sin30, cos30 = math.Sin(angle), math.Cos(angle)\n\nfunc main() {\n\tfmt.Println(\"listening on http://localhost:8080\")\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/\", RenderSurface)\n\tmux.HandleFunc(\"/animated\", RenderAnimatedSurface)\n\thttp.ListenAndServe(\":8080\", mux)\n}\n\nfunc RenderSurface(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"image/svg+xml\")\n\n\tfmt.Fprintf(rw, \"<svg xmlns='http://www.w3.org/2000/svg' \"+\n\t\t\"style='stroke: rgb(40, 40, 40); stroke-width: 
0.7' \"+\n\t\t\"width='%d' height='%d'>\\n\", width, height)\n\tfor i := 0; i < cells; i++ {\n\t\tfor j := 0; j < cells; j++ {\n\t\t\tax, ay, az := corner(i+1, j, 0)\n\t\t\tbx, by, bz := corner(i, j, 0)\n\t\t\tcx, cy, cz := corner(i, j+1, 0)\n\t\t\tdx, dy, dz := corner(i+1, j+1, 0)\n\t\t\tif valid(ax, ay, bx, by, cx, cy, dx, dy) {\n\t\t\t\tr, g, b := depthColor(az, bz, cz, dz)\n\t\t\t\tfmt.Fprintf(rw, \"<polygon points='%g,%g %g,%g %g,%g %g,%g' \"+\n\t\t\t\t\t\" style='fill: rgb(%d, %d, %d)' />\\n\",\n\t\t\t\t\tax, ay, bx, by, cx, cy, dx, dy, r, g, b)\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintln(rw, \"</svg>\\n\")\n}\n\nfunc RenderAnimatedSurface(rw http.ResponseWriter, r *http.Request) {\n\tconst boundary = \"NEWSVGDATA\"\n\trw.Header().Set(\"Content-Type\", \"multipart/x-mixed-replace;boundary=\"+boundary)\n\trw.WriteHeader(http.StatusOK)\n\n\tfor frame := 0; ; frame++ {\n\t\tfmt.Fprintf(rw, \"\\r\\n--\"+boundary+\"\\r\\n\")\n\t\tfmt.Fprintf(rw, \"Content-type: text/html\\r\\n\\r\\n\")\n\n\t\tfmt.Fprintf(rw, \"<svg xmlns='http://www.w3.org/2000/svg' \"+\n\t\t\t\"style='stroke: rgb(40, 40, 40); stroke-width: 0.7' \"+\n\t\t\t\"width='%d' height='%d'>\\n\", width, height)\n\t\tfor i := 0; i < cells; i++ {\n\t\t\tfor j := 0; j < cells; j++ {\n\t\t\t\tax, ay, az := corner(i+1, j, frame)\n\t\t\t\tbx, by, bz := corner(i, j, frame)\n\t\t\t\tcx, cy, cz := corner(i, j+1, frame)\n\t\t\t\tdx, dy, dz := corner(i+1, j+1, frame)\n\t\t\t\tif valid(ax, ay, bx, by, cx, cy, dx, dy) {\n\t\t\t\t\tr, g, b := depthColor(az, bz, cz, dz)\n\t\t\t\t\tfmt.Fprintf(rw, \"<polygon points='%g,%g %g,%g %g,%g %g,%g' \"+\n\t\t\t\t\t\t\" style='fill: rgb(%d, %d, %d)' />\\n\",\n\t\t\t\t\t\tax, ay, bx, by, cx, cy, dx, dy, r, g, b)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(rw, \"</svg>\")\n\t\trw.(http.Flusher).Flush()\n\t\ttime.Sleep(time.Millisecond)\n\t}\n}\n\nfunc valid(values ...float64) bool {\n\tfor _, v := range values {\n\t\tif math.IsNaN(v) || math.IsInf(v, 0) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc corner(i, j, frame int) (float64, float64, float64) {\n\tx := xyrange * (float64(i)/cells - 0.5)\n\ty := xyrange * (float64(j)/cells - 0.5)\n\tz := depth(x, y, float64(frame)/24.0*math.Pi)\n\n\tsx := width/2 + (x-y)*cos30*xyscale\n\tsy := height/2 + (x+y)*sin30*xyscale - z*zscale\n\treturn sx, sy, z\n}\n\nfunc depth(x, y, offset float64) float64 {\n\tr := math.Hypot(x, y)\n\treturn math.Sin(r+offset) / r\n}\n\nfunc depth2(x, y float64) float64 {\n\treturn math.Cos(x/xyrange*2) + math.Sin(y/xyrange*2)\n}\n\nfunc depthColor(points ...float64) (int, int, int) {\n\tvar max float64\n\tfor _, v := range points {\n\t\tmax = math.Max(max, v)\n\t}\n\tmax *= 2\n\treturn int(255 * max), int(255 * (1 - math.Abs(0.5-max))), int(255 * (1 - max))\n}\n" }, { "alpha_fraction": 0.5735607743263245, "alphanum_fraction": 0.5970149040222168, "avg_line_length": 15.17241382598877, "blob_id": "15c9747832ff441e64750913e6e77185e07723b0", "content_id": "ff35ed6cfc576e3a14f5683c617090957c4bf31d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 469, "license_type": "no_license", "max_line_length": 36, "num_lines": 29, "path": "/the-go-programming-language/ch3/src/strings/anagram.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc main() {\n\tfmt.Println(Anagram(\"cba\", \"abc\"))\n\tfmt.Println(Anagram(\"cba\", \"cba\"))\n\n\tfmt.Println(Anagram(\"cba\", \"cbaa\"))\n\tfmt.Println(Anagram(\"cba\", 
\"caa\"))\n\tfmt.Println(Anagram(\"cba\", \"ccc\"))\n}\n\nfunc Anagram(s1, s2 string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\tfor _, r := range s1 {\n\t\tindex := strings.IndexRune(s2, r)\n\t\tif index < 0 {\n\t\t\treturn false\n\t\t}\n\t\ts2 = s2[:index] + s2[index+1:]\n\t}\n\treturn true\n}\n" }, { "alpha_fraction": 0.7772523760795593, "alphanum_fraction": 0.779025673866272, "avg_line_length": 48.75490188598633, "blob_id": "790b8c1f575a769b7c92bcbba59985752e17e213", "content_id": "7b67092274389f8528dfb0e9f3c448eed4cfd0e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20301, "license_type": "no_license", "max_line_length": 80, "num_lines": 408, "path": "/java-concurrency-in-practice/notes.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n# Preface\n\n- Read on the train, no notes.\n\n# Chapter 1 - Introduction\n\n- Read on the train, no notes. Basic introduction to concurrency concepts.\n\n# Chapter 2 - Thread Safety\n\n- Frameworks impose thread safety requirements as they spin up threads for\n you.\n- Stateless objects are thread safe because all their state is temporary and\n stored on the thread's stack and is (typically) not accessible from other\n threads.\n- If req/resp handler needs to remember stuff from one request to another that\n is when thread safety of the handler needs to be considered. ++operator is\n not atomic, it is composed of load, add and store steps aka read-modify-write.\n- Race conditions are where the correctness of an algorithm is dependent on\n lucky timing in the ordering of actions taken by each thread.\n- The most common race condition is check-then-act.\n- Not all data races are race conditions and not all race conditions are data\n races. check-then-act is when you make an observation about the system then\n take an action, however by the time you take the action the observation could\n have become invalid.\n- Lazy initialization is where you postpone initing an object until it is\n needed yet ensure that it is only initialized once. This commonly uses\n check-then-act.\n- Read-modify-write is a state transition from state A->B. This requires a\n thread to have exclusive access to reading and writing the state. If another\n thread reads the state before the first thread is done modifying it will have\n an invalid understanding of the state. Additionally, if a thread writes while\n the first thread is writing it can overwrite the first thread's write.\n- Atomic operations are indivisible. Thread B must wait for thread A to\n complete operations on the state before it can continue.\n- check-then-act and read-modify-write are both compound actions that need to\n be atomic to ensure thread safety.\n- Java has `java.util.concurrent.atomic` that gives you types that can be used\n to encapsulate values and operate on them atomically to ensure thread\n safety.\n- Adding more than one atomic state variable does not guarantee thread safety\n if the operations on those atomic variables need to be atomic themselves.\n- Synchronized blocks and methods guard critical sections of code. 
You can\n pass it an instance or class and it will add a lock to that instance/class.\n These locks are called intrinsic locks or monitor locks.\n- Intrinsic locks are mutexes.\n- Overly broad locks can cause performance issues.\n- Intrinsic locks are reentrant, meaning that if thread A already holds a lock\n if it tries to acquire it again, it will succeed and not deadlock.\n- For a given shared mutable variable, all access (reads and writes) must be\n guarded by the same lock to ensure thread safety.\n- It is recommended to only use a single lock to guard any given variable.\n- Invariants that involve multiple variables must be guarded by the same lock\n so that operations on them are atomic.\n- There is a balance between granularity and coarseness. If you are too course\n in your locking you have less concurrency, if you are too granular you pay\n the cost of lock overhead.\n\n## Discussion\n\n- How useful is it to make a type partially thread safe? For instance, if a\n diode is only used with one writer, one reader, it could be thread safe\n under those conditions but not when there are multiple writers or multiple\n readers. Another example is Eno's channel buffer used in his gRPC streaming\n demo.\n- Reentrant locks seem costly. They seem to be useful for intrinsic locks with\n inheritance. Is there a use in Go? Can you even implement such a thing\n considering Go's lack of support for thread locals?\n- Is there a situation where you would want to use more than one lock to guard\n a particular variable?\n\n# Chapter 3 - Sharing Objects\n\n- synchronized keyword is not only about guarding critical sections but also\n involves memory visibility.\n- writes from one thread are not guaranteed to be visible to another thread,\n either in a timely manner or at all.\n- Reordering of writes can occur as well if the critical section is not\n synchronized.\n- When sharing variables without synchronization you may see some variables be\n stale and others not. Stale reads are when writes from one thread are not\n visible to the reader thread.\n- out-of-thin-air safety is where a thread reads a value that was previously\n written by another thread. It is guaranteed to not be a random value.\n- 64-bit value load/store operations are not atomic and as a result, you may\n read the high 32 bits from one write but the low 32 bits from another write.\n This means 64-bit values need to be marked as volatile or use synchronization\n when reading and writing.\n- the volatile keyword may be used to ensure visibility of a variable. This\n prevents the compiler and runtime from reordering memory operations\n pertaining to this variable. All writes to memory before the write to the\n volatile memory are visible to the thread after reading from that variable.\n- If you can confine an object to a single thread at any given time, it can be\n used without synchronization. For example, a connection pool that is used to\n acquire a connection can be used by multiple threads since the pool is safe.\n Once a connection is acquired it is confined to a given thread. 
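Go has no `volatile`, but the closest analog to the visibility and safe-publication guarantees described above is `sync/atomic`. A sketch of publishing an immutable holder object (my illustration; the `config` fields are made up):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// config is an immutable holder: writers publish a whole new value
// instead of mutating fields in place.
type config struct {
	addr string
	port int
}

func main() {
	var current atomic.Value
	current.Store(&config{addr: "localhost", port: 8080})

	// A writer publishes a fresh snapshot; readers always observe one
	// complete snapshot or the other, never a torn mix of fields.
	current.Store(&config{addr: "localhost", port: 9090})

	c := current.Load().(*config)
	fmt.Println(c.addr, c.port)
}
```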
As a result,\n the connection does not need to be thread-safe.\n- Ad-hoc thread confinement is where it is left up to the implementation to\n not share references.\n- Stack and thread local confinement are also alternatives.\n- Immutable objects are always thread-safe.\n- If you do not properly publish shared state, using volatile or\n synchronization, you will have a bad time.\n- If the object is immutable then no synchronization is needed in order to\n publish.\n\n## Discussion\n\n- In Go, are operations in the atomic package guaranteed to be visible by\n other goroutines?\n- Using immutable holder objects for variables that are related by an\n invariant along with volatile for visibility is interesting since it\n requires no synchronization.\n\n# Chapter 4 - Composing Objects\n\n- Encapsulation makes analyzing concurrent programs easier. With public state,\n you have to worry about how the entire program might access the state vs the\n type's methods.\n- Instance confinement is where data is encapsulated and manipulated\n concurrently through a set of methods that serialize access to that state.\n- Monitor pattern is just marking all methods synchronized and calling it a\n day.\n- Delegating thread safety is where you hand off thread safety to fields that\n are thread safe. This does not work if you have invariants that relate to\n multiple, thread safe fields.\n- Client side locking is just using the same lock of the type you are\n extending.\n- Composition is similar to composition in Go where you embed other types and\n delegate methods to those types.\n- Document thread safety guarantees for users and synchronization policy for\n maintainers.\n- Java documentation, at least at the time of writing, is not great when it\n comes to documenting thread safety guarantees.\n- Latches, barriers, semaphores, and blocking queues are types of\n synchronizers.\n- Latches block all threads until the terminal state is reached in which case\n all threads unblock.\n- CountDownLatch is like a WaitGroup.\n- Semaphores control how many things can run at once. It is backed by an int\n counter that is typically initialized to a certain value. This value can grow\n and shrink. If it goes < 0 threads will block until it grows >= 0. This is\n useful for implementing pools.\n- Barriers are useful for releasing workers to work in parallel and then\n joining them back up.\n\n## Discussion\n\n- Intrinsic locks are public and exposed to the outside world. That seems\n really messed up.\n\n# Chapter 5 - Building Blocks\n\n- Delegation can be powerful. Use built-in thread safe classes to delegate\n thread safety where possible.\n- `Collections.synchronizedXxx` synchronize every public method.\n- Compound actions still need to use client-side synchronization.\n- Iteration can have timing issues as you call `.size` first and then n `.get`\n calls after. If another thread modifies the map after `.size` was called you\n get an exception or don't get all the data.\n- Java5 has an iteration syntax that will throw an exception if modifications\n are made to the collection during iteration.\n- You can clone collections for safe iteration since the copy will be thread\n confined. \n- Gotta watch out for hidden state access via `toString`, `hashCode`, and\n `equal` methods.\n- `ConcurrentHashMap` uses lock striping vs a single lock for the entire map.\n- Iteration on `ConcurrentHashMap` does not require holding a client lock as\n it is weakly consistent. 
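The latch and semaphore notes above map directly onto Go primitives: `sync.WaitGroup` plays the CountDownLatch role, and a buffered channel makes a counting semaphore (my sketch, not from the notes):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// A counting semaphore with 3 permits: sends block once all permits
	// are taken, matching the counter-goes-negative description above.
	sem := make(chan struct{}, 3)

	var wg sync.WaitGroup // plays the CountDownLatch role
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a permit
			defer func() { <-sem }() // release it
			fmt.Println("worker", id, "holds a permit")
			time.Sleep(10 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
```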
This allows for writes while iterating that may or\n may not show up in the results of the iteration.\n- Work stealing is where each worker thread has its own deque. When it is done\n with all the items in its queue it can read from the tail of another\n thread's deque. This minimizes contention compared to a single work queue for\n all workers.\n\n# Chapter 6 - Task Execution\n\n- Choosing appropriate task size and isolation boundaries is critical.\n- Creating a thread per task will allocate stacks for each thread and can cause\n paging and/or out of memory errors. Go doesn't suffer this problem quite as\n bad since it mux's goroutines onto a pool of OS threads and has lightweight\n stacks for each goroutine.\n- Alternatively, you can have a pool of threads that read off a queue that you\n can put work on from a single thread that accepts connections.\n- An execution policy should be defined at deploy time and establish:\n - How many tasks can execute concurrently?\n - How many tasks may be queued waiting to execute?\n - What actions to take before and after executing a task?\n - How to reject tasks?\n - What order should tasks execute in (FIFO, LIFO, priority)?\n - What threads execute what tasks?\n- homogeneous workloads that can independently execute allow for high\n parallelism.\n\n## Discussion\n\n- It seems Java has put much more thought into lifecycle management of\n concurrent actors.\n\n# Chapter 7 - Cancellation and Shutdown\n\n- Similar to Go, Java provides no mechanism to safely force a thread to stop.\n- Each thread has an interrupted status that can be set from the outside.\n Blocking processes typically read from this value and throw an exception.\n- There are forms of blocking that do not throw interrupted exception such as\n synchronous IO and intrinsic locks.\n- A poison pill is a sentinel value that is put in a queue to signal teardown\n once it is reached.\n- If you are a thread pool and are calling untrusted code, it is best to catch\n all exceptions.\n- JVM will fire shutdown hooks and possibly finalizers on graceful shutdown.\n- Daemon threads are stuff like the GC that the JVM will preemptively abort on\n shutdown.\n\n## Discussion\n\n- Seems like context with cancel func and deadline/timeout handle most of the\n shutdown situations explained in this chapter.\n\n# Chapter 8 - Applying Thread Pools\n\n- If you are using the executor framework your tasks should be:\n - Independent of one another, not depend on results, timing, or side effects\n of other tasks.\n - Should not use thread local state or otherwise exploit thread confinement.\n You should be able to swap out a single threaded executor with a thread\n pool without issues.\n - Take a long time.\n- If you have tasks that block waiting for results of other tasks this can\n cause starvation deadlock.\n- Pools can be sized by `N = CPUs * Utilization * (1 + Wait/Compute)`\n- Using a pool avoids unbounded thread creation but you can still run out of\n memory if tasks are produced at a higher rate than they are processed. 
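The sizing formula in the chapter 8 notes is easy to sanity-check with concrete numbers (my worked example): 8 CPUs at a 75% utilization target with tasks that wait 9ms for every 1ms of compute gives N = 8 * 0.75 * (1 + 9/1) = 60 threads.

```go
package main

import "fmt"

// poolSize applies N = CPUs * Utilization * (1 + Wait/Compute) from the
// chapter 8 notes above.
func poolSize(cpus int, utilization, wait, compute float64) int {
	return int(float64(cpus) * utilization * (1 + wait/compute))
}

func main() {
	fmt.Println(poolSize(8, 0.75, 9, 1)) // 60
}
```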
\n\n## Discussion\n\n- Caller Runs policy is interesting. Not relevant to our domain but\n  interesting nonetheless.\n\n# Chapter 9 - GUI Applications\n\n- GUI frameworks are single threaded for a reason: accessing state from\n  multiple threads with events triggered by hardware and events triggered by\n  the application can lead to ordering issues in lock acquisition and thus\n  deadlocks.\n- Long running processes can be handled in worker threads that then write\n  events back into the event thread. All GUI state is thread confined to the\n  event thread.\n\n# Chapter 10 - Avoiding Liveness Hazards\n\n- Lock-ordering deadlocks can be fixed by having all threads that need the\n  same locks acquire them in the same order.\n- Calling alien methods while holding a lock risks getting into a deadlock as\n  the alien method can then try to acquire a lock.\n- Only making open calls to alien methods lowers deadlock risk and makes\n  analysis easier.\n- Modifying thread priorities is platform dependent and can cause starvation\n  of lower priority threads.\n- Livelock is where a thread is active but cannot make progress. This can\n  occur when multiple threads change their state in response to each other.\n  For instance, two people walking down a hall both attempt to move out of each\n  other's way but then are in each other's way again.\n\n# Chapter 11 - Performance and Scalability\n\n- \"How fast\" measurements are performance, service time, latency.\n- \"How much\" measurements are scalability, capacity, throughput.\n- When making performance decisions:\n  - What does it mean to be \"faster\"?\n  - Under what conditions will this approach actually be faster? Light/heavy\n    load? Large/small datasets? Do you have measurements?\n  - How often are these conditions likely to arise? Do you have measurements?\n  - How often are conditions different?\n  - What hidden costs, such as maintenance risk, are there?\n- Schedulers will run a given task for a minimum amount of time to mitigate\n  context switches and increase throughput.\n- Increased lock contention increases context switches and serialization\n  costs.\n- Lock contention probability is determined by how frequently the lock is\n  requested and how long the lock is held for once acquired.\n- Lock splitting and striping are methods of providing higher granularity in\n  situations where contention is high.\n- Avoid hot fields where possible. Striping, volatile, or atomics can help\n  with mitigating the cost of hot fields (sketch below).\n- Object pooling to minimize allocations is mostly a bad idea with Java.
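\n\nAs a concrete instance of striping a hot field, a minimal sketch of my own\nusing `LongAdder` (which postdates the book; it arrived in Java 8): increments\nland on different internal cells, so a heavily contended counter stops being a\nsingle hot word.\n\n```java\nimport java.util.concurrent.atomic.LongAdder;\n\n// Illustrative example, not from the book.\nclass HitCounter {\n    private final LongAdder hits = new LongAdder();\n\n    // Many threads can call this with far less contention than a\n    // single AtomicLong would see, because the adder stripes the\n    // count across internal cells.\n    void record() {\n        hits.increment();\n    }\n\n    long total() {\n        return hits.sum();  // folds the cells together on read\n    }\n}\n```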
\n\n# Chapter 12 - Testing Concurrent Programs\n\n- There are two categories of tests for concurrent types. Tests for safety and\n  tests for liveness.\n- Testing concurrent software is hard.\n- Performance tests:\n  - Collect data, draw graphs.\n  - Measure for latency and throughput.\n  - Disable optimizations such as dead code elimination as they will sometimes\n    eliminate your benchmark code.\n\n## Discussion\n\n- Perhaps we should run our unit tests on a timer as well to help expose bugs\n  due to interleavings of goroutines/threads that are not common.\n- Running more goroutines than procs and more procs than CPU cores along with\n  calling `runtime.Gosched` in test loops can generate more interleavings.\n- Pausing between benchmark runs to allow for GC to fully clean up.\n\n# Chapter 13 - Explicit Locks\n\n- Java provides a ReentrantLock that can be used to explicitly lock.\n- Use `finally` to ensure unlock occurs even when an exception happens.\n- Polling and timed locks are useful for deadlock avoidance when lock\n  acquisition ordering cannot be guaranteed.\n- Explicit locks are interruptible.\n- Hand-over-hand locking is where you acquire the next lock before releasing\n  the one you currently hold.\n- ReentrantLock is faster than intrinsic locks pre-Java 6.\n- ReentrantLocks can be fair or unfair. Fair being FIFO for acquisition.\n  Fairness costs a lot.\n- ReadWriteLock allows for multiple readers at the same time, similar to Go's\n  `sync.RWMutex`. They should only be used in read-heavy situations.\n\n# Chapter 14 - Building Custom Synchronizers\n\n- State dependence is where threads are blocked until a certain state exists.\n- All objects in Java have an intrinsic condition queue.\n- Conditional locking cooperates with the runtime to put threads to sleep and\n  wake them up when the state changes.\n- Wait can unblock even if notify or notifyAll are not called by another\n  thread (a spurious wakeup).\n- You may use notify instead of notifyAll in situations where you have a\n  single condition predicate.\n- Intrinsic condition queues should be encapsulated and not exposed, just like\n  intrinsic locks.\n- Use multiple Conditions where you have multiple condition predicates.\n\n## Discussion\n\n- Intrinsic condition queues with the synchronized keyword make using\n  conditional mutexes easier than in Go.\n\n# Chapter 15 - Atomic Variables and Nonblocking Synchronization\n\n- Lock/wait free data structures are immune to liveness issues.\n- Putting threads to sleep is costly.\n- Priority inversion is where a high priority thread is blocked on a lock held\n  by a lower priority thread. Its effective priority is limited by the other\n  thread.\n- CAS, compare and swap (or compare and set) allows you to atomically read\n  from a piece of memory and at a later time atomically write to that memory.\n  If the value has changed in the meantime it will fail and allow you to try\n  again. This is lock-free but not wait-free.\n- JVM will use a spin lock if CAS type instructions are not supported by the\n  target hardware.\n- Locks can perform better under ultra-high contention scenarios.\n- It is a great idea to not share data across cores at all if you can.\n- The ABA problem is an issue with CAS where some other thread has changed the\n  value from A to B and back to A. This is invalid for some algorithms. To\n  mitigate this you can read/write a tuple of (value, version). Every write\n  will increment the version and thus invalidate other CAS operations.
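\n\nJava ships this (value, version) trick as `AtomicStampedReference`. Here is a\nminimal sketch of my own (the holder class is made up) of the usual CAS retry\nloop with a stamp:\n\n```java\nimport java.util.concurrent.atomic.AtomicStampedReference;\n\n// Illustrative example, not from the book.\nclass LatestValue {\n    private final AtomicStampedReference<String> ref =\n        new AtomicStampedReference<>(null, 0);\n\n    void set(String value) {\n        while (true) {\n            int[] stampHolder = new int[1];\n            String current = ref.get(stampHolder);\n            // The CAS succeeds only if both the reference and the\n            // stamp are unchanged, so an A -> B -> A change by\n            // another thread is still detected.\n            if (ref.compareAndSet(current, value,\n                    stampHolder[0], stampHolder[0] + 1)) {\n                return;\n            }\n        }\n    }\n}\n```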
\n\n## Discussion\n\n- The JVM adaptively determines if a thread should spin or be suspended based\n  on how long the lock has previously been held for. That's cool!\n- It's weird that they implemented Counters with CAS and not atomic increment.\n\n# Chapter 16 - The Java Memory Model\n\n- A memory model specifies the conditions under which a write in one core is\n  visible to another core.\n- ISAs will have their own memory model along with instructions called\n  barriers or fences to get additional memory coordination guarantees.\n- Sequential consistency is where every read of a variable will see the last\n  write to that variable from any processor as if there is a sequential\n  ordering to all instructions. This is not reality.\n- Actions by one thread might appear to execute in different orders from the\n  perspective of different threads.\n- A happens-before relationship prevents reordering.\n- It is possible to piggyback on other synchronization that is forcing a\n  happens-before ordering.\n- Unsafe publication can occur if you don't have a happens-before relationship\n  between thread A publishing a reference to an object and thread B reading\n  that reference.\n- Static initialization happens before a class can be used by threads and is\n  guarded by a lock. As a result, the statically initialized variable is\n  visible to all threads.\n\n## Discussion\n\n- Piggybacking seems very cool and also very dangerous.\n- I wonder what visibility guarantees are given to `func init() {}` or `var\n  foo = InitFoo()`.\n" }, { "alpha_fraction": 0.653766393661499, "alphanum_fraction": 0.6966136693954468, "avg_line_length": 19.671428680419922, "blob_id": "efefb7a7efefb7a7", "content_id": "327c36f6794a8bd32730eed2116ee05ecc345ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1447, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/data-structures/hash-table/hash.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package hash\n\nimport (\n\t\"crypto/sha1\"\n\t\"encoding/binary\"\n)\n\n// Design:\n// - Table size should be power of two\n// - Use only builtin arrays (maybe slices)\n// - Use bit masking to mod into table size\n// - Table should dynamically resize to next power when collisions threshold\n//   is reached\n// - Keys will need to be stored in buckets with values for verification\n//   checking and to allow for rehashing\n// - Will not use open addressing\n\nconst InitialCapacity = 8\n\ntype BucketValue struct {\n\tkey string\n\tvalue interface{}\n}\n\n// TODO: make buckets contain multiple bucket values for collisions\ntype Bucket BucketValue\n\ntype Hash struct {\n\tbuckets []Bucket\n}\n\nfunc NewHash() *Hash {\n\treturn &Hash{\n\t\tbuckets: make([]Bucket, InitialCapacity),\n\t}\n}\n\nfunc (h *Hash) Set(k string, v interface{}) {\n\tindex := truncate(HashFunc([]byte(k)), len(h.buckets))\n\th.buckets[index] = Bucket{\n\t\tkey: k,\n\t\tvalue: v,\n\t}\n}\n\nfunc (h *Hash) Get(k string) interface{} {\n\tindex := truncate(HashFunc([]byte(k)), len(h.buckets))\n\tb := h.buckets[index]\n\t// Verify the stored key so a colliding key misses instead of\n\t// silently returning another key's value.\n\tif b.key != k {\n\t\treturn nil\n\t}\n\treturn b.value\n}\n\nfunc HashFunc(data []byte) uint64 {\n\thash := sha1.New()\n\thash.Write(data)\n\tsum := make([]byte, 0)\n\tsum = hash.Sum(sum)\n\n\tresult := binary.BigEndian.Uint64(sum)\n\treturn result\n}\n\nfunc truncate(value uint64, size int) uint64 {\n\treturn value % uint64(size)\n}\n\n// TODO: 
use masking vs mod\n\n// 000000000000 1111\n// 101010101101 0101 bitwise and\n// 000000000000 0101\n" }, { "alpha_fraction": 0.4773869216442108, "alphanum_fraction": 0.49497488141059875, "avg_line_length": 13.214285850524902, "blob_id": "2fb19aa65a9ab41bd0bccdefc99bf04c25fc56a6", "content_id": "317df9a0f1d027335e6df99b2703722c95ef550b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 398, "license_type": "no_license", "max_line_length": 39, "num_lines": 28, "path": "/algorithms/sort/quick.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort\n\nfunc Quick(a []int) {\n\tquick(a, 0, len(a))\n}\n\nfunc quick(a []int, q, r int) {\n\tif q >= r-1 {\n\t\treturn\n\t}\n\tpivot := partition(a, q, r)\n\tquick(a, q, pivot)\n\tquick(a, pivot+1, r)\n}\n\nfunc partition(a []int, q, r int) int {\n\trhs := q\n\tfor i := q; i < r-1; i++ {\n\t\tif a[i] >= a[r-1] {\n\t\t\tcontinue\n\t\t}\n\t\ta[rhs], a[i] = a[i], a[rhs]\n\t\trhs++\n\t}\n\n\ta[rhs], a[r-1] = a[r-1], a[rhs]\n\treturn rhs\n}\n" }, { "alpha_fraction": 0.5811001658439636, "alphanum_fraction": 0.5811001658439636, "avg_line_length": 27.31999969482422, "blob_id": "a296f0482cfe7e464da9a8aa521d7d76cc6a66ea", "content_id": "f71d92f3b2d9929928a95f28696878fa7537fc95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 709, "license_type": "no_license", "max_line_length": 59, "num_lines": 25, "path": "/terraform/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Getting Started Guide](https://www.terraform.io/intro/)\n\n - [x] What is Terraform?\n - [x] Use Cases\n - [x] Terraform vs. Other Software\n - [x] Chef, Puppet, etc.\n - [x] CloudFormation, Heat, etc.\n - [x] Boto, Fog, etc.\n - [x] Custom Solutions\n - [x] Getting Started\n - [x] Install Terraform\n - [x] Build Infrastructure\n - [x] Change Infrastructure\n - [x] Destroy Infrastructure\n - [x] Resource Dependencies\n - [x] Provision\n - [x] Input Variables\n - [x] Output Variables\n - [x] Modules\n - [x] Next Steps\n - [ ] Example Configurations\n - [ ] Basic Two-Tier AWS Architecture\n - [ ] Cross Provider\n - [ ] Count Parameter\n - [ ] Consul\n" }, { "alpha_fraction": 0.5877193212509155, "alphanum_fraction": 0.601403534412384, "avg_line_length": 25.38888931274414, "blob_id": "49ea04e6f220087e0a95bb5aea78deed11c82e95", "content_id": "62882666a15a3e0f883ff337c09b00e6bcdcced1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2850, "license_type": "no_license", "max_line_length": 72, "num_lines": 108, "path": "/build_readme.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport subprocess\n\n\nREADME_TEMPLATE = \"\"\"\\\nA place for experiments, learning, and random pieces of code.\n\n{progress}\"\"\"\n\nPROGRESS_TEMPLATE = (\n \"[![{name}](http://img.shields.io/badge/\"\n \"{escaped}-{percent}%25_({complete}/{total})-{color}.svg?style=flat\"\n \")](https://github.com/jasonkeene/playground/blob/master/{name}/\"\n \"progress.md) \\n\"\n)\n\nREPO_ROOT = os.path.dirname(os.path.realpath(__file__))\n\n\ndef badge_color(percent):\n \"\"\"Return sheild.io badge color.\"\"\"\n if percent < 16.6:\n return 'red'\n elif percent < 33.2:\n return 'orange'\n elif percent < 50:\n return 'yellow'\n elif percent < 66.4:\n return 'yellowgreen'\n elif percent < 83.0:\n return 'green'\n else:\n return 
'brightgreen'\n\n\ndef escape_shield_name(name):\n    \"\"\"Return the shield name escaped for shield.io URL.\"\"\"\n    return name.replace('-', '--')\n\n\ndef get_progress_status(name):\n    \"\"\"Return the percent, complete, and total progress.\"\"\"\n    with open(os.path.join(REPO_ROOT, name, 'progress.md')) as f:\n        data = f.read()\n    uncomplete = data.count('- [ ]')\n    complete = data.count('- [x]')\n    total = complete + uncomplete\n    percent = float(complete) / total * 100\n    return percent, complete, total\n\n\ndef percent_str(percent):\n    if not percent:\n        return '0'\n    pstr = str(percent)\n    if '.' in pstr:\n        i = pstr.index('.')\n        if len(pstr) > i + 2:\n            pstr = pstr[0:i + 3]\n        pstr = pstr.rstrip('0')\n        pstr = pstr.rstrip('.')\n    return pstr\n\n\ndef generate_progress():\n    \"\"\"Return a string of progress information.\"\"\"\n    progress_dirs = find_progress_dirs()\n    progress = ''\n    for name in progress_dirs:\n        percent, complete, total = get_progress_status(name)\n        progress += PROGRESS_TEMPLATE.format(\n            name=name,\n            escaped=escape_shield_name(name),\n            percent=percent_str(percent),\n            complete=complete,\n            total=total,\n            color=badge_color(percent),\n        )\n    return progress.strip()\n\n\ndef order_progress_dir(name):\n    path = os.path.join(REPO_ROOT, name, 'progress.md')\n    command = 'git log --format=\"%at\" -- {}'.format(path)\n    output = subprocess.check_output(command, shell=True)\n    if output:\n        return max(output.split('\\n'))\n    return '999999999999999'\n\n\ndef find_progress_dirs():\n    \"\"\"Find all top level directories that contain progress files.\"\"\"\n    progress_dirs = []\n    for node in os.listdir(REPO_ROOT):\n        if os.path.isdir(node):\n            if 'progress.md' in os.listdir(node):\n                progress_dirs.append(node)\n    return reversed(sorted(progress_dirs, key=order_progress_dir))\n\n\ndef main():\n    print README_TEMPLATE.format(progress=generate_progress())\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.7663043737411499, "alphanum_fraction": 0.7663043737411499, "avg_line_length": 15.727272987365723, "blob_id": "8179fb99aee0c7e1d8e384dbb8c3c1c82d17985e", "content_id": "758999616fcba3f8438f7148829d3a466fb27af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 184, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/algorithms/sort/insertion_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"testing\"\n\n\tsort \"github.com/jasonkeene/playground/algorithms/sort\"\n)\n\nfunc TestInsertionCorrectness(t *testing.T) {\n\ttestCorrectness(t, sort.Insertion)\n}\n" }, { "alpha_fraction": 0.5252435803413391, "alphanum_fraction": 0.5491585731506348, "avg_line_length": 13.662337303161621, "blob_id": "233ebd20b1709ba3923943a404a91aa1315f641a", "content_id": "6125a02effdce53cecfee454b125e6e2ea0f2f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1129, "license_type": "no_license", "max_line_length": 50, "num_lines": 77, "path": "/algorithms/compression/lzw.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package compression\n\nfunc LZW(in []byte) []int {\n\tif len(in) == 0 {\n\t\treturn []int{}\n\t}\n\n\ttable, next := initTable()\n\n\tvar (\n\t\ts string\n\t\tmatch string\n\t\tout []int\n\t)\n\tfor _, c := range in {\n\t\t// Append the raw byte; string(c) would UTF-8 encode bytes\n\t\t// >= 0x80 as two bytes and break the round trip.\n\t\ts += string([]byte{c})\n\t\tif _, ok := table[s]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\ttable[s] = next\n\t\tnext++\n\n\t\tmatch, s = s[:len(s)-1], 
s[len(s)-1:]\n\t\tout = append(out, table[match])\n\t}\n\n\treturn append(out, table[s])\n}\n\nfunc initTable() (map[string]int, int) {\n\ttable := make(map[string]int, 256)\n\tfor i := 0; i < 256; i++ {\n\t\t// Seed the table with raw single-byte strings, not the UTF-8\n\t\t// encoding of code point i.\n\t\ttable[string([]byte{byte(i)})] = i\n\t}\n\treturn table, 256\n}\n\nfunc initDecompressTable() (map[int]string, int) {\n\ttable := make(map[int]string, 256)\n\tfor i := 0; i < 256; i++ {\n\t\ttable[i] = string([]byte{byte(i)})\n\t}\n\treturn table, 256\n}\n\nfunc LZWDecompress(in []int) []byte {\n\tif len(in) == 0 {\n\t\treturn []byte{}\n\t}\n\n\ttable, next := initDecompressTable()\n\n\tvar (\n\t\tout []byte\n\t\tprev string\n\t)\n\n\tfor i, currCp := range in {\n\t\ts, ok := table[currCp]\n\t\tif !ok {\n\t\t\t// Code not yet in the table: the KwKwK case.\n\t\t\ts = prev + prev[:1]\n\t\t}\n\t\tout = append(out, []byte(s)...)\n\n\t\tif i == 0 {\n\t\t\tprev = s\n\t\t\tcontinue\n\t\t}\n\n\t\ttable[next] = prev + s[:1]\n\t\tnext++\n\t\tprev = s\n\t}\n\n\treturn out\n}\n" }, { "alpha_fraction": 0.3754747807979584, "alphanum_fraction": 0.3819859027862549, "avg_line_length": 12.355072021484375, "blob_id": "66d1ae7289acf1bd6472f3ef36f6a6931342eac0", "content_id": "9a7c398515a8bdb3c133f54e2c3a3ec8ddaf762b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1843, "license_type": "no_license", "max_line_length": 50, "num_lines": 138, "path": "/algorithms/str/transform_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package str_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/str\"\n)\n\nfunc TestTransform(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\ta string\n\t\tb string\n\t\tops []str.Operation\n\t}{\n\t\t\"copies\": {\n\t\t\ta: \"abc\",\n\t\t\tb: \"abc\",\n\n\t\t\tops: []str.Operation{\n\t\t\t\t{\n\t\t\t\t\tType: str.Copy,\n\t\t\t\t\tA: 'a',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Copy,\n\t\t\t\t\tA: 'b',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Copy,\n\t\t\t\t\tA: 'c',\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"replaces\": {\n\t\t\ta: \"abc\",\n\t\t\tb: \"123\",\n\n\t\t\tops: []str.Operation{\n\t\t\t\t{\n\t\t\t\t\tType: str.Replace,\n\t\t\t\t\tA: 'a',\n\t\t\t\t\tB: '1',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Replace,\n\t\t\t\t\tA: 'b',\n\t\t\t\t\tB: '2',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Replace,\n\t\t\t\t\tA: 'c',\n\t\t\t\t\tB: '3',\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"deletes\": {\n\t\t\ta: \"abc\",\n\t\t\tb: \"\",\n\n\t\t\tops: []str.Operation{\n\t\t\t\t{\n\t\t\t\t\tType: str.Delete,\n\t\t\t\t\tA: 'a',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Delete,\n\t\t\t\t\tA: 'b',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Delete,\n\t\t\t\t\tA: 'c',\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"inserts\": {\n\t\t\ta: \"\",\n\t\t\tb: \"123\",\n\n\t\t\tops: []str.Operation{\n\t\t\t\t{\n\t\t\t\t\tType: str.Insert,\n\t\t\t\t\tA: '1',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Insert,\n\t\t\t\t\tA: '2',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Insert,\n\t\t\t\t\tA: '3',\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"dna\": {\n\t\t\ta: \"ACAAGC\",\n\t\t\tb: \"CCGT\",\n\n\t\t\tops: []str.Operation{\n\t\t\t\t{\n\t\t\t\t\tType: str.Delete,\n\t\t\t\t\tA: 'A',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Copy,\n\t\t\t\t\tA: 'C',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Delete,\n\t\t\t\t\tA: 'A',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Replace,\n\t\t\t\t\tA: 'A',\n\t\t\t\t\tB: 'C',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: str.Copy,\n\t\t\t\t\tA: 'G',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 
str.Replace,\n\t\t\t\t\tA: 'C',\n\t\t\t\t\tB: 'T',\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tresult := str.Transform(tc.a, tc.b)\n\t\t\tif !cmp.Equal(result, tc.ops) {\n\t\t\t\tt.Fatalf(cmp.Diff(result, tc.ops))\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 28, "blob_id": "4282d03b64daf62ec2c0a2ca74aaeebcdc1e290d", "content_id": "9a2b33bc78d1c572f7fb8be10329dda51c73e004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 30, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/haskell-book/ch3/notes.txt", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nleft off at Exercises: Scope\n" }, { "alpha_fraction": 0.6814814805984497, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 21.5, "blob_id": "33fd0584538c38831d0bab880c52bd91f0e4b6f5", "content_id": "ddbb0835489ce0f92231521e850b1695aa3f7651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/leetcode/majority-element/solution.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "from collections import Counter\n\n\nclass Solution:\n def majorityElement(self, num):\n return Counter(num).most_common(1)[0][0]\n" }, { "alpha_fraction": 0.5744186043739319, "alphanum_fraction": 0.6186046600341797, "avg_line_length": 18.545454025268555, "blob_id": "6480e3f94cf90e9d4b235ebc5326ad4fc84e37fd", "content_id": "ae0a300c746387f22efc07d3b4486ddc899d886f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 430, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/zeromq-the-guide/chapter2/rrbroker.c", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\n#include <zmq.h>\n\n\nint main(void)\n{\n void *context = zmq_ctx_new();\n\n void *frontend = zmq_socket(context, ZMQ_ROUTER);\n void *backend = zmq_socket(context, ZMQ_DEALER);\n zmq_bind(frontend, \"tcp://0.0.0.0:11111\");\n zmq_bind(backend, \"tcp://0.0.0.0:22222\");\n\n zmq_proxy(frontend, backend, NULL);\n\n zmq_close(frontend);\n zmq_close(backend);\n zmq_ctx_destroy(context);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 15.230769157409668, "blob_id": "cdb66611bef318f7c31c2c507bb68a6aaf198ac0", "content_id": "dfc5c0c15cfea6ade3b7116ce39bd216f1f2f252", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 212, "license_type": "no_license", "max_line_length": 78, "num_lines": 13, "path": "/zerorpc-streaming/README.rst", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\nRun the server::\n\n ./server.py\n\nRun the client::\n\n ./client.py\n\nNotes\n-----\n\nI find it odd that there is no way to stop iteration on the server. 
It has to\nwait till the heartbeat timeout before it stops.\n" }, { "alpha_fraction": 0.6121672987937927, "alphanum_fraction": 0.6406844258308411, "avg_line_length": 13.61111068725586, "blob_id": "726d52852516bf4959996314ae9ec5c2483b8de3", "content_id": "8d74c8058535fd68b4b7789c0fb2ba6b639e15d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 526, "license_type": "no_license", "max_line_length": 49, "num_lines": 36, "path": "/golang/toward_go_1_3_and_beyond/old_pool.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\ntype Thing struct{}\n\nvar pool = make(chan *Thing, 10)\n\nvar count = 0\n\nfunc GetThing() *Thing {\n\tselect {\n\tcase t := <-pool:\n\t\treturn t\n\tdefault:\n\t}\n\tfmt.Println(\"creating new thing!\")\n\tcount++\n\treturn new(Thing)\n}\n\nfunc PutThing(t *Thing) {\n\tpool <- t\n}\n\nfunc main() {\n\tt1, t2, t3 := GetThing(), GetThing(), GetThing()\n\tPutThing(t1)\n\tPutThing(t2)\n\tt4, t5, t6 := GetThing(), GetThing(), GetThing()\n\tPutThing(t3)\n\tPutThing(t4)\n\tPutThing(t5)\n\tPutThing(t6)\n\tfmt.Printf(\"%d Things created!\\n\", count)\n}\n" }, { "alpha_fraction": 0.5561403632164001, "alphanum_fraction": 0.5771929621696472, "avg_line_length": 20.11111068725586, "blob_id": "4158d5784dfb96b59153589261be275fa1075f3a", "content_id": "c5ec71eb16497b13a37b237212ce08f108b50261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 570, "license_type": "no_license", "max_line_length": 73, "num_lines": 27, "path": "/golang-tour/methods_and_interfaces/13_web_servers.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n \"fmt\"\n \"log\"\n \"net/http\"\n)\n\ntype HelloServer struct {}\n\nfunc (h *HelloServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"%s %s\\n\", r.Method, r.URL)\n fmt.Fprintf(w, \"Requested %s\\n\", r.URL)\n for i := 0; i < 2000000; i++ {\n fmt.Fprintf(w, \"%d\\n\", i)\n }\n}\n\nfunc main() {\n listen := \"localhost:8123\"\n server := new(HelloServer)\n fmt.Printf(\"Starting server listening on http://%s/\\n\", listen)\n err := http.ListenAndServe(listen, server)\n if err != nil {\n log.Fatal(err)\n }\n}\n" }, { "alpha_fraction": 0.6023622155189514, "alphanum_fraction": 0.6102362275123596, "avg_line_length": 12.368420600891113, "blob_id": "b17e3312e73ee5bf39defdd2fb8598f9d8244f61", "content_id": "e3829b58e65d2ce635e67563eb4237975fd3be98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 254, "license_type": "no_license", "max_line_length": 42, "num_lines": 19, "path": "/the-go-programming-language/ch1/src/echo/echo.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package echo\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc Echo1(fragments []string) {\n\tvar s, sep string\n\tfor _, f := range fragments {\n\t\ts += sep + f\n\t\tsep = \" \"\n\t}\n\tfmt.Println(s)\n}\n\nfunc Echo2(fragments []string) {\n\tfmt.Println(strings.Join(fragments, \" \"))\n}\n" }, { "alpha_fraction": 0.617241382598877, "alphanum_fraction": 0.6241379380226135, "avg_line_length": 16.058822631835938, "blob_id": "ac77b1029edd644418d7b048ff962a15c54e8e29", "content_id": "2d2ef51f95b53029ecc783a02c6204156f90c46e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 291, "license_type": "no_license", "max_line_length": 49, 
"num_lines": 17, "path": "/algorithms/search/linear_sentinel.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package search\n\n// Θ(n)\nfunc LinearSentinel(data []int, number int) int {\n\tlastIndex := len(data) - 1\n\tlast := data[lastIndex]\n\tdata[lastIndex] = number\n\tvar i int\n\tfor data[i] != number {\n\t\ti++\n\t}\n\tdata[lastIndex] = last\n\tif i < lastIndex || data[i] == number {\n\t\treturn i\n\t}\n\treturn -1\n}\n" }, { "alpha_fraction": 0.512738823890686, "alphanum_fraction": 0.5254777073860168, "avg_line_length": 12.65217399597168, "blob_id": "9a5826e71bbfa7a50822a68181b44eefc3286b3a", "content_id": "84f08519f407c71ecf03e1a5e1472b431dfe86d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 314, "license_type": "no_license", "max_line_length": 24, "num_lines": 23, "path": "/the-go-programming-language/ch3/src/constants/si.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nconst (\n\tKB = 1000\n\tMB = KB * KB\n\tGB = MB * KB\n\tTB = GB * KB\n\tPB = TB * KB\n\tEB = PB * KB\n\tZB = EB * KB\n\tYB = ZB * KB\n)\n\nfunc main() {\n\tfmt.Println(\"KB =\", KB)\n\tfmt.Println(\"MB =\", MB)\n\tfmt.Println(\"GB =\", GB)\n\tfmt.Println(\"TB =\", TB)\n\tfmt.Println(\"PB =\", PB)\n\tfmt.Println(\"EB =\", EB)\n}\n" }, { "alpha_fraction": 0.703125, "alphanum_fraction": 0.703125, "avg_line_length": 15, "blob_id": "302e00dc1dd2866815232758dab4ab77c2556b1d", "content_id": "b6266b28f2b7e75492d0f7457ae52acf49e51efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 64, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/golang/how_to_write_go_code/source_me.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport GOPATH=$(pwd)\nexport PATH=$PATH:$GOPATH/bin\n" }, { "alpha_fraction": 0.6176100373268127, "alphanum_fraction": 0.6213836669921875, "avg_line_length": 32.08333206176758, "blob_id": "ca11f11bbafabe97effd37609be50292edcbc40f", "content_id": "1bc4b391fd51c117954d91bc10acf8a559f49836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 797, "license_type": "no_license", "max_line_length": 87, "num_lines": 24, "path": "/practical-cryptography-with-go/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n## [Practical Cryptography With Go](https://leanpub.com/gocrypto/read)\n\n - [x] Preface\n - [x] Sample Code\n - [x] A Brief Tour of the Book\n - [x] Chapter One: Introduction\n - [x] Terminology\n - [x] I’ve Got A Bad Case of the FIPS\n - [x] Threat Models\n - [x] Wrapping Up\n - [x] Chapter Two: Symmetric Ciphers\n - [x] Keys and the Value of Random Numbers\n - [x] Confidentiality and IND-CPA\n - [x] Salsa20: A Stream Cipher\n - [x] Block Cipher Modes\n - [x] Encrypting and Decrypting Data with AES-CBC\n - [x] The Role of Symmetric Cryptography\n - [x] Conclusion\n - [ ] Exercises\n - [x] Go Packages\n - [ ] Source Examples\n - [ ] Select Bibliography\n\n## [A Tour of Cryptography Packages in Go](https://www.youtube.com/watch?v=H8nA_ZZxaMU)\n" }, { "alpha_fraction": 0.5460317730903625, "alphanum_fraction": 0.6317460536956787, "avg_line_length": 18.6875, "blob_id": "9ecbcffa4967ddb81de2fa680757201206824ffc", "content_id": "45ddbbbc57d42f5e6a50e2efa9f9ea39b733f062", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Makefile", "length_bytes": 315, "license_type": "no_license", "max_line_length": 55, "num_lines": 16, "path": "/cpp-how-to-program/chapter9/Makefile", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "all: bin/fig09_03 bin/copy\n\nclean:\n\t[ -d bin ] && rm -r bin || exit 0\n\nbin:\n\tmkdir bin\n\nbin/fig09_03: bin fig09_03.cpp bin/Time.o\n\tg++ -std=c++11 fig09_03.cpp bin/Time.o -o bin/fig09_03\n\nbin/Time.o: bin Time.cpp\n\tg++ -std=c++11 -c Time.cpp -o bin/Time.o\n\nbin/copy: bin copy.cpp\n\tg++ -std=c++11 copy.cpp -o bin/copy\n" }, { "alpha_fraction": 0.5754527449607849, "alphanum_fraction": 0.6016096472740173, "avg_line_length": 15.566666603088379, "blob_id": "17aad4b07c42066d3b247b06a1283dea7aebe2a8", "content_id": "c3dd1c1bc74724e4df1dbbf9a39af2ceadc832ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 497, "license_type": "no_license", "max_line_length": 76, "num_lines": 30, "path": "/golang/toward_go_1_3_and_beyond/sync_pool.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype Thing struct{}\n\nvar pool = sync.Pool{\n\tNew: func() interface{} {\n\t\tfmt.Println(\"creating new thing!\")\n\t\tcount++\n\t\treturn new(Thing)\n\t},\n}\n\nvar count = 0\n\nfunc main() {\n\tt1, t2, t3 := pool.Get().(*Thing), pool.Get().(*Thing), pool.Get().(*Thing)\n\tpool.Put(t1)\n\tpool.Put(t2)\n\tt4, t5, t6 := pool.Get().(*Thing), pool.Get().(*Thing), pool.Get().(*Thing)\n\tpool.Put(t3)\n\tpool.Put(t4)\n\tpool.Put(t5)\n\tpool.Put(t6)\n\tfmt.Printf(\"%d Things created!\\n\", count)\n}\n" }, { "alpha_fraction": 0.6929577589035034, "alphanum_fraction": 0.7380281686782837, "avg_line_length": 43.25, "blob_id": "8591e9f59b325e58c42cbf392048fe59e047dbb5", "content_id": "7eb901c6ca0663efaa05f15153f024a5a8c9b12a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 355, "license_type": "no_license", "max_line_length": 78, "num_lines": 8, "path": "/openstack-installation-guide/notes.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n### Three-node architecture with OpenStack Networking (neutron)\n\n**Requirements for VMs**:\n\n- Controller Node: 1 processor, 2 GB memory, and 5 GB storage\n- Network Node: 1 processor, 512 MB memory, and 5 GB storage\n- Compute Node: 1 processor, 2 GB memory, and 10 GB storage\n- Make sure your VM's have an MTU of 1450 (GRE tunnels add to the packet size)\n" }, { "alpha_fraction": 0.5847952961921692, "alphanum_fraction": 0.6140350699424744, "avg_line_length": 20.375, "blob_id": "2670e5bb298dc3f0b870a035f24f396f514e4c7a", "content_id": "2d06e4fdc5125af21658e6cbfd0bbeb253042134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 171, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/golang-tour/basics/more_types_structs_slices_and_maps/view_exercise_slices.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/bin/sh\nexport GOPATH=$(pwd)\ngo get\ngo run 14_excercise_slices.go |\n tr ':' ' ' |\n awk '{print $2}' |\n base64 -D > exercise_slices.png\nopen exercise_slices.png\n" }, { "alpha_fraction": 0.6820276379585266, "alphanum_fraction": 0.7004608511924744, "avg_line_length": 30, "blob_id": "5a1efe1721d6f2bfe4ac213d3adabb329c848086", "content_id": "9d0b5d422f70d600727639bbdcc1fd2f4703731d", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 217, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/swift-wwdc/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [WWDC Talks](https://developer.apple.com/videos/wwdc/2014/)\n\n- [ ] Introduction to Swift\n- [ ] Intermediate Swift\n- [ ] Advanced Swift\n- [ ] Integrating Swift with Objective-C\n- [ ] Swift Interoperability in Depth\n" }, { "alpha_fraction": 0.7150964736938477, "alphanum_fraction": 0.7287173867225647, "avg_line_length": 57.66666793823242, "blob_id": "171ce70694eb6d28d60d031a6b9e7d2169ca1a64", "content_id": "8e5bb4a2c384d0f1257581ed38a3018fe0d5320f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 881, "license_type": "no_license", "max_line_length": 78, "num_lines": 15, "path": "/x86-64-assembly-language-programming-with-ubuntu/exercises/ch-5-quiz.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n1. The text says it is a one to one relationship, however it isn't. There are\n multiple, redundant encodings for the same instruction in assembly.\n2. A symbol table is created. Optionally, during the first pass the assembler\n can start assembling instructions.\n # forgot macro expansion and evaluation of constant expressions\n3. Using the symbol table the instructions are all assembled.\n4. The linker takes object files and creates an executable binary out of them.\n It does this by rewriting addresses to point to the right places in memory.\n5. The loader sets up memory and loads the program's contents into memory for\n execution to begin.\n6. PI equ 3.14\n7. source -> assembler -> object --> linker -> exectuable -> loader -> memory\n object / \n8. At runtime.\n9. 
The symbol names and the locations in memory where they point to.\n" }, { "alpha_fraction": 0.4527243673801422, "alphanum_fraction": 0.46233972907066345, "avg_line_length": 17.086956024169922, "blob_id": "867357fba77af687748eb5623e899c66f5f68aa3", "content_id": "a2716d2a98c6b913b744c362be161369645272aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1248, "license_type": "no_license", "max_line_length": 50, "num_lines": 69, "path": "/concurrent-fizzbuzz/fizzbuzz.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\n\nimport (\n \"fmt\"\n \"strconv\"\n)\n\n\n// sentinel value for channels\nvar EOC int = 0\n\n\nfunc responder(in chan int, out chan string) {\n for {\n data := <- in\n\n // break if you hit sentinel\n if data == EOC {\n out <- string(EOC)\n break\n }\n\n // switch to send data to out chan\n switch {\n case data % 3 == 0 && data % 5 == 0:\n out <- \"fizzbuzz\"\n case data % 3 == 0:\n out <- \"fizz\"\n case data % 5 == 0:\n out <- \"buzz\"\n default:\n out <- strconv.Itoa(data)\n }\n }\n}\n\n\nfunc requester(in chan int) {\n // send a seqence of values into in chan\n for i := 1; i < 22; i++ {\n in <- i\n }\n\n // signal termination of channel with sentinel\n in <- EOC\n}\n\n\nfunc main() {\n // create in and out channels\n in := make(chan int)\n out := make(chan string)\n\n // fire off requester and responder\n go requester(in)\n go responder(in, out)\n\n // read from out channel and display to screen\n for {\n data := <- out\n\n if data != string(EOC) {\n fmt.Printf(\"%s \", data)\n } else {\n break\n }\n }\n}\n" }, { "alpha_fraction": 0.5589005351066589, "alphanum_fraction": 0.5837696194648743, "avg_line_length": 16.363636016845703, "blob_id": "8bc9ae0cdc9d9bd858edce0ff0a4eb33d2e630de", "content_id": "550709cd60a3eacad4db322164f0b6f64976e892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 764, "license_type": "no_license", "max_line_length": 58, "num_lines": 44, "path": "/algorithms/compression/huffman_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package compression_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/algorithms/compression\"\n)\n\nfunc TestHuffmanTree(t *testing.T) {\n\troot := compression.HuffmanTree(\n\t\t[]byte{'A', 'C', 'T', 'G'},\n\t\t[]int{45, 5, 45, 5},\n\t)\n\texpectedRoot := &compression.Node{\n\t\tValue: 100,\n\t\tLeft: &compression.Node{\n\t\t\tChar: 'A',\n\t\t\tValue: 45,\n\t\t},\n\t\tRight: &compression.Node{\n\t\t\tValue: 55,\n\t\t\tLeft: &compression.Node{\n\t\t\t\tValue: 10,\n\t\t\t\tLeft: &compression.Node{\n\t\t\t\t\tChar: 'C',\n\t\t\t\t\tValue: 5,\n\t\t\t\t},\n\t\t\t\tRight: &compression.Node{\n\t\t\t\t\tChar: 'G',\n\t\t\t\t\tValue: 5,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRight: &compression.Node{\n\t\t\t\tChar: 'T',\n\t\t\t\tValue: 45,\n\t\t\t},\n\t\t},\n\t}\n\n\tif !cmp.Equal(root, expectedRoot) {\n\t\tt.Fatal(cmp.Diff(root, expectedRoot))\n\t}\n}\n" }, { "alpha_fraction": 0.5378151535987854, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 9.818181991577148, "blob_id": "614a52812397cf9341232cca2b68b25aa337df19", "content_id": "43d804ea0c56dc1a1381e43b471f62e66833cf6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 119, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/go-by-example/functions.go", "repo_name": 
"jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc sum(a, b int) int {\n\treturn a + b\n}\n\nfunc main() {\n\tfmt.Println(\"1+2 =\", sum(1, 2))\n}\n" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.6442743539810181, "avg_line_length": 36.1368408203125, "blob_id": "2939a6d931e19a70443a2abec234a27c18516ae7", "content_id": "53bb02867510a5f9e3841af2d5baae3582b7a0e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3528, "license_type": "no_license", "max_line_length": 104, "num_lines": 95, "path": "/openstack-installation-guide/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [OpenStack Installation Guide](http://docs.openstack.org/icehouse/install-guide/install/yum/content/)\n\n- [x] Preface\n - [x] Conventions\n - [x] Document change history\n- [x] 1. Architecture\n - [x] Overview\n - [x] Conceptual architecture\n - [x] Example architectures\n- [ ] 2. Basic environment configuration\n - [x] Before you begin\n - [ ] Networking\n - [ ] Network Time Protocol (NTP)\n - [ ] Passwords\n - [ ] Database\n - [ ] OpenStack packages\n - [ ] Messaging server\n- [ ] 3. Configure the Identity Service\n - [ ] Identity Service concepts\n - [ ] Install the Identity Service\n - [ ] Define users, tenants, and roles\n - [ ] Define services and API endpoints\n - [ ] Verify the Identity Service installation\n- [ ] 4. Install and configure the OpenStack clients\n - [ ] Overview\n - [ ] Install the OpenStack command-line clients\n - [ ] Set environment variables using the OpenStack RC file\n - [ ] Create openrc.sh files\n- [ ] 5. Configure the Image Service\n - [ ] Image Service overview\n - [ ] Install the Image Service\n - [ ] Verify the Image Service installation\n- [ ] 6. Configure Compute services\n - [ ] Compute service\n - [ ] Install Compute controller services\n - [ ] Configure a compute node\n- [ ] 7. Add a networking service\n - [ ] OpenStack Networking (neutron)\n - [ ] Legacy networking (nova-network)\n - [ ] Next steps\n- [ ] 8. Add the dashboard\n - [ ] System requirements\n - [ ] Install the dashboard\n - [ ] Set up session storage for the dashboard\n - [ ] Next steps\n- [ ] 9. Add the Block Storage service\n - [ ] Block Storage\n - [ ] Configure a Block Storage service controller\n - [ ] Configure a Block Storage service node\n - [ ] Verify the Block Storage installation\n - [ ] Next steps\n- [ ] 10. Add Object Storage\n - [ ] Object Storage service\n - [ ] System requirements for Object Storage\n - [ ] Plan networking for Object Storage\n - [ ] Example of Object Storage installation architecture\n - [ ] Install Object Storage\n - [ ] Install and configure storage nodes\n - [ ] Install and configure the proxy node\n - [ ] Start services on the storage nodes\n - [ ] Verify the installation\n - [ ] Add another proxy server\n - [ ] Next steps\n- [ ] 11. Add the Orchestration service\n - [ ] Orchestration service overview\n - [ ] Install the Orchestration service\n - [ ] Verify the Orchestration service installation\n - [ ] Next steps\n- [ ] 12. Add the Telemetry module\n - [ ] Telemetry\n - [ ] Install the Telemetry module\n - [ ] Install the Compute agent for Telemetry\n - [ ] Configure the Image Service for Telemetry\n - [ ] Add the Block Storage service agent for Telemetry\n - [ ] Configure the Object Storage service for Telemetry\n - [ ] Verify the Telemetry installation\n - [ ] Next steps\n- [ ] 13. 
Add the Database service\n - [ ] Database service overview\n - [ ] Install the Database service\n - [ ] Verify the Database service installation\n- [ ] 14. Launch an instance\n - [ ] Launch an instance with OpenStack Networking (neutron)\n - [ ] Launch an instance with legacy networking (nova-network)\n- [ ] A. Reserved user IDs\n- [ ] B. Community support\n - [ ] Documentation\n - [ ] ask.openstack.org\n - [ ] OpenStack mailing lists\n - [ ] The OpenStack wiki\n - [ ] The Launchpad Bugs area\n - [ ] The OpenStack IRC channel\n - [ ] Documentation feedback\n - [ ] OpenStack distribution packages\n- [ ] Glossary\n" }, { "alpha_fraction": 0.5977136492729187, "alphanum_fraction": 0.6009798645973206, "avg_line_length": 14.057376861572266, "blob_id": "ee596a5b9c033a006977b274f4b1886da62c7aba", "content_id": "8fa486e6c64924da31721e357c90451e50ac66b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1837, "license_type": "no_license", "max_line_length": 42, "num_lines": 122, "path": "/data-structures/bst/bst.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package bst\n\ntype SearchTree interface {\n\tSearch(key float64) *Node\n\tMinimum() *Node\n\tMaximum() *Node\n\tSuccessor() *Node\n\tPredecessor() *Node\n\n\tInsert(*Node) *Node\n\tDelete() *Node\n}\n\ntype Node struct {\n\tKey float64\n\tValue interface{}\n\tParent *Node\n\tLeft *Node\n\tRight *Node\n}\n\nfunc (n *Node) Search(key float64) *Node {\n\tif n == nil || key == n.Key {\n\t\treturn n\n\t}\n\tif key < n.Key {\n\t\treturn n.Left.Search(key)\n\t}\n\treturn n.Right.Search(key)\n}\n\nfunc (n *Node) Minimum() *Node {\n\tfor n.Left != nil {\n\t\tn = n.Left\n\t}\n\treturn n\n}\n\nfunc (n *Node) Maximum() *Node {\n\tfor n.Right != nil {\n\t\tn = n.Right\n\t}\n\treturn n\n}\n\nfunc (n *Node) Successor() *Node {\n\tif n.Right != nil {\n\t\treturn n.Right.Minimum()\n\t}\n\tprev := n\n\tfor n.Parent != nil && n.Left != prev {\n\t\tprev = n\n\t\tn = n.Parent\n\t}\n\treturn n\n}\n\nfunc (n *Node) Predecessor() *Node {\n\tif n.Left != nil {\n\t\treturn n.Left.Maximum()\n\t}\n\tprev := n\n\tfor n.Parent != nil && n.Right != prev {\n\t\tprev = n\n\t\tn = n.Parent\n\t}\n\treturn n\n}\n\nfunc (n *Node) Insert(x *Node) *Node {\n\tif n == nil {\n\t\treturn x\n\t}\n\tif x.Key == n.Key {\n\t\tn.Value = x.Value\n\t\treturn n\n\t}\n\tx.Parent = n\n\tif x.Key < n.Key {\n\t\tn.Left = n.Left.Insert(x)\n\t\treturn n\n\t}\n\tn.Right = n.Right.Insert(x)\n\treturn n\n}\n\nfunc (n *Node) Delete() *Node {\n\tif n.Right == nil {\n\t\ttransplant(n, n.Left)\n\t\treturn n.Left\n\t}\n\tif n.Left == nil {\n\t\ttransplant(n, n.Right)\n\t\treturn n.Right\n\t}\n\n\tsuccessor := n.Successor()\n\n\tif n.Right != successor {\n\t\ttransplant(successor, successor.Right)\n\t\tsuccessor.Right = n.Right\n\t\tsuccessor.Right.Parent = successor\n\t}\n\ttransplant(n, successor)\n\tsuccessor.Left = n.Left\n\tsuccessor.Left.Parent = successor\n\treturn successor\n}\n\nfunc transplant(a, b *Node) {\n\tif a.Parent != nil {\n\t\tif a.Parent.Left == a {\n\t\t\ta.Parent.Left = b\n\t\t} else {\n\t\t\ta.Parent.Right = b\n\t\t}\n\t}\n\n\tif b != nil {\n\t\tb.Parent = a.Parent\n\t}\n}\n" }, { "alpha_fraction": 0.2971193492412567, "alphanum_fraction": 0.4353909492492676, "avg_line_length": 26.590909957885742, "blob_id": "4721231acaee2f22597c2253e6fccc47a2ae021b", "content_id": "86c9685d253724c1c2b9138f7a46177ca35fbe35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 1215, "license_type": "no_license", "max_line_length": 78, "num_lines": 44, "path": "/x86-64-assembly-language-programming-with-ubuntu/exercises/ch-2-quiz.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "\n1. A picture of the VonNeumann Architexture:\n\n [ cpu ] [ main memory ] [ secondary storage ] [ devices ]\n | | | |\n ----------------------- memory bus ----------------------\n\n2. The memory bus.\n3. Some form of secondary storage, ie disk drives.\n4. Main memory.\n5. The latency for reads and writes to on die cache is much smaller vs reading\n from main memory.\n6. 4 bytes\n7. 1 byte, memory is byte addressable.\n8. 1. LSB is 40\n 2. MSB is 00\n9. 40 4B 4C 00\n10. A picture of RAX register:\n\n [ rax 64 bit ]\n [ eax 32 bit ]\n [ ax 16 bit ]\n [ al ]\n [ ah ]\n\n11. 1. 8\n 2. 64\n 3. 16\n 4. 32\n 5. 64\n 6. 32 # wrong it was 8!\n 7. 8\n 8. 32 # wrong it was 16!\n12. rip\n13. rsp\n14. 00 00 00 00 00 00 00 00\n 00 00 00 00 00 00 00 05\n 00 00 00 00 00 00 00 07\n 00 00 00 00 00 00 00 20\n 00 00 00 00 00 00 00 00\n15. 01 23 45 67 89 AB CD EF\n al: EF\n ax: CD EF\n eax: 89 AB CD EF\n rax: 01 23 45 67 89 AB CD EF\n" }, { "alpha_fraction": 0.48087432980537415, "alphanum_fraction": 0.4888226389884949, "avg_line_length": 15.233870506286621, "blob_id": "99143311333c8603f85520cead1f495635438b58", "content_id": "689958c79c66959e26b634ba71ece1a424cc3c92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2013, "license_type": "no_license", "max_line_length": 53, "num_lines": 124, "path": "/parsers/flbconfig/parse_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package flbconfig_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/jasonkeene/playground/parsers/flbconfig\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tinput string\n\t\texpected flbconfig.File\n\t\terr bool\n\t}{\n\t\t\"empty\": {\n\t\t\texpected: flbconfig.File{\n\t\t\t\tName: \"test.config\",\n\t\t\t\tSections: []flbconfig.Section{\n\t\t\t\t\t{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"extra whitespace\": {\n\t\t\tinput: `\n\t\t\t\t[section] \n\t\t\t\tkey val \n\t\t\t`,\n\t\t\texpected: flbconfig.File{\n\t\t\t\tName: \"test.config\",\n\t\t\t\tSections: []flbconfig.Section{\n\t\t\t\t\t{},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"section\",\n\t\t\t\t\t\tKeyValues: []flbconfig.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"key\",\n\t\t\t\t\t\t\t\tValue: \"val \",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"normal\": {\n\t\t\tinput: `\n\nbareKey bareValue\n\n[sectionA]\nkeyA1 valA1\nkeyA2 valA2\n\n[sectionB]\nkeyB1 valB1\nkeyB2 valB2\n\n\t\t`,\n\t\t\texpected: flbconfig.File{\n\t\t\t\tName: \"test.config\",\n\t\t\t\tSections: []flbconfig.Section{\n\t\t\t\t\t{\n\t\t\t\t\t\tKeyValues: []flbconfig.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"bareKey\",\n\t\t\t\t\t\t\t\tValue: \"bareValue\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sectionA\",\n\t\t\t\t\t\tKeyValues: []flbconfig.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyA1\",\n\t\t\t\t\t\t\t\tValue: \"valA1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyA2\",\n\t\t\t\t\t\t\t\tValue: \"valA2\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sectionB\",\n\t\t\t\t\t\tKeyValues: []flbconfig.KeyValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: 
\"keyB1\",\n\t\t\t\t\t\t\t\tValue: \"valB1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"keyB2\",\n\t\t\t\t\t\t\t\tValue: \"valB2\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"two sections on one line\": {\n\t\t\tinput: `\n[sectionA][sectionB]\nkey val\n`,\n\t\t\terr: true,\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tf, err := flbconfig.Parse(\"test.config\", tc.input)\n\n\t\t\tif !cmp.Equal(f, tc.expected) {\n\t\t\t\tt.Error(cmp.Diff(f, tc.expected))\n\t\t\t}\n\n\t\t\tif tc.err && err == nil {\n\t\t\t\tt.Error(\"expected err, was nil\")\n\t\t\t}\n\t\t\tif !tc.err && err != nil {\n\t\t\t\tt.Errorf(\"unexpected err: %s\", err)\n\t\t\t}\n\t\t})\n\t}\n}\n" }, { "alpha_fraction": 0.5534312129020691, "alphanum_fraction": 0.5694776177406311, "avg_line_length": 16.859756469726562, "blob_id": "0204b4a79bbb7638cdd4716daf7a8569b3ed0492", "content_id": "39fc90bc24dea8921b352e9312a3b1d17eb55a42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2929, "license_type": "no_license", "max_line_length": 91, "num_lines": 164, "path": "/lcs/lcs.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package lcs\n\nfunc Recursive(a, b string) string {\n\tif len(a) == 0 || len(b) == 0 {\n\t\treturn \"\"\n\t}\n\n\tif a[0] == b[0] {\n\t\tresult := a[0:1]\n\t\tif len(a) == 1 || len(b) == 1 {\n\t\t\treturn result\n\t\t}\n\t\treturn result + Recursive(a[1:], b[1:])\n\t}\n\n\tvar resultA, resultB string\n\tif len(a) != 1 {\n\t\tresultA = Recursive(a[1:], b)\n\t}\n\tif len(b) != 1 {\n\t\tresultB = Recursive(a, b[1:])\n\t}\n\tif len(resultA) > len(resultB) {\n\t\treturn resultA\n\t}\n\treturn resultB\n}\n\ntype MemKey struct {\n\tA string\n\tB string\n}\n\nfunc RecursiveMemoized(a, b string, mem map[MemKey]string) string {\n\tif r, ok := mem[MemKey{a, b}]; ok {\n\t\treturn r\n\t}\n\n\tif len(a) == 0 || len(b) == 0 {\n\t\tmem[MemKey{a, b}] = \"\"\n\t\treturn \"\"\n\t}\n\n\tif a[0] == b[0] {\n\t\tresult := a[0:1]\n\t\tif len(a) == 1 || len(b) == 1 {\n\t\t\tmem[MemKey{a, b}] = result\n\t\t\treturn result\n\t\t}\n\t\tresult += RecursiveMemoized(a[1:], b[1:], mem)\n\t\tmem[MemKey{a, b}] = result\n\t\treturn result\n\t}\n\n\tvar resultA, resultB string\n\tif len(a) != 1 {\n\t\tresultA = RecursiveMemoized(a[1:], b, mem)\n\t}\n\tif len(b) != 1 {\n\t\tresultB = RecursiveMemoized(a, b[1:], mem)\n\t}\n\tif len(resultA) > len(resultB) {\n\t\tmem[MemKey{a, b}] = resultA\n\t\treturn resultA\n\t}\n\tmem[MemKey{a, b}] = resultB\n\treturn resultB\n}\n\nfunc RecursiveIndexes(a, b string, i, j int) string {\n\tif len(a) == i || len(b) == j {\n\t\treturn \"\"\n\t}\n\n\tif a[i] == b[j] {\n\t\treturn a[i:i+1] + RecursiveIndexes(a, b, i+1, j+1)\n\t}\n\n\tvar resultA, resultB string\n\tif len(a) != i+1 {\n\t\tresultA = RecursiveIndexes(a, b, i+1, j)\n\t}\n\tif len(b) != j+1 {\n\t\tresultB = RecursiveIndexes(a, b, i, j+1)\n\t}\n\tif len(resultA) > len(resultB) {\n\t\treturn resultA\n\t}\n\treturn resultB\n}\n\ntype MemKeyIndexes struct {\n\tI int\n\tJ int\n}\n\nfunc RecursiveIndexesMemoized(a, b string, i, j int, mem map[MemKeyIndexes]string) string {\n\tif r, ok := mem[MemKeyIndexes{i, j}]; ok {\n\t\treturn r\n\t}\n\n\tif len(a) == i || len(b) == j {\n\t\tmem[MemKeyIndexes{i, j}] = \"\"\n\t\treturn \"\"\n\t}\n\n\tif a[i] == b[j] {\n\t\tresult := a[i:i+1] + RecursiveIndexesMemoized(a, b, i+1, j+1, mem)\n\t\tmem[MemKeyIndexes{i, j}] = result\n\t\treturn 
result\n\t}\n\n\tvar resultA, resultB string\n\tif len(a) != i+1 {\n\t\tresultA = RecursiveIndexesMemoized(a, b, i+1, j, mem)\n\t}\n\tif len(b) != j+1 {\n\t\tresultB = RecursiveIndexesMemoized(a, b, i, j+1, mem)\n\t}\n\tif len(resultA) > len(resultB) {\n\t\tmem[MemKeyIndexes{i, j}] = resultA\n\t\treturn resultA\n\t}\n\tmem[MemKeyIndexes{i, j}] = resultB\n\treturn resultB\n}\n\ntype DiffResult struct {\n\tA string\n\tB string\n}\n\nfunc Diff(a, b string) DiffResult {\n\tlcs := RecursiveIndexesMemoized(a, b, 0, 0, map[MemKeyIndexes]string{})\n\treturn DiffResult{\n\t\tA: genP(a, lcs, \"-\"),\n\t\tB: genP(b, lcs, \"+\"),\n\t}\n}\n\nfunc genP(p, lcs, op string) string {\n\tvar (\n\t\tresult string\n\t\ts      int\n\t\td      bool\n\t)\n\tfor i := 0; i < len(p); i++ {\n\t\t// Guard s so trailing characters after the last LCS match\n\t\t// don't index past the end of lcs.\n\t\tif s < len(lcs) && p[i] == lcs[s] {\n\t\t\tif d {\n\t\t\t\td = false\n\t\t\t\tresult += \">\"\n\t\t\t}\n\t\t\tresult += p[i : i+1]\n\t\t\ts++\n\t\t} else {\n\t\t\tif !d {\n\t\t\t\td = true\n\t\t\t\tresult += op + \"<\"\n\t\t\t}\n\t\t\tresult += p[i : i+1]\n\t\t}\n\t}\n\treturn result\n}\n" }, { "alpha_fraction": 0.7556818127632141, "alphanum_fraction": 0.7556818127632141, "avg_line_length": 15, "blob_id": "13e82b0b3ef6ff4242d569b19bf8284f698d5328", "content_id": "ae27f588789b7d7e765070dc879885ac1d37291e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 176, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/algorithms/sort/quick_test.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package sort_test\n\nimport (\n\t\"testing\"\n\n\tsort \"github.com/jasonkeene/playground/algorithms/sort\"\n)\n\nfunc TestQuickCorrectness(t *testing.T) {\n\ttestCorrectness(t, sort.Quick)\n}\n" }, { "alpha_fraction": 0.7543782591819763, "alphanum_fraction": 0.7543782591819763, "avg_line_length": 42.92307662963867, "blob_id": "2c34475a3beedf55081622d277e49c1f363b00f0", "content_id": "e6bf707c5867af048d14e64759630798bae33e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2284, "license_type": "no_license", "max_line_length": 140, "num_lines": 52, "path": "/coreos/progress.md", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "## [Docs](https://coreos.com/docs/)\n\n - [x] [Quick Start](https://coreos.com/docs/quickstart/)\n\n### Running CoreOS\n\n - [x] [Vagrant](https://coreos.com/docs/running-coreos/platforms/vagrant/)\n\n### Cluster Management\n\n - [x] [Install Debugging Tools](https://coreos.com/docs/cluster-management/debugging/install-debugging-tools)\n\n### Launching Containers\n\n#### Configuring fleet\n\n - [x] [fleet Configuration and API](https://coreos.com/docs/launching-containers/config/fleet-deployment-and-configuration)\n\n#### Launching Containers\n\n - [x] [Launching Containers with fleet](https://coreos.com/docs/launching-containers/launching/launching-containers-fleet/)\n - [x] [Getting Started with systemd](https://coreos.com/docs/launching-containers/launching/getting-started-with-systemd/)\n - [x] [Overview of systemctl](https://coreos.com/docs/launching-containers/launching/overview-of-systemctl/)\n - [x] [Example Deployment with fleet](https://coreos.com/docs/launching-containers/launching/fleet-example-deployment/)\n - [x] [Controlling the Cluster with fleetctl](https://coreos.com/docs/launching-containers/launching/fleet-using-the-client/)\n - [x] [Fleet Unit Files](https://coreos.com/docs/launching-containers/launching/fleet-unit-files/)\n\n#### Building 
Containers\n\n - [x] [Getting Started with Docker](https://coreos.com/docs/launching-containers/building/getting-started-with-docker/)\n - [ ] [Using Authentication for a Registry](https://coreos.com/docs/launching-containers/building/registry-authentication/)\n - [ ] [Customizing docker](https://coreos.com/docs/launching-containers/building/customizing-docker/)\n\n\n### Distributed Configuration\n\n - [ ] [Getting Started with etcd](https://coreos.com/docs/distributed-configuration/getting-started-with-etcd/)\n\n\n### SDK & Distributors\n\n - [ ] [Modifying CoreOS](https://coreos.com/docs/sdk-distributors/sdk/modifying-coreos/)\n\n\n## [Blog](https://coreos.com/blog/)\n\n - [x] [Clustering CoreOS with Vagrant](https://coreos.com/blog/coreos-clustering-with-vagrant/)\n\n\n## [Kubernetes on CoreOS](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/getting-started-guides/coreos.md)\n\n - [ ] [Multi-node cluster using cloud-config and Vagrant](https://github.com/pires/kubernetes-vagrant-coreos-cluster/blob/master/README.md)\n" }, { "alpha_fraction": 0.5931721329689026, "alphanum_fraction": 0.6073968410491943, "avg_line_length": 20.96875, "blob_id": "5cbd9cf20e3b5d173c84d60ca04d19ac8168f9d8", "content_id": "90a1f3355cfff385b091a4f6ca4634cd96bbf608", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 63, "num_lines": 32, "path": "/monte-carlo-pi/monte_carlo_pi.py", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"Naive Monte Carlo approximation of pi.\"\"\"\n\nfrom __future__ import division\n\nimport math\nfrom random import random\nfrom itertools import repeat\n\n\nITERATIONS = 10 ** 7\n\n\ndef in_circle(x, y):\n \"\"\"Determine if a given point is inside the unit circle.\"\"\"\n x, y = abs(x), abs(y)\n return y <= math.sqrt(1.0 - x ** 2.0)\n\n\ndef main(iterations=ITERATIONS):\n \"\"\"Approximate pi and print the result.\"\"\"\n count = 0\n for x in repeat(None, iterations):\n if in_circle(random(), random()):\n count += 1\n print 'iterations:', iterations\n print ' approx:', count / iterations * 4\n print ' math.pi:', math.pi\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.673202633857727, "alphanum_fraction": 0.7549019455909729, "avg_line_length": 29.600000381469727, "blob_id": "006b8ceff3db05bbac307e47291b2c81301591c6", "content_id": "d59dc9367d08ff1309b18dd28dec4ef4e474a524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 306, "license_type": "no_license", "max_line_length": 107, "num_lines": 10, "path": "/saltstack/prototypes/bin/install-minion.sh", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# install salt minion\nadd-apt-repository --yes ppa:saltstack/salt\nwget -q -O- \"http://keyserver.ubuntu.com:11371/pks/lookup?op=get&search=0x4759FA960E27C0A6\" | apt-key add -\napt-get update\napt-get install --yes salt-minion=2014.7.0+ds-2trusty1\n\n# stop miniond\nservice salt-minion stop\n" }, { "alpha_fraction": 0.4522292912006378, "alphanum_fraction": 0.47558385133743286, "avg_line_length": 13.71875, "blob_id": "6f99dfd74636ba3df75b17f33fa0273c2e288dd3", "content_id": "9686e52debe77d8c109ab9d462984b408e44557c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 471, "license_type": "no_license", "max_line_length": 44, "num_lines": 32, 
"path": "/the-go-programming-language/ch4/src/slices/rotate.go", "repo_name": "jasonkeene/playground", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n\tvalues := []int{1, 2, 3, 4, 5}\n\tfor i := 0; i < 15; i++ {\n\t\ts := make([]int, len(values))\n\t\tcopy(s, values)\n\t\trotate(s, i)\n\t\tfmt.Printf(\"rotated %d times: %v\\n\", i, s)\n\t}\n}\n\nfunc rotate(s []int, c int) {\n\tc = c % len(s)\n\tif c == 0 {\n\t\treturn\n\t}\n\tvar ac []int\n\tfor i, v := range s {\n\t\tac = append(ac, v)\n\t\tif i < c {\n\t\t\t// look behind\n\t\t\ts[i] = s[len(s)-c+i]\n\t\t} else {\n\t\t\t// copy from ac\n\t\t\ts[i] = ac[0]\n\t\t\tac = ac[1:]\n\t\t}\n\t}\n}\n" } ]
229
hanchak/WebScrape
https://github.com/hanchak/WebScrape
a7ee51d4c9a6172e8c66ba41f59a21f7c0c861c4
e6c97ef1af83bf5aa6501b47e3f7a3473a8447eb
ca7bd7fd86bd9709cda91890a94261bbe230078e
refs/heads/master
2021-07-09T04:40:00.237619
2017-10-06T19:37:15
2017-10-06T19:37:15
103,679,922
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5869676470756531, "alphanum_fraction": 0.5910723209381104, "avg_line_length": 29.754098892211914, "blob_id": "5557893ff98a5e9a6f8a03f114f8554cca144f70", "content_id": "5208c45c5180b0a2eb0a18ce230f9c312c73b245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/wikipedia_api.py", "repo_name": "hanchak/WebScrape", "src_encoding": "UTF-8", "text": "'''\r\nSimple Wikipedia Web Crawler using json and beautiful soup\r\n\r\nThe first part searchs wikipedia for your term using their API (returns json)\r\n\r\nThe second part picks a link and crawls randomly through subsequent pages\r\n\r\nM. Hanchak\r\n06OCT17\r\n'''\r\n\r\nimport random\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\nimport requests\r\n\r\n# this is the wikipedia API:\r\nsearchUrl = 'http://en.wikipedia.org/w/api.php?action=query&format=json&list=search&srsearch='\r\n\r\n# ###################### Enter your search term ################################\r\nsearchTerm = 'science'\r\n# ##############################################################################\r\n\r\nprint(searchTerm)\r\n\r\n# get wikipedia's search results from your term\r\npage = requests.get(searchUrl + searchTerm)\r\nparsed = json.loads(page.text)\r\n\r\n# json stuff to get the actual links:\r\nlinks = [item['title'] for item in parsed['query']['search'] ]\r\n\r\n# get one of them at random and get its wikipedia page\r\npageUrl = 'http://en.wikipedia.org/wiki/'\r\ngetOne = random.choice(links).replace(' ','_')\r\nprint(getOne)\r\npage = requests.get(pageUrl + getOne)\r\n\r\n# loop through other random links in the subsequent pages\r\nfor i in range(10):\r\n try:\r\n soup = BeautifulSoup(page.text, 'html.parser') # soupify the html\r\n \r\n content = soup.find(id=\"mw-content-text\") # get links only in this structure\r\n \r\n newlinks = [a['title'] for a in content.findAll('a') if a.has_attr('title')] \r\n # get only good links\r\n \r\n getOne = random.choice(newlinks) # pick one\r\n \r\n while getOne.find(':') != -1:\r\n getOne = random.choice(newlinks) # make sure it doesnt have a colon in it.\r\n \r\n \r\n pageUrl = 'http://en.wikipedia.org/wiki/' + getOne.replace(' ','_') # form new url\r\n \r\n page = requests.get(pageUrl) # get new page (try to anyway)\r\n print(getOne)\r\n\r\n except:\r\n pass\r\n \r\n \r\n" }, { "alpha_fraction": 0.5099601745605469, "alphanum_fraction": 0.5235059857368469, "avg_line_length": 17.88888931274414, "blob_id": "9c856b25adc8360cba44a5b3e4df6f03e8c50b83", "content_id": "b6f646d1d227f8612bed6eae9cbd5b8133672e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1255, "license_type": "no_license", "max_line_length": 78, "num_lines": 63, "path": "/wordlist.py", "repo_name": "hanchak/WebScrape", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nexplore word list from duolingo inspect element\r\n\r\nCreated on Thu Aug 31 09:13:00 2017\r\n\r\n@author: HanchaMS\r\n\"\"\"\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nwith open(\"wordlist2.txt\") as f:\r\n \r\n htmlcode = f.read()\r\n\r\nsouped = soup(htmlcode, \"html.parser\") \r\n\r\n\r\n'''\r\n\r\n\r\ncontainers = souped.findAll(\"span\",{\"class\":\"hoverable-word hover\"})\r\n\r\nfor container in containers:\r\n print(container.text)\r\n \r\n\r\ncontainers = souped.findAll('span',{'class':'hover'})\r\n\r\n\r\nwith open(\"words.csv\", 'w') 
as f:\r\n for container in containers:\r\n f.write(container.text + '\\n')\r\n'''\r\nparts = ( \r\n 'Adjective',\r\n 'Adverb',\r\n 'Conjunction',\r\n 'Determiner',\r\n 'Interjection',\r\n 'Noun',\r\n 'Numeral',\r\n 'Preposition',\r\n 'Pronoun',\r\n 'Proper noun',\r\n 'Verb')\r\n\r\ntemp = souped.findAll('td')\r\n\r\nwith open(\"words2.txt\", 'w') as f:\r\n for i,q in enumerate(temp):\r\n \r\n try:\r\n # try to print the word and the part of speech (usually one later)\r\n f.write(q['data-string'])\r\n w = temp[i+1].text\r\n \r\n if w in parts:\r\n f.write(',' + w + '\\n')\r\n else:\r\n f.write(',\\n')\r\n \r\n except:\r\n pass\r\n\r\n" }, { "alpha_fraction": 0.5491923689842224, "alphanum_fraction": 0.5602055788040161, "avg_line_length": 28.727272033691406, "blob_id": "bfdab363f68da09ac11f6766e8810c60800b847b", "content_id": "f67b47487ea963c07fa64acbab7df057b42ec4b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1362, "license_type": "no_license", "max_line_length": 97, "num_lines": 44, "path": "/web_scrape_2.py", "repo_name": "hanchak/WebScrape", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nWeb scraping example 2\r\n\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup as soup\r\nfrom urllib.request import urlopen as uReq\r\n\r\nmy_url = 'https://www.newegg.com/2-in-1-Laptops/SubCategory/ID-3090?Tpk=ultrabook'\r\n \r\nuClient = uReq(my_url)\r\npage_html = uClient.read()\r\nuClient.close()\r\n\r\npage_soup = soup(page_html, 'html.parser')\r\n\r\ncontainers = page_soup.findAll('div',{'class':'item-container'})\r\n\r\n\r\nfilename = 'products.csv'\r\n\r\nwith open(filename,'w') as f:\r\n\r\n headers = 'brand, product_name, shipping, price\\n'\r\n f.write(headers)\r\n \r\n for container in containers[:8]:\r\n brand = container.div.div.a.img['title']\r\n #brand = 'None'\r\n \r\n title_container = container.findAll('a',{'class':'item-title'})\r\n product_name = title_container[0].text\r\n \r\n shipping_container = container.findAll('li',{'class':'price-ship'})\r\n shipping = shipping_container[0].text.strip() \r\n \r\n temp = container.findAll('li',{'class':'price-current'})[0].text.strip()\r\n price = temp[3:9]\r\n \r\n #print('brand:' + brand)\r\n #print('product name:' + product_name)\r\n #print('shipping cost:' + shipping)\r\n \r\n f.write(brand + ',' + product_name.replace(',','|') + ',' + shipping + ',' + price +'\\n')\r\n \r\n" }, { "alpha_fraction": 0.7805642485618591, "alphanum_fraction": 0.7805642485618591, "avg_line_length": 52.16666793823242, "blob_id": "be7b2c7214d635adaf9025703bc0cbc34c7a98b4", "content_id": "ebb5ea929a3d5f2acf3c389f1d83ba30e6bd2ef8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 319, "license_type": "no_license", "max_line_length": 209, "num_lines": 6, "path": "/README.md", "repo_name": "hanchak/WebScrape", "src_encoding": "UTF-8", "text": "# WebScraping\n## with jupyter notebook\n\nThis is a simple repository for web scraping with Jupyter Notebook.\n\nThe notebook code is [here](https://github.com/hanchak/WebScrape/blob/master/ScrapeTheWeather.ipynb). It's based off of code I found at this [site](https://www.dataquest.io/blog/web-scraping-tutorial-python/).\n" } ]
4
MingXu-123/CMU-15112-HW
https://github.com/MingXu-123/CMU-15112-HW
a4ffa0af56a571d37fbcd5d9765c24bc1147d06e
f53b0193aaf8a15d30b8d634b944372b0ac94aa0
ec37cece4951ca0778e52469c1898deaeeb14df4
refs/heads/master
2020-12-14T03:44:17.750911
2020-01-17T19:57:38
2020-01-17T19:57:38
234,625,845
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7167785167694092, "alphanum_fraction": 0.7315436005592346, "avg_line_length": 27.653846740722656, "blob_id": "639f0c28bb36f5c8881935ab7ca3026eaca55ecb", "content_id": "9f5d3456335cb06a97218d481921f1119b4deeda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "no_license", "max_line_length": 69, "num_lines": 26, "path": "/15112-CMU/112-opencv-tutorial-master/openingImages.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Import opencv\nimport cv2\n\nwindow_name = \"Images\"\n\n# Importantly, images are stored as BGR\n# Use the following function to read images.\nimage = cv2.imread(\"lightCat.jpg\")\n# Error checking to make sure that our image actually loaded properly\n# Might fail if we have an invalid file name (or otherwise)\nif image is not None:\n # Display our loaded image in a window with window_name\n cv2.imshow(window_name, image)\n # Wait for any key to be pressed\n cv2.waitKey(0)\n\n# Load another image, this time in grayscale directly\nimage = cv2.imread(\"LightCat.jpg\", cv2.CV_LOAD_IMAGE_GRAYSCALE)\nif image is not None:\n cv2.imshow(window_name, image)\n cv2.waitKey(0)\n\n# Clean up before we exit!\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.48852458596229553, "alphanum_fraction": 0.5318032503128052, "avg_line_length": 39.66666793823242, "blob_id": "3250675865547c68d489ad8243d49400bac697e8", "content_id": "d82561d01c52b566723e2e8cc37ffb361d2d0d5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3050, "license_type": "no_license", "max_line_length": 114, "num_lines": 75, "path": "/15112-CMU/week2/tesettttt!.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10**-9\n\ndef drawThreadPattern(canvas, size, numSpokes, startSpoke, numSkips):\n #print(\"I am here\")\n import math\n (cx, cy, r) = (size/2, size/2, size/2 * 0.9)\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, outline='black', width=r/30)\n for i in range(numSpokes):\n iAngle = math.pi / 2 - (2 * math.pi) * (i / numSpokes) - math.pi # !!!!!!!!!!!!!\n ix = cx + r * math.cos(iAngle)\n iy = cy - r * math.sin(iAngle)\n ixt = cx + 0.85 * r * math.cos(iAngle)\n iyt = cy - 0.85 * r * math.sin(iAngle)\n\n canvas.create_line(ixold, iyold, ixnew, iynew, width=1)\n canvas.create_text(ixt, iyt, text= str(i), font=\"Arial 16 bold\", width=10)\n if i == startSpoke:\n canvas.create_oval(ix - (r/20)*1.2, iy - (r/20)*1.1, ix + (r/20)*1.1, iy + (r/20)*1.1,\n fill='green', outline='black', width=1)\n else:\n canvas.create_oval(ix - (r / 20) * 1.2, iy - (r / 20) * 1.1, ix + (r / 20) * 1.1, iy + (r / 20) * 1.1,\n fill='red', outline='black', width=1)\n\n for n in range(numSpokes):\n if n == startSpoke:\n begin = True\n fixAngle = math.pi / 2 - (2 * math.pi) * (n / numSpokes) - math.pi\n xfix = cx + 0.95 * r * math.cos(fixAngle)\n yfix = cy - 0.95 * r * math.sin(fixAngle)\n print(xfix, yfix)\n nAngle = math.pi / 2 - (2 * math.pi) * (n / numSpokes) - math.pi\n nxold = cx + 0.95 * r * math.cos(nAngle)\n nyold = cy - 0.95 * r * math.sin(nAngle)\n while begin:\n # nAngle = math.pi / 2 - (2 * math.pi) * (n / numSpokes) - math.pi\n # nxold = cx + 0.95 * r * math.cos(nAngle)\n # nyold = cy - 0.95 * r * math.sin(nAngle)\n nAnglenew = nAngle - (2 * math.pi) * (numSkips/numSpokes)\n nxnew = cx + 0.95 * r * 
math.cos(nAnglenew)\n nynew = cy - 0.95 * r * math.sin(nAnglenew)\n canvas.create_line(nxold, nyold, nxnew, nynew, width=1)\n nAngle = nAnglenew\n #print(nxnew, nynew)\n if abs(nxnew - xfix) < 0.0001 and abs(nynew - yfix) < 0.0001:\n break\n nxold, nyold = nxnew, nynew\n\n\n\n\n\ndef runDrawThreadPattern(width, height, numSpokes, startSpoke, numSkips):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == height)\n drawThreadPattern(canvas, width, numSpokes, startSpoke, numSkips)\n root.mainloop()\n\n\ndef testDrawThreadPattern():\n print(\"Testing drawThreadPattern...\", end=\"\")\n runDrawThreadPattern(400, 400, 12, 0, 5)\n runDrawThreadPattern(200, 200, 10, 3, 4)\n runDrawThreadPattern(500, 500, 19, 8, 15)\n print(\"Done.\")\n\ntestDrawThreadPattern()\n" }, { "alpha_fraction": 0.5001832842826843, "alphanum_fraction": 0.5265009999275208, "avg_line_length": 31.93781089782715, "blob_id": "445d5c325519793126796ae8bf9e756ac17388e3", "content_id": "66db5e2c555adc3705fff750241bf23c0d9fb7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13641, "license_type": "no_license", "max_line_length": 132, "num_lines": 402, "path": "/15112-CMU/BeerGame/_BierGame.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n#\r\n#\r\n#\r\n#\r\n#\r\n\r\n\"\"\"\r\n#TODO: documentation!\r\n\"\"\"\r\n\r\n__author__ = \"$Author: DR0ID $\"\r\n__version__ = \"$Revision: 94 $\"\r\n__date__ = \"$Date: 2007-08-04 12:22:42 +0200 (Sa, 04 Aug 2007) $\"\r\n__license__ = ''\r\n__copyright__ = \"DR0ID (c) 2007\"\r\n__url__ = \"http://www.mypage.bluewin.ch/DR0ID/index.html\"\r\n__email__ = \"[email protected]\"\r\n\r\n\r\nimport pygame\r\nimport sys\r\nimport _FastRenderGroup as FastRenderGroup\r\nimport random\r\nimport os\r\njoin = os.path.join\r\norig_load = pygame.image.load\r\n\r\ndef loading(file_name):\r\n return orig_load(join('data', file_name))\r\n\r\npygame.image.load = loading\r\n\r\nSCREEN_SIZE = (800, 900)\r\nPLAY_INTERVALL = (200, 600)\r\n # [(speed, speed_deviation, num_beers_gen, intervall),point limit, beer fill amount, picture]\r\n##LEVEL_CONFIG = [(2, (-1, 1), (0,1), (55,60), 1000, 50 ,\"pic1.JPG\"),\\\r\n## (3, (-2, 1), (0,2), (55,60), 2000, 50, \"pic2.JPG\"),\\\r\n## (8, (-2, 4), (0,4), (10,40), 3000, 50, \"pic3.JPG\"),\\\r\n## (8, (-0, 1), (0,6), (10,40), 100000, 50, \"pic4.JPG\")]\r\nLEVEL_CONFIG = []\r\n\r\nlevel_file = open(join('data','config.txt'))\r\nline_num = 0\r\nfor line in level_file.readlines():\r\n entries = line.split(',')\r\n if entries[0][0] == '#':\r\n continue\r\n if len(entries) != 10:\r\n print (\"Wrong number of values in config.txt, line:\", line_num)\r\n sys.exit()\r\n speed = int(entries[0].strip())\r\n speed_dev1 = int(entries[1].strip())\r\n speed_dev2 = int(entries[2].strip())\r\n num_b_gen1 = int(entries[3].strip())\r\n num_b_gen2 = int(entries[4].strip())\r\n intervall1 = int(entries[5].strip())\r\n intervall2 = int(entries[6].strip())\r\n point_limit = int(entries[7].strip())\r\n fill_amount = int(entries[8].strip())\r\n pic_name = entries[9].strip()\r\n data = (speed, (speed_dev1, speed_dev2), (num_b_gen1, num_b_gen2), (intervall1, intervall2), point_limit, fill_amount, pic_name)\r\n LEVEL_CONFIG.append(data)\r\n line_num += 1\r\n\r\n\r\n\r\n\r\n\r\nclass 
Level(FastRenderGroup.DirtySprite):\r\n \r\n speed = 0\r\n speed_deviation = 0\r\n num_beers_generation = 0\r\n intervall = 0\r\n level_limit = 0\r\n fill_amount = 0\r\n \r\n \r\n current_level = -1\r\n glas = None\r\n points = 0\r\n running = True\r\n deckels = None\r\n score = None\r\n \r\n def __init__(self):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self._missed = 0\r\n self.next_level()\r\n \r\n def reset(self):\r\n self.__class__.points = 0\r\n self.__class__.running = True\r\n self.running = True\r\n self.__class__.current_level = -1\r\n self._missed = 0\r\n self.next_level()\r\n \r\n def next_level(self):\r\n self.__class__.current_level += 1\r\n if self.__class__.current_level == len(LEVEL_CONFIG):\r\n self.__class__.current_level -= 1\r\n self.__class__.speed ,\\\r\n self.__class__.speed_deviation , \\\r\n self.__class__.num_beers_generation , \\\r\n self.__class__.intervall, \\\r\n self.__class__.level_limit, \\\r\n self.__class__.fill_amount, \\\r\n picture \\\r\n = LEVEL_CONFIG[self.__class__.current_level]\r\n self.image = pygame.image.load(picture)\r\n self.rect = self.image.get_rect(topleft = (SCREEN_SIZE[0]-self.image.get_width(), 50))\r\n## print \"what?:\", self.rect\r\n self.dirty = 1\r\n if self.glas is not None:\r\n self.glas.fill()\r\n## for key, value in self.__class__.__dict__.items():\r\n## print key, \"=\", value\r\n \r\n def add_points(self):\r\n self.score.scored(100)\r\n if self.score.points >= self.__class__.level_limit:\r\n self.next_level()\r\n \r\n def missed(self):\r\n self._missed += 1\r\n if self.running:\r\n if len(self.deckels):\r\n self.deckels[-1].kill()\r\n self.deckels.pop(-1)\r\n## print len(self.deckels)\r\n if self._missed == 3:\r\n self.running = False\r\n## print \"Game over!!!!!!!!!!!!!\"\r\n## print \"MISSED: \", self._missed\r\n \r\n \r\nLEVEL = Level()\r\n\r\nclass Score(FastRenderGroup.DirtySprite):\r\n \r\n def __init__(self):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.font = pygame.font.Font(\"freesansbold.ttf\", 50)\r\n self.points = 0\r\n self.image = self.font.render(str(0), 2, (255, 0, 0))\r\n self.rect = self.image.get_rect(topright=(200,self.image.get_height()/2))\r\n \r\n def scored(self, points):\r\n self.points += points\r\n self.image = self.font.render(str(self.points), 2, (255, 0,0))\r\n self.rect = self.image.get_rect(topright= self.rect.topright)\r\n self.dirty = 1\r\n print (self.points)\r\n\r\nclass Bier(FastRenderGroup.DirtySprite):\r\n \r\n def __init__(self):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.image = pygame.image.load(\"Bier.PNG\").convert()\r\n self.image.set_colorkey((255, 0, 255))\r\n self.rect = self.image.get_rect(topleft=(0,SCREEN_SIZE[1]-225))\r\n self.source_rect = pygame.Rect(0, 0, self.rect.width, 0)\r\n \r\n def fill(self, amount=50):\r\n## print \"filling\"\r\n if self.rect.height > self.source_rect.height:\r\n amount = LEVEL.fill_amount\r\n self.source_rect.height += amount\r\n self.rect.top -= amount\r\n else:\r\n self.source_rect.height = self.rect.height\r\n self.dirty = 1\r\n\r\n\r\nclass Glas(FastRenderGroup.DirtySprite):\r\n \r\n def __init__(self):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.image = pygame.image.load(\"Glas.PNG\").convert_alpha()\r\n self.image.set_colorkey((255, 0, 255))\r\n self.rect = self.image.get_rect(topleft=(0,SCREEN_SIZE[1]-self.image.get_height()-200))\r\n self.bier = None\r\n \r\n def fill(self):\r\n self.bier.fill()\r\n\r\nclass Deckel(FastRenderGroup.DirtySprite):\r\n \r\n num = 0\r\n \r\n def __init__(self):\r\n 
FastRenderGroup.DirtySprite.__init__(self)\r\n self.image = pygame.image.load(\"Deckel.PNG\").convert()\r\n self.image.set_colorkey((255, 0, 255))\r\n self.rect = self.image.get_rect(topleft=(self.__class__.num*60+20,SCREEN_SIZE[1]-self.image.get_height()-50))\r\n self.__class__.num += 1\r\n\r\n\r\nclass Flasche(FastRenderGroup.DirtySprite):\r\n \r\n image = None\r\n group = None\r\n \r\n def __init__(self, speed):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.speed = speed + random.randint(*LEVEL.speed_deviation)\r\n if self.__class__.image is None:\r\n self.__class__.image = pygame.image.load(\"Flasche.PNG\").convert()\r\n self.__class__.image.set_colorkey((255, 0, 255))\r\n self.image = self.__class__.image\r\n if self.__class__.group is None:\r\n self.__class__.group = pygame.sprite.Group()\r\n self.__class__.group.add(self)\r\n self.__class__.renderer.add(self)\r\n a, b = PLAY_INTERVALL\r\n w = self.image.get_width()/2\r\n self.rect = self.image.get_rect(topleft = (random.randint(a+w, b-w), -self.image.get_height()))\r\n self.dirty = 2\r\n## print \"beer\"\r\n \r\n def update(self):\r\n self.rect.y += self.speed\r\n if self.rect.top > SCREEN_SIZE[1]:\r\n for group in self.groups():\r\n group.remove(self)\r\n LEVEL.missed()\r\n \r\n def kill(self):\r\n LEVEL.add_points()\r\n FastRenderGroup.DirtySprite.kill(self)\r\n## print LEVEL.points\r\n \r\n \r\nclass Harasse(FastRenderGroup.DirtySprite):\r\n \r\n \r\n def __init__(self):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.image = pygame.image.load(\"Harasse.PNG\").convert()\r\n self.image.set_colorkey((255, 0, 255))\r\n img_h = self.image.get_height()\r\n self.rect = self.image.get_rect(topleft=(SCREEN_SIZE[0]/2, SCREEN_SIZE[1]-img_h-img_h/2))\r\n self.dirty = 2\r\n self.intervall = 40\r\n self.count = self.intervall\r\n Flasche(1)\r\n Deckel.num = 0\r\n \r\n def update(self):\r\n self.rect.centerx = pygame.mouse.get_pos()[0]\r\n pygame.mouse.set_visible(False)\r\n if self.rect.right > PLAY_INTERVALL[1]:\r\n self.rect.right = PLAY_INTERVALL[1]\r\n pygame.mouse.set_visible(True)\r\n if self.rect.left < PLAY_INTERVALL[0]:\r\n self.rect.left = PLAY_INTERVALL[0]\r\n pygame.mouse.set_visible(True)\r\n \r\n \r\n self.count += 1\r\n if self.count >= self.intervall:\r\n self.count = 0\r\n self.intervall = random.randint(*LEVEL.intervall)\r\n for num in range( random.randint(*LEVEL.num_beers_generation) ):\r\n Flasche(LEVEL.speed)\r\n # smaller collision rect\r\n orig = self.rect\r\n self.rect = pygame.Rect((orig.left, orig.top+orig.height/2), (orig.width, 3))\r\n collider = pygame.sprite.spritecollide(self, Flasche.group, True)\r\n self.rect = orig\r\n\r\nclass Text(FastRenderGroup.DirtySprite):\r\n \r\n def __init__(self, pos, msg, font, color=(255, 0, 0)):\r\n FastRenderGroup.DirtySprite.__init__(self)\r\n self.font = font\r\n self.points = 0\r\n self.image = self.font.render(str(msg), 2, color)\r\n self.rect = self.image.get_rect(topleft=pos)\r\n \r\n def center(self, dx=0, dy=0):\r\n self.rect = self.image.get_rect(center=(SCREEN_SIZE[0]/2, SCREEN_SIZE[1]/2))\r\n self.rect.move_ip(dx, dy)\r\n\r\n\r\n\r\ndef main():\r\n \r\n SCREEN_SIZE = (PLAY_INTERVALL[1]+LEVEL.image.get_width(), 900)\r\n pygame.display.init()\r\n pygame.font.init()\r\n screen = pygame.display.set_mode(SCREEN_SIZE)\r\n bgd = pygame.Surface(screen.get_size()).convert()\r\n bgd.fill((255, 255, 255))\r\n \r\n running3 = True\r\n while running3:\r\n renderer = FastRenderGroup.LayeredDirty()\r\n \r\n # init stuff\r\n Flasche.renderer = renderer\r\n 
renderer.add(Harasse(), layer=12)\r\n renderer.add(LEVEL, layer = -1)\r\n bier = Bier()\r\n LEVEL.glas = Glas()\r\n LEVEL.glas.bier = bier\r\n renderer.add(bier)\r\n renderer.add(LEVEL.glas)\r\n LEVEL.deckels = []\r\n d = Deckel()\r\n LEVEL.deckels.append(d)\r\n renderer.add(d)\r\n d = Deckel()\r\n LEVEL.deckels.append(d)\r\n renderer.add(d)\r\n d = Deckel()\r\n LEVEL.deckels.append(d)\r\n renderer.add(d)\r\n \r\n \r\n LEVEL.score = Score()\r\n renderer.add(LEVEL.score)\r\n paused_text = Text((0,0), \"Paused\", pygame.font.Font(\"freesansbold.ttf\", 200))\r\n paused_text.center()\r\n paused_text.visible = False\r\n renderer.add(paused_text, layer = 1000)\r\n \r\n \r\n level = 0\r\n clock = pygame.time.Clock()\r\n ## pygame.event.set_grab(True)\r\n paused = False\r\n running = True\r\n running2 = True\r\n while running and LEVEL.running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n running2 = False\r\n running3 = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n running = False \r\n elif event.type == pygame.ACTIVEEVENT:\r\n if event.gain:\r\n paused = False\r\n paused_text.visible = False\r\n else:\r\n paused = True\r\n paused_text.visible = True\r\n clock.tick(60)\r\n ## print clock.get_fps()\r\n # draw screen\r\n if not paused:\r\n renderer.update()\r\n pygame.display.update(renderer.draw(screen, bgd))\r\n # loop end\r\n pygame.mouse.set_visible(True)\r\n gameover_text = Text((0,0), \"Game over!\", pygame.font.Font(\"freesansbold.ttf\", 100))\r\n gameover_text.center()\r\n renderer.add(gameover_text)\r\n \r\n yes_text = Text((0,0), \"try again\", pygame.font.Font(\"freesansbold.ttf\", 50))\r\n yes_text.center(-100, 200)\r\n renderer.add(yes_text)\r\n \r\n no_text = Text((0,0), \"exit\", pygame.font.Font(\"freesansbold.ttf\", 50))\r\n no_text.center(100, 200)\r\n renderer.add(no_text)\r\n \r\n ## img = font.render(\"Game over!\", 20, (255, 0, 0))\r\n ## screen.blit(img, (SCREEN_SIZE[0]/2-img.get_width()/2, SCREEN_SIZE[1]/2-img.get_height()/2))\r\n ## pygame.display.flip()\r\n while running2:\r\n pygame.display.update(renderer.draw(screen, bgd))\r\n event = pygame.event.wait()\r\n if event.type == pygame.QUIT:\r\n running2 = False\r\n running3 = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n running2 = False \r\n running3 = False\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if no_text.rect.collidepoint(event.pos):\r\n running3 = False\r\n running2 = False\r\n elif yes_text.rect.collidepoint(event.pos):\r\n running2 = False\r\n \r\n renderer.empty()\r\n Flasche.group.empty()\r\n LEVEL.reset()\r\n running = True\r\n running2 = True\r\n \r\n ## pygame.quit()\r\n \r\nif __name__== '__main__':\r\n main()" }, { "alpha_fraction": 0.3812010586261749, "alphanum_fraction": 0.4308094084262848, "avg_line_length": 30.83333396911621, "blob_id": "c0126df2833facbd4edca1bc53c07b90cecee129", "content_id": "0a5b3e5b57d341d4568b784456aea6495d4acef4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/15112-CMU/week9/CT practice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def ct1(x, d, *args):\n print(\" \" * d + str(args) + \" \" + str(x))\n if len(args) <= 2:\n result = x\n else:\n mid = len(args) // 2\n res1 = ct1(x + len(args), d + 1, *args[:mid])\n res2 = ct1(x + sum(args), d + 1, *args[mid:])\n result = 
res1 + res2\n print(\" \" * d + \"--> \" + str(result))\n return result\nprint(ct1(0, 0, 7, 3, 5, 1, 2))\n\n" }, { "alpha_fraction": 0.5431910753250122, "alphanum_fraction": 0.5879064798355103, "avg_line_length": 27.536231994628906, "blob_id": "fef4a6a987cde2181a55184a2ead70d5f1732052", "content_id": "1772f84d82e71e92cafc578b0e9db4f5ac3217aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1968, "license_type": "no_license", "max_line_length": 51, "num_lines": 69, "path": "/15112-CMU/week2/test.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# def isKaprekarNumber(n):\n# if n < 1:\n# return False\n# if n == 1:\n# return True\n# lenOfNumsquare = len(str(n**2))\n# for i in range(1, lenOfNumsquare):\n# A = n**2 // 10 ** i\n# B = n**2 % 10 ** i\n# if B != 0 and A + B == n:\n# return True\n# return False\n\ndef isKaprekarNumber(n):\n originalnum = n\n if n <= 0:\n return False\n elif n == 1:\n return True\n for i in range(1, len(str(originalnum**2))):\n A = originalnum ** 2 // 10 ** i\n B = originalnum ** 2 % 10 ** i\n if A + B == originalnum:\n if B != 0:\n return True\n return False\n\n\ndef nthKaprekarNumber(n):\n found = 0\n guess = 0\n while (found <= n):\n guess += 1\n if (isKaprekarNumber(guess)):\n found += 1\n return guess\n\ndef testIsKaprekarNumber():\n print(\"Testing isKaprekarNumber()...\", end=\"\")\n assert(isKaprekarNumber(0) == False)\n assert(isKaprekarNumber(1) == True)\n assert(isKaprekarNumber(4) == False)\n assert(isKaprekarNumber(9) == True)\n assert(isKaprekarNumber(36) == False)\n assert(isKaprekarNumber(45) == True)\n assert(isKaprekarNumber(450) == False)\n assert(isKaprekarNumber(10) == False)\n assert (isKaprekarNumber(2223) == True)\n assert (isKaprekarNumber(22222) == True)\n assert (isKaprekarNumber(77778) == True)\n assert (isKaprekarNumber(82656) == True)\n assert (isKaprekarNumber(38962) == True)\n print(\"Passed.\")\n\ndef testNthKaprekarNumber():\n print(\"Testing nthKaprekarNumber()...\", end=\"\")\n assert(nthKaprekarNumber(0) == 1)\n assert(nthKaprekarNumber(1) == 9)\n assert(nthKaprekarNumber(2) == 45)\n assert(nthKaprekarNumber(3) == 55)\n assert(nthKaprekarNumber(4) == 99)\n assert(nthKaprekarNumber(5) == 297)\n assert(nthKaprekarNumber(6) == 703)\n assert(nthKaprekarNumber(7) == 999)\n print('Passed.')\n\n\ntestNthKaprekarNumber()\ntestIsKaprekarNumber()" }, { "alpha_fraction": 0.3974255919456482, "alphanum_fraction": 0.4537409543991089, "avg_line_length": 32.14666748046875, "blob_id": "9c6d4197e0e0a9eb3dd211d58c53ea7427506f57", "content_id": "d5533733e883d6dbda7a7567edd47c2be40ea053", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2486, "license_type": "no_license", "max_line_length": 96, "num_lines": 75, "path": "/15112-CMU/week4 cold cold/test0.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import copy\n\ndef lookAndSay(lst):\n list_num = list(lst)\n result = []\n count = 1\n if list_num == []:\n return []\n elif list_num != []:\n for i in range(len(list_num)):\n if (i + 1) < len(list_num) and list_num[i + 1] == list_num[i]:\n count += 1\n else:\n if i == 0 and list_num[i + 1] != list_num[i]:\n result.append((1, list_num[0]))\n elif i == 0 and list_num[i + 1] == list_num[i]:\n continue\n elif i == len(list_num) - 1:\n if list_num[i] == list_num[i - 1]:\n result.append((count, list_num[i - 1]))\n else:\n result.append((1, list_num[i]))\n else:\n if list_num[i] == 
list_num[i - 1]:\n result.append((count, list_num[i - 1]))\n elif list_num[i] != list_num[i - 1]:\n result.append((1, list_num[i]))\n count = 1\n return result\n\n# def lookAndSay(lst):\n# list_num = list(lst)\n# result = []\n# count = 1\n# if list_num == []:\n# return []\n# elif list_num != []:\n# for i in range(len(list_num)):\n# if list_num[i] == list_num[i - 1]:\n# count += 1\n# else:\n# count += 1\n# result.append((count - len(list_num[:i]), list_num[i]))\n#\n# return result\n\n\n\n\ndef _verifyLookAndSayIsNondestructive():\n a = [1,2,3]\n b = copy.copy(a)\n lookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\n\ndef testLookAndSay():\n print(\"Testing lookAndSay()...\", end=\"\")\n assert(_verifyLookAndSayIsNondestructive() == True)\n assert(lookAndSay([]) == [])\n assert(lookAndSay([1,1,1]) == [(3,1)])\n assert(lookAndSay([-1,2,7]) == [(1,-1),(1,2),(1,7)])\n assert(lookAndSay([3,3,8,-10,-10,-10]) == [(2,3),(1,8),(3,-10)])\n assert(lookAndSay([1, 1, 2, 2, 3, 7]) == [(2, 1), (2, 2), (1, 3), (1, 7)])\n assert(lookAndSay([-1, 2, 2, 2, 2, 3, -10, 7, 7]) == [(1, -1),(4, 2),(1, 3),(1,-10),(2, 7)])\n assert(lookAndSay([0,0,0])==[(3,0)])\n print(\"Passed.\")\n\ntestLookAndSay()\n# print(lookAndSay([1,1,,11]))\nprint(lookAndSay([-1,2,7,6,8,2]))\n# print(lookAndSay([3,3,8,-10,-10,-10]))\n# # print(lookAndSay([3,3,8,-10,-10,-10]))\n# print(lookAndSay([1, 1, 2, 2, 3, 7]))\n# print(lookAndSay([-1, 2, 2, 2, 2, 3, -10, 7, 7]))\n" }, { "alpha_fraction": 0.6318408250808716, "alphanum_fraction": 0.6348258852958679, "avg_line_length": 24.615385055541992, "blob_id": "7e0f446b9a6135ba8cbcb1f83d7687a017f04ce3", "content_id": "16edd5409074a5afdc25e6b45f3be5c9d77972f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1005, "license_type": "no_license", "max_line_length": 61, "num_lines": 39, "path": "/15112-CMU/FIFAworldcup copy/Player.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport os\nimport time\n\nfrom const import *\nfrom FBPlayer import *\nfrom Ball import BALL\n\nDISTANCE_NEW_FRAME = 20\n\nclass Player(FBPlayer):\n def __init__(self, centerx, centery):\n super(Player, self).__init__(BLUE_TEAM, centerx, centery)\n\n def handle(self):\n keys = pygame.key.get_pressed() # take pressed keys\n\n # update direction\n if keys[pygame.K_UP] and keys[pygame.K_LEFT]:\n self.move(UP_LEFT)\n elif keys[pygame.K_UP] and keys[pygame.K_RIGHT]:\n self.move(UP_RIGHT)\n elif keys[pygame.K_DOWN] and keys[pygame.K_LEFT]:\n self.move(DOWN_LEFT)\n elif keys[pygame.K_DOWN] and keys[pygame.K_RIGHT]:\n self.move(DOWN_RIGHT)\n elif keys[pygame.K_UP]:\n self.move(UP)\n elif keys[pygame.K_DOWN]:\n self.move(DOWN)\n elif keys[pygame.K_LEFT]:\n self.move(LEFT)\n elif keys[pygame.K_RIGHT]:\n self.move(RIGHT)\n\n # if BALL.owner == None:\n # length = BALL.velocity.length()\n # if length < 2:\n # self.takeBall()\n\n\n " }, { "alpha_fraction": 0.5046418905258179, "alphanum_fraction": 0.5407825112342834, "avg_line_length": 39.22666549682617, "blob_id": "d843ee23224af421d8060bc4909aa35a0c017397", "content_id": "7c37ebc000c5b350824b2fbb13d41f686b09117b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3022, "license_type": "no_license", "max_line_length": 114, "num_lines": 75, "path": "/15112-CMU/week2/testfinal.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\ndef almostEqual(x, 
y):\n return abs(x - y) < 10**-9\n\ndef drawThreadPattern(canvas, size, numSpokes, startSpoke, numSkips):\n import math\n (cx, cy, r) = (size/2, size/2, size/2 * 0.9)\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, outline='black', width=r/30)\n for i in range(numSpokes):\n iAngle = math.pi / 2 - (2 * math.pi) * (i / numSpokes) + math.pi #!!!!!!!!!!!!!\n ix = cx + r * math.cos(iAngle)\n iy = cy - r * math.sin(iAngle)\n ixold = cx + 0.95 * r * math.cos(iAngle)\n iyold = cy - 0.95 * r * math.sin(iAngle)\n\n iAnglenew = math.pi / 2 - (2 * math.pi) * ((i + numSkips) / numSpokes) + math.pi\n ixnew = cx + 0.95 * r * math.cos(iAnglenew)\n iynew = cy - 0.95 * r * math.sin(iAnglenew)\n ixt = cx + 0.85 * r * math.cos(iAngle)\n iyt = cy - 0.85 * r * math.sin(iAngle)\n\n # canvas.create_line(ixold, iyold, ixnew, iynew, width=1)\n canvas.create_text(ixt, iyt, text= str(i), font=\"Arial 16 bold\", width=10)\n if i == startSpoke:\n canvas.create_oval(ix - (r/20)*1.2, iy - (r/20)*1.1, ix + (r/20)*1.1, iy + (r/20)*1.1,\n fill='green', outline='black', width=1)\n ixfix = ixold\n iyfix = iyold\n begin = True\n # while ixnew != ixfix and iynew != iyfix:\n while begin:\n canvas.create_line(ixold, iyold, ixnew, iynew, width=1)\n ixold = ixnew\n iyold = iynew\n # iAnglenew = math.pi / 2 - (2 * math.pi) * ((i + numSkips) / numSpokes) + math.pi\n iAnglenew = iAnglenew - (2 * math.pi) * (numSkips / numSpokes)\n ixnew = cx + r * math.cos(iAnglenew)\n iynew = cy - r * math.sin(iAnglenew)\n\n if almostEqual(ixnew, ixfix) and almostEqual(iynew, iyfix):\n begin = False\n # break\n print(ixold, iyold, ixnew, iynew)\n else:\n canvas.create_oval(ix - (r / 20) * 1.2, iy - (r / 20) * 1.1, ix + (r / 20) * 1.1, iy + (r / 20) * 1.1,\n fill='red', outline='black', width=1)\n # canvas.create_line(ixold, iyold, ixnew, iynew, width = 1)\n # print(ix, iy, ixnew, iynew)\n #!!!!!!!!!!!用index试试\n\n\n\n\ndef runDrawThreadPattern(width, height, numSpokes, startSpoke, numSkips):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == height)\n drawThreadPattern(canvas, width, numSpokes, startSpoke, numSkips)\n root.mainloop()\n\n\ndef testDrawThreadPattern():\n print(\"Testing drawThreadPattern...\", end=\"\")\n runDrawThreadPattern(400, 400, 12, 0, 5)\n runDrawThreadPattern(200, 200, 10, 3, 4)\n runDrawThreadPattern(500, 500, 19, 8, 15)\n print(\"Done.\")\n\ntestDrawThreadPattern()" }, { "alpha_fraction": 0.5992661118507385, "alphanum_fraction": 0.6144025921821594, "avg_line_length": 43.79794692993164, "blob_id": "97a0329f3d238a25a76382fd9039e15be0173f01", "content_id": "b301109cef84d1067c985e88831f36ef33fb774b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13081, "license_type": "no_license", "max_line_length": 80, "num_lines": 292, "path": "/15112-CMU/112-opencv-tutorial-master/TrackingFaces.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# These are necessary to make python2 act more like python3\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport cv2\nimport sys\nimport time\nimport math\nimport collections\n\ntry:\n import coloredlogs, logging\nexcept ImportError:\n print(\"You should install coloredlogs! 
\\\"sudo pip install coloredlogs\\\"\")\n import logging\n\n# The FaceDetector class performs all of the detection and drawing of the mask \n# on top of the detected face. The mask image below (Mask Of Sliske), will be \n# drawn on top of all detected faces, given that the face is basically vertical.\nclass FaceDetector(object):\n\n # During initialization, all files are loaded, and constants are set which\n # affect the accuracy and speed of processing.\n def __init__(self):\n\n # Load in the classifiers we need for the face and eye.\n # These are files defining a haar cascade, which is a standard classical\n # technique for extracting some sort of feature (e.g. face, eyes). For\n # examples of more detectors, see the opencv source directory:\n # \n # https://github.com/opencv/opencv/tree/master/data/haarcascades\n # \n # For more information on how haar cascades work in general, and how to\n # do face detection with haar cascades, see this post:\n # \n # http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html\n facePath = \"haarcascades/haarcascade_frontalface_default.xml\"\n logging.info(\"Loading face classifier from %s\", facePath)\n self.faceCascade = cv2.CascadeClassifier(facePath)\n\n eyePath = \"haarcascades/haarcascade_eye.xml\"\n logging.info(\"Loading eye classifier from %s\", eyePath)\n self.eyeCascade = cv2.CascadeClassifier(eyePath)\n \n # Load in the mask we overlay on top of the face.\n # This image was shamelessly ripped from somewhere on Google.\n maskPath = \"Mask_of_Sliske,_Light_detail.png\"\n logging.info(\"Loading mask from %s\", maskPath)\n self.face_mask = cv2.imread(maskPath, cv2.IMREAD_UNCHANGED)\n\n # Set max sizes for iamge processing. These are used when rescaling the\n # images before processing, in order to tradeoff accuracy and\n # performance. If you find the haar cascades are taking too long to run\n # on your computer, try reducing these slightly. Alternatively, if you\n # have a beefy machine, try increasing these and notice the differences.\n self.frame_max_dim = 96\n self.face_max_dim = 96\n\n # Use a deque as a fixed size list for tracking the eye centers and\n # lengths to add some more filtering to it. Increasing the HISTORY\n # dampens the motion of the mask more, trading off between stationary\n # head performance and moving head performance. Try changing it!\n # TODO: Improve tracking performance for non-stationary targets.\n HISTORY = 5\n self.eye_center_deq = collections.deque(maxlen=HISTORY)\n self.eye_len_deq = collections.deque(maxlen=HISTORY)\n\n\n # This poorly named function will take a face, find the eyes inside of it,\n # and use those to determine how to position and scale the mask. The mask is\n # then drawn on top of the original frame, potentially with debug\n # information.\n def findEyes(self, gray_face, face_pos):\n # Rescale the gray_face to a max size. 
See earlier comments about\n # resizing image, this is the same idea.\n rows, cols = gray_face.shape\n face_scale_factor = self.face_max_dim / max(rows, cols)\n gray_face_small = cv2.resize(gray_face, (0, 0),\n fx=face_scale_factor, fy=face_scale_factor)\n\n\n # Find the eyes using the eye classifier, different than the face one.\n eyes = self.eyeCascade.detectMultiScale(\n gray_face_small,\n )\n\n # Figure out where the eyes are in the original image (this is just\n # doing some coordinate transformations)\n x1, y1, x2, y2 = face_pos\n eye_points = []\n # Use :2 because the return values seem to be ordered by confidence, so\n # taking the highest 2 confidence values gets us the eyes most often.\n # Most people only have 2 eyes, so that works out well.\n for eye_pos in eyes[:2]:\n x, y, w, h = eye_pos\n x = int(x / face_scale_factor)\n y = int(y / face_scale_factor)\n w = int(w / face_scale_factor)\n h = int(h / face_scale_factor)\n\n # Add a box for the eyes onto the debug frame.\n cv2.rectangle(self.debug_frame, (x1 + x, y1 + y), \n (x1 + x + w, y1 + y + h), (1, 1, 255), 2)\n eye_points.append((x1 + x + w // 2, y1 + y + h // 2))\n\n # This is a \"clean\" way to check for the existence of 2 eyes, since the\n # loop won't execute if 2 eyes (1 or 0) aren't present. The zip function\n # is magic, just play with it a bit.\n for p1, p2 in zip(eye_points, eye_points[1:]):\n # Draw debug line between eyes, to show this loop executed.\n cv2.line(self.debug_frame, p1, p2, (1, 1, 255), 2)\n \n p1 = np.array(p1)\n p2 = np.array(p2)\n\n # Adding some filtering around the eye length\n eye_len = np.linalg.norm(p1 - p2)\n self.eye_len_deq.append(eye_len)\n eye_len = sum(self.eye_len_deq) / len(self.eye_len_deq)\n\n eye_center = (p1 + p2) / 2\n self.eye_center_deq.append(eye_center)\n # I'm very surprised this works with numpy arrays. Yay python!\n eye_center = sum(self.eye_center_deq) / len(self.eye_center_deq)\n \n # Calculate angle between the eyes.\n # TODO: Use this information to rotate the mask slightly.\n eye_line = p2 - p1\n eye_line_norm = eye_line / np.linalg.norm(eye_line)\n horizontal = np.array([1, 0], dtype=float)\n angle = math.acos(np.dot(eye_line_norm, horizontal))\n logging.debug(\"Calculated eye angle: %f deg\", math.degrees(angle))\n\n # Scale the mask to make it fit the person wearing it in the camera\n # image. This is assuming that the length between the eyes is some\n # fixed ratio smaller than the entire head (e.g. 2.25).\n mask_scale_factor = eye_len * 2.25 / self.face_mask.shape[1]\n scaled_mask = cv2.resize(self.face_mask, (0, 0),\n fx=mask_scale_factor, fy=mask_scale_factor)\n\n # Position the mask so that the eyelines will roughly line up. With\n # the current parameters, 35% of the mask is above the eyeline, 65%\n # below, and 50% on either side. In other words, centered\n # horizontally about the center of the eye line, not centered\n # vertically about the line itself.\n # \n # Note that this add in the size (rows / cols) in order to ensure\n # there aren't weird sizing issues in the assignment later. However,\n # this doesn't do bounds checking if the face would be outside.\n # TODO: Clip mask if it would land outside image. 
Crashes right now.\n mask_rows, mask_cols, _ = scaled_mask.shape\n y1_mask = int(eye_center[1]) - int(mask_rows * 0.35)\n y2_mask = y1_mask + scaled_mask.shape[0]\n x1_mask = int(eye_center[0]) - int(mask_cols * 0.5)\n x2_mask = x1_mask + scaled_mask.shape[1]\n\n frame_roi = self.frame[y1_mask:y2_mask, x1_mask:x2_mask, :]\n\n # Channel by channel, assign the mask onto the original frame. This\n # is to make use of the fact that we have a PNG image for the mask\n # (with transparency).\n mask = scaled_mask[:, :, 3] != 0\n frame_roi[:, :, 0][mask] = scaled_mask[:, :, 0][mask]\n frame_roi[:, :, 1][mask] = scaled_mask[:, :, 1][mask]\n frame_roi[:, :, 2][mask] = scaled_mask[:, :, 2][mask]\n\n\n # Processing the face simply involves using the face_pos coordinates to rip\n # the face from the original gray image, and calling findEyes on that\n # smaller face image.\n def processFace(self, face_pos):\n x, y, w, h = face_pos\n sf = 1.0 / self.frame_scale_factor\n x1 = int(x * sf)\n y1 = int(y * sf)\n x2 = int((x + w) * sf)\n y2 = int((y + h) * sf)\n\n # First, draw the face onto the debug frame.\n cv2.rectangle(self.debug_frame, (x1, y1), (x2, y2), (1, 255, 1), 2)\n\n # For the face, find all of the eyes.\n gray_face = self.gray_frame[y1:y2, x1:x2]\n self.findEyes(gray_face, (x1, y1, x2, y2))\n\n\n # Draws tracking history for the eye centers. If the blue dots are clustered\n # together, we can expect good filtering performance. If they're clustered\n # poorly (i.e. in a line), then we have motion.\n def drawTrackingHistory(self):\n for p1, p2 in zip(self.eye_center_deq, list(self.eye_center_deq)[1:]):\n cv2.line(self.debug_frame, tuple(p1.astype(int)),\n tuple(p2.astype(int)), (255, 127, 1), 1)\n \n for p in self.eye_center_deq:\n cv2.circle(self.debug_frame, tuple(p.astype(int)), 2, (255, 1, 1))\n\n # The function where the majority of the logic hapens. `frame` is a numpy\n # array (opencv image) where faces are to be detected. The process function\n # will detect faces, overlay the mask, and optionally overlay the debug\n # information on top of the face as well.\n def process(self, frame):\n self.frame = frame\n\n # The debug frame is so that debug information can be drawn above\n # everything else. Based on how the mask is calculated (!= 0, per\n # channel), we need to use values like (1, 255, 1) for the color rather\n # than just (0, 255, 0).\n self.debug_frame = np.zeros(frame.shape, dtype=frame.dtype)\n\n # Convert to black and white because the haar cascades don't operate on\n # RGB / BGR frames.\n self.gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n x, y = self.gray_frame.shape\n\n # Resize the frame to be smaller. This is crucial in ensuring a\n # performant application, since the number of pixels to process reduces\n # quadratically with the length / width. The smaller the image, the\n # faster the prediction, though the lower the quality of the prediction\n # (sometimes nothing is found at all).\n self.frame_scale_factor = self.frame_max_dim / max(x, y)\n self.gray_frame_small = cv2.resize(self.gray_frame, (0, 0),\n fx=self.frame_scale_factor, fy=self.frame_scale_factor)\n\n # This is the line that actually performs the detection. Faces is a list\n # of tuples of the form (x, y, w, h) in image coordinates.\n faces = self.faceCascade.detectMultiScale(\n self.gray_frame_small,\n )\n\n for face_pos in faces:\n # face_pos = (x, y, w, h)\n self.processFace(face_pos)\n\n # Draw debug on top of everything. See above comments about debugger\n # frame. 
This will draw a box around the face, boxes around the eyes,\n # connect the eye centers, and draw center tracking dots.\n # TODO: Tie this into logger level?\n self.drawTrackingHistory()\n mask = self.debug_frame != 0\n self.frame[mask] = self.debug_frame[mask]\n \n\n# ======================= START READING HERE ==============================\ndef main():\n \n # Open a camera, allowing us to read frames. Note that the frames are BGR,\n # not RGB, since that's how OpenCV stores them. The index 0 is usually the\n # integrated webcam on your laptop. If this fails, try different indices, or\n # get a webcam.\n camera = cv2.VideoCapture(0)\n detector = FaceDetector()\n\n # In every loop, we read from the camera, process the frame, and then\n # display the processed frame. The process function is where all of the\n # interesting logic happens.\n while True:\n # Timing code to get diagnostics on per frame performance\n start = time.time()\n _, frame = camera.read()\n end = time.time()\n logging.debug(\"Took %2.2f ms to read from camera\", \n round((end - start) * 1000, 2))\n\n start = time.time()\n detector.process(frame) # Process \n end = time.time()\n logging.info(\"Took %2.2f ms to process frame\", \n round((end - start) * 1000, 2))\n \n # Standard display code, showing the resultant frame in a window titled\n # \"Video\". Close the window with 'q'.\n cv2.imshow('Video', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Clean up after ourselves.\n camera.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n\n try:\n coloredlogs.install(level=\"INFO\")\n except NameError:\n logging.basicConfig(level=logging.DEBUG)\n\n main()\n" }, { "alpha_fraction": 0.4928297698497772, "alphanum_fraction": 0.5189929008483887, "avg_line_length": 29.245033264160156, "blob_id": "0ab649e9f3a365893482838cca54abc4485cdd9e", "content_id": "23e9587d4183ff0ad9324b9156e5ebceb16c51a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9135, "license_type": "no_license", "max_line_length": 85, "num_lines": 302, "path": "/15112-CMU/week9/hw9.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################################\n# Hw9\n# Your Name: Ming Xu\n# Your Andrew ID:mxu2\n# Your Section:2N\n#################################################################\n# this function return the Collaborator\ndef alternatingSumCollaborators():\n return \"nobody\"\n\n\n# This is the helper function for alternatingSum(lst)\ndef doTheRecursion(lst, flag):\n if len(lst) == 0:\n return 0\n else:\n if flag:\n # transfer the flag == True to flag == False\n return lst[0] + doTheRecursion(lst[1:], False)\n else:\n # transfer the flag == False to flag == True\n return -lst[0] + doTheRecursion(lst[1:], True)\n\n\n# This is the alternating sum main function\ndef alternatingSum(lst):\n flag = True\n return doTheRecursion(lst, flag)\n\n\n# this function return the Collaborator\ndef binarySearchValuesCollaborators():\n return \"xiaoqint\"\n\n\n# this is the helper function for binary search values\ndef binarySearchRecur(alist, first, last, item, lstTuple):\n if first > last:\n return lstTuple\n midpoint = (first + last) // 2\n lstTuple += [(midpoint, alist[midpoint])]\n if alist[midpoint] == item:\n return lstTuple\n else:\n if item < alist[midpoint]:\n newLast = midpoint - 1\n return binarySearchRecur(alist, first,\n newLast, item, lstTuple)\n else:\n newFirst = midpoint + 1\n return 
binarySearchRecur(alist, newFirst,\n last, item, lstTuple)\n\n\n# this is the binary search values main function\ndef binarySearchValues(lst, item):\n first = 0\n last = len(lst) - 1\n lstTuple = []\n return binarySearchRecur(lst, first, last, item, lstTuple)\n\n\n# this is the helper function for findCategoryPath\ndef recurFindPathHelper(d, value, lst):\n for key in d:\n if (type(d[key])) != dict:\n if d[key] == value:\n lst.append(key)\n return lst\n else:\n if recurFindPathHelper(d[key],\n value, lst + [key]) is None:\n continue\n else:\n return recurFindPathHelper(d[key],\n value, lst + [key])\n\n\n# this is the main function for findCategoryPath\ndef findCategoryPath(d, value):\n lst = []\n return recurFindPathHelper(d, value, lst)\n\n\n# this is the helper function for powersOf3ToN\ndef powerHelper(n, count, lst):\n if n < 1:\n return lst\n else:\n n = n//3\n count += 1\n num = 3\n return powerHelper(n, count, lst + [num**count])\n\n\n# this is the main function for powersOf3toN(n)\ndef powersOf3ToN(n):\n if n <= 0:\n return []\n lst = []\n count = -1\n return powerHelper(n, count, lst)\n\n\n# this is function returns all the possible subsets of a.\ndef powerset(a):\n # Base case: the only possible subset of\n # an empty list is the empty list.\n if (len(a) == 0):\n return [ [] ]\n else:\n # Recursive Case: remove the first element,\n # then find all subsets of the remaining list.\n # Then duplicate each subset into two versions:\n # one without the first element, and one with it.\n partialSubsets = powerset(a[1:])\n allSubsets = [ ]\n for subset in partialSubsets:\n allSubsets.append(subset)\n allSubsets.append([a[0]] + subset)\n return allSubsets\n\n\n# get another part of the list\ndef getOtherPart(lst, left):\n import copy\n lfCopy = copy.deepcopy(left)\n res = []\n for c in lst:\n if c not in lfCopy:\n res += [c]\n else:\n lfCopy.remove(c)\n return res\n\n\n# This function returns all outcomes of two different lists\ndef divideAlistIntoTwoParts(lst):\n res = []\n allSubsets = powerset(lst)\n for left in allSubsets:\n otherPart = getOtherPart(lst, left)\n res.append((left, otherPart))\n return res\n\n\n# this is the loadBalance main function\ndef loadBalance(lst):\n res = None\n miniValue = sum(lst)\n allPossibleOutcomes = divideAlistIntoTwoParts(lst)\n for outcome in allPossibleOutcomes:\n if abs(sum(outcome[0]) - sum(outcome[1])) <= miniValue:\n miniValue = abs(sum(outcome[0]) - sum(outcome[1]))\n res = outcome\n return res\n\n\n# this is the helper function for generateValidParentheses\ndef generate(left, right, string, res):\n if left == 0 and right == 0:\n res.add(string)\n return res\n else:\n if left > 0:\n generate(left - 1, right, string + '(', res)\n if right > left:\n generate(left, right - 1, string + ')', res)\n\n\n# this is the main function for generateValidParentheses\ndef generateValidParentheses(n):\n res = set()\n if n == 0:\n return set()\n elif n % 2 != 0:\n return set()\n left, right = (n / 2), (n / 2)\n generate(left, right, \"\", res)\n return res\n\n#################################################\n# Hw9 Test Functions\n#################################################\n\ndef testAlternatingSum():\n print(\"Testing alternatingSum...\", end=\"\")\n assert(alternatingSum([1, 2, 3, 4, 5]) == 3)\n assert(alternatingSum([1, 2, 3, 4]) == - 2)\n assert(alternatingSum([]) == 0)\n assert(alternatingSum([1, 4, 6, 7, 4]) == 0)\n print(\"Passed!\")\n\n\ndef testBinarySearchValues():\n print(\"Testing binarySearchValues...\", end=\"\")\n 
assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'c') ==\n [(2, 'f'), (0, 'a'), (1, 'c')])\n assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'n') ==\n [(2, 'f'), (4, 'm'), (5, 'q')])\n assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'f') ==\n [(2, 'f')])\n assert(binarySearchValues(['a', 'b', 'c', 'd'], 'e') ==\n [(1, 'b'), (2, 'c'), (3, 'd')])\n assert(binarySearchValues(['a', 'b', 'c', 'd'], 'a') ==\n [(1, 'b'), (0, 'a')])\n assert(binarySearchValues(['a', 'b', 'c', 'd', 'e'], 'a') ==\n [(2, 'c'), (0, 'a')])\n print(\"Passed!\")\n\n\ndef testFindCategoryPath():\n print(\"Testing findCategoryPath...\", end=\"\")\n d = {\"Sporting\":\n {\"Spaniel\":\n {\"English Springer\": \"Betsy\"},\n \"Weimaraner\": \"Xeva\",\n \"Retriever\":\n {\"Golden\": \"Sammo\",\n \"Labrador\": \"Nya\"}\n },\n \"Working\":\n {\"Husky\": \"Stella\",\n \"Saint Bernard\": \"Rutherfurd\",\n \"Boxer\": \"Paximus\"},\n \"Herding\":\n {\"Corgi\":\n {\"Welsh\":\n {\"Cardigan\": \"Geb\",\n \"Pembroke\": \"Niinja\"}\n },\n \"Sheepdog\":\n {\"Bergamasco\": \"Samur\",\n \"Old English\": \"Duggy\",\n \"Shetland\": \"Walker\"}\n },\n \"Other\": \"Kimchee\"\n }\n value1 = \"Samur\"\n value2 = \"Weimaraner\"\n value3 = \"Betsy\"\n value4 = \"Other\"\n value5 = \"Paximus\"\n assert(findCategoryPath(d, value1) == [\"Herding\", \"Sheepdog\",\n \"Bergamasco\"])\n assert(findCategoryPath(d, value2) is None)\n assert(findCategoryPath(d, value3) == [\"Sporting\", \"Spaniel\",\n \"English Springer\"])\n assert(findCategoryPath(d, value4) is None)\n assert(findCategoryPath(d, value5) == [\"Working\", \"Boxer\"])\n print(\"Passed!\")\n\n\ndef testPowersOf3ToN():\n print(\"Testing powersOf3ToN...\", end=\"\")\n assert(powersOf3ToN(10.5) == [1, 3, 9])\n assert(powersOf3ToN(9) == [1, 3, 9])\n assert(powersOf3ToN(0) == [])\n assert(powersOf3ToN(0.9876) == [])\n assert(powersOf3ToN(1) == [1])\n assert(powersOf3ToN(-10.5) == [])\n assert(powersOf3ToN(-43) == [])\n assert(powersOf3ToN(2186.5) == [1,3,9,27,81,243,729])\n assert(powersOf3ToN(2187) == [1,3,9,27,81,243,729,2187])\n print(\"Passed!\")\n\n\ndef testLoadBalance():\n print(\"Testing loadBalance...\", end=\"\")\n assert(loadBalance([3, 6, 1, 7, 9, 8, 22, 3]) == ([3, 6, 1, 7, 9, 3], [8, 22]) or\n loadBalance([3, 6, 1, 7, 9, 8, 22, 3]) == ([3, 6, 9, 8, 3], [1, 7, 22]) or\n loadBalance([3, 6, 1, 7, 9, 8, 22, 3]) == ([9, 8, 7, 3, 3], [22, 6, 1]) or\n loadBalance([3, 6, 1, 7, 9, 8, 22, 3]) == ([3, 1, 22, 3], [6, 7, 9, 8])\n )\n assert(loadBalance([0, 1, 2]) == ([0, 2], [1]))\n print(\"Passed!\")\n\n\ndef testGenerateValidParentheses():\n print(\"Testing generateValidParentheses...\", end=\"\")\n assert(generateValidParentheses(4) == { \"(())\", \"()()\" })\n assert(generateValidParentheses(6) == { \"((()))\", \"()(())\",\n \"(())()\", \"(()())\", \"()()()\" })\n assert(generateValidParentheses(5) == set())\n assert(generateValidParentheses(0) == set())\n print(\"Passed!\")\n\n\ndef testAll():\n testAlternatingSum()\n testBinarySearchValues()\n testLoadBalance()\n testFindCategoryPath()\n testGenerateValidParentheses()\n testPowersOf3ToN()\n\n\ndef main():\n testAll()\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.6048593521118164, "alphanum_fraction": 0.6265984773635864, "avg_line_length": 30.54166603088379, "blob_id": "e1a66e4ef9b644b86c850a55c8276d3efacbd813", "content_id": "ade55263a6d41f85c530be42d9db447876be2697", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 782, "license_type": 
"no_license", "max_line_length": 107, "num_lines": 24, "path": "/15112-CMU/BeerGame/readme.txt", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "date: 03.08.2007\r\n\r\nproject: BierGame\r\n\r\nwho: DR0ID\r\n mail: \[email protected]\r\n homepage: \thttp://www.mypage.bluewin.ch/DR0ID/index.html\r\n\r\nartwork: Internet and DR0ID\r\n\r\nhow to run: Game.pyw, use the mouse to catch the beer bottles\r\n\r\nlicenses: ?\r\n\r\ndependecies: python 2.4.1 (www.python.org)\r\n pygame 1.7.1 (www.pygame.org)\r\n\r\ntools used: spe (http://pythonide.stani.be/)\r\n\r\ncomments:\tIt is a small game I have written in approximately one day. Hope you have fun.\r\n Actually its a clone of a game I have seen some time ago (that was written in flash IIRC).\r\n\r\n You can replace the file in the data directory with you prefered pictures. You can modifie the \r\n Levels too in the config file. \r\n" }, { "alpha_fraction": 0.5410677790641785, "alphanum_fraction": 0.5872690081596375, "avg_line_length": 25.351350784301758, "blob_id": "e1b031c93d66c0aa32adcbc1a248f00879250a68", "content_id": "61e0a854671c5eac6b6102cac9fa77bf4d6e329e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 55, "num_lines": 37, "path": "/15112-CMU/week4 cold cold/test1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def destructiveRemoveRepeats(lst):\n for i in range(len(lst) - 1, -1, -1):\n if lst.count(lst[i]) > 1:\n lst.pop(i)\n\ndef destructiveRemoveRepeats1(lst):\n lst.reverse()\n index = 0\n while index < len(lst):\n print(len(lst))\n if lst.count(lst[index]) > 1:\n lst.pop(index)\n else:\n index += 1\n lst.reverse()\n\ndef destructiveRemoveRepeats2(lst):\n lst.reverse()\n for num in lst:\n if lst.count(num) > 1:\n lst.remove(num)\n lst.reverse()\n\n\ndef testDestructiveRemoveRepeats():\n print(\"Testing destructiveRemoveRepeats()\", end=\"\")\n a = [1,3,5,3,3,2,1,7,5]\n assert(destructiveRemoveRepeats(a) == None)\n assert(a == [1,3,5,2,7])\n b = [1,2,3,-2]\n assert(destructiveRemoveRepeats(b) == None)\n assert(b == [1,2,3,-2])\n print(\"Passed.\")\n\ntestDestructiveRemoveRepeats()\n# print(destructiveRemoveRepeats([1,3,5,3,3,2,1,7,5]))\n# print(destructiveRemoveRepeats([1,2,3,-2]))" }, { "alpha_fraction": 0.6645885109901428, "alphanum_fraction": 0.7157106995582581, "avg_line_length": 28.703702926635742, "blob_id": "63d0eac1fd71594801f396eae0ad48a5cbdea4bd", "content_id": "78a15778c3a4bfa738dc8d9eb8aaf0ff815f8abb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 76, "num_lines": 27, "path": "/15112-CMU/112-opencv-tutorial-master/houghCircles.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\n#Read in our image, blur it and convert to grayscale\nimg = cv2.imread('coins.jpg',0)\nimg = cv2.medianBlur(img,5)\ncimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)\n\n#create a list of circles in the image\ncircles = cv2.HoughCircles(img,cv2.cv.CV_HOUGH_GRADIENT,1,50)\n\n#show our circles on the original image\n#np.around rounds each index of circles to the nearest int so we can draw it\ncircles = np.uint16(np.around(circles))\nfor i in circles[0,:]:\n # draw the outer circle\n cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n 
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)\n\n#display and save our new image\ncv2.imshow('detected circles',cimg)\ncv2.imwrite('detectedCoins.jpg', cimg)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.582495391368866, "alphanum_fraction": 0.59175705909729, "avg_line_length": 38.469974517822266, "blob_id": "6a65ec12837db069bc54dbb154e4cc8e0eea1681", "content_id": "57de70884d2469fedd5a99a1188e74afe7570c77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15116, "license_type": "no_license", "max_line_length": 84, "num_lines": 383, "path": "/15112-CMU/week5/10.6 tetris animation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw6\n# Your andrewID: yilunw\n# Your section: F\n#################################################\nfrom tkinter import *\n#################################################\n# Hw6-tetris\n#################################################\ndef gameDimentions(data):\n data.rows = 15\n data.cols = 10\n data.cellSize = 20\n data.margin = 25\n return (data.rows,data.cols,data.cellSize,data.margin)\n \ndef init(data):\n (data.rows,data.cols,data.cellSize,data.margin) = gameDimentions(data)\n data.emptyColor = \"#CFD8DC\"\n data.board = []\n #fill the board, which is a 2D list, with the empty color\n data.board += [[data.emptyColor for col in range(data.cols)]\\\n for row in range(data.rows)]\n data.board2 = []\n data.board2 += [[data.emptyColor for col in range(4)]\\\n for row in range(4)]\n # Seven \"standard\" pieces (tetrominoes)\n data.iPiece = [\n [ True, True, True, True ]\n ]\n\n data.jPiece = [\n [ True, False, False ],\n [ True, True, True ]\n ]\n\n data.lPiece = [\n [ False, False, True ],\n [ True, True, True ]\n ]\n\n data.oPiece = [\n [ True, True ],\n [ True, True ]\n ]\n\n data.sPiece = [\n [ False, True, True ],\n [ True, True, False ]\n ]\n\n data.tPiece = [\n [ False, True, False ],\n [ True, True, True ]\n ]\n\n data.zPiece = [\n [ True, True, False ],\n [ False, True, True ]\n ]\n data.tetrisPieces = [data.iPiece, data.jPiece, data.lPiece, data.oPiece, \\\n data.sPiece, data.tPiece, data.zPiece]\n data.tetrisPieceColors = [\"red\", \"#FFEB3B\", \"violet\", \"#EC407A\", \\\n \"#00ACC1\", \"green\", \"blue\"]\n data.paused = False\n firstFallingPiece = newFallingPiece(data)\n data.score = 0\n data.isGameOver = False\n\ndef drawCell(canvas,data,row,col):\n #draw every cell\n boardWidth = data.width - 2*data.margin\n boardHeight = data.height - 2*data.margin\n cellBoundsWidth = 3\n canvas.create_rectangle(data.margin + data.cellSize*col, data.margin + \\\n data.cellSize*row, data.margin + data.cellSize*(col+1), data.margin + \\\n data.cellSize*(row+1), fill = data.board[row][col], width = cellBoundsWidth)\n\ndef drawBoard(canvas, data):\n #draw the board by filling every cells(using draw cells)\n for row in range(data.rows):\n for col in range(data.cols):\n drawCell(canvas,data,row,col) \n\ndef newFallingPiece(data):\n import random\n randomIndex = random.randint(0, len(data.tetrisPieces) - 1)\n data.fallingPiece = data.tetrisPieces[randomIndex]\n data.fallingPieceRow = 0\n data.fallingPieceCol = data.cols//2 - len(data.fallingPiece[0])//2\n data.fallingPieceColor = data.tetrisPieceColors[randomIndex]\n data.numFallingPieceRow = len(data.fallingPiece)\n data.numFallingPieceCol = len(data.fallingPiece[0])\n \ndef drawFallingPiece(canvas, data):\n #find out the boolean value(True/False) to draw 
falling pieces\n for row in range(len(data.fallingPiece)):\n for col in range(len(data.fallingPiece[0])):\n if data.fallingPiece[row][col] == True:\n color = data.fallingPieceColor\n pieceRow = data.fallingPieceRow + row\n pieceCol = data.fallingPieceCol + col\n cellBoundsWidth = 3\n canvas.create_rectangle(data.margin + data.cellSize*pieceCol, \\\n data.margin + data.cellSize*pieceRow, \\\n data.margin + data.cellSize*(pieceCol+1), \\\n data.margin + data.cellSize*(pieceRow+1), \\\n fill = data.fallingPieceColor, width = cellBoundsWidth)\n\ndef moveFallingPiece(data, drow, dcol):\n if data.paused == False:\n data.fallingPieceRow += drow\n data.fallingPieceCol += dcol\n if fallingPieceIsLegal(data) == False:\n data.fallingPieceRow -= drow\n data.fallingPieceCol -= dcol\n return False\n return True\n\ndef hardDropFallingPiece(data):\n if data.paused == False:\n while fallingPieceIsLegal(data):\n data.fallingPieceRow += 1\n data.fallingPieceRow -= 1\n\ndef fallingPieceIsLegal(data):\n for row in range(len(data.fallingPiece)):\n for col in range(len(data.fallingPiece[0])):\n pieceRow = data.fallingPieceRow + row\n pieceCol = data.fallingPieceCol + col\n if data.fallingPiece[row][col] == True:\n #to check if it's on the board\n if pieceRow < 0 or pieceRow >= data.rows or \\\n pieceCol < 0 or pieceCol >= data.cols:\n return False\n #to check if the color of the cell's location is emptyColor\n if data.board[pieceRow][pieceCol] != data.emptyColor:\n return False\n return True\n\ndef rotateFallingPiece(data):\n if data.paused == False:\n import copy\n #set temp local variables that restores the old datas, \\\n #which may be used if the new piece is illegal\n oldRow = data.fallingPieceRow\n oldCol = data.fallingPieceCol\n oldNumRows = data.numFallingPieceRow\n oldNumCols = data.numFallingPieceCol\n #compute the new locations\n newRow = oldRow + oldNumRows//2 - oldNumCols//2\n newCol = oldCol + oldNumCols//2 - oldNumRows//2 \n newNumRows = oldNumCols\n newNumCols = oldNumRows\n oldPieceList = copy.deepcopy(data.fallingPiece)\n #create a new 2D list for storing the rotated piece\n newPieceList = []\n newPieceList += [[None for col in range(newNumCols)]\\\n for row in range(newNumRows)]\n #rotate the whole piece counterclockwise\n for row in range(len(newPieceList)):\n for col in range(len(newPieceList[0])):\n newPieceList[row][col] = data.fallingPiece\\\n [col][oldNumCols-1-row]\n #if the piece is legal, just return that piece on the board\n data.fallingPiece = newPieceList\n data.fallingPieceRow = newRow\n data.fallingPieceCol = newCol\n data.numFallingPieceRow = newNumRows\n data.numFallingPieceCol = newNumCols\n #if the rotated piece is not legal, go back to the previous step\n if fallingPieceIsLegal(data) == False:\n data.fallingPiece = oldPieceList\n data.fallingPieceRow = oldRow \n data.fallingPieceCol = oldCol\n data.numFallingPieceRow = oldNumRows\n data.numFallingPieceCol = oldNumCols \n\ndef rotateFallingPieceClockwise(data):\n if data.paused == False:\n import copy\n #set temp local variables that restores the old datas, \\\n #which may be used if the new piece is illegal\n oldRow = data.fallingPieceRow\n oldCol = data.fallingPieceCol\n oldNumRows = data.numFallingPieceRow\n oldNumCols = data.numFallingPieceCol\n #compute the new locations\n newRow = oldRow + oldNumRows//2 - oldNumCols//2\n newCol = oldCol + oldNumCols//2 - oldNumRows//2 \n newNumRows = oldNumCols\n newNumCols = oldNumRows\n oldPieceList = copy.deepcopy(data.fallingPiece)\n #create a new 2D list for storing the rotated 
piece\n newPieceList = []\n newPieceList += [[None for col in range(newNumCols)]\\\n for row in range(newNumRows)]\n #rotate the whole piece clockwise\n for row in range(len(newPieceList)):\n for col in range(len(newPieceList[0])):\n newPieceList[row][col] = data.fallingPiece\\\n [oldNumRows-1-col][row]\n #if the piece is legal, just return that piece on the board\n data.fallingPiece = newPieceList\n data.fallingPieceRow = newRow\n data.fallingPieceCol = newCol\n data.numFallingPieceRow = newNumRows\n data.numFallingPieceCol = newNumCols\n #if the rotated piece is not legal, go back to the previous step\n if fallingPieceIsLegal(data) == False:\n data.fallingPiece = oldPieceList\n data.fallingPieceRow = oldRow \n data.fallingPieceCol = oldCol\n data.numFallingPieceRow = oldNumRows\n data.numFallingPieceCol = oldNumCols \n\ndef removeFullRows(data): \n import copy\n index = data.rows-1\n #create an empty board for checking full rows\n board = []\n board += [[data.emptyColor for col in range(data.cols)]\\\n for row in range(data.rows)]\n count = 0\n #eliminate every full row (not containing any emptyColor) \\\n #and count the score based on the number of full rows\n for row in range(data.rows-1, -1, -1):\n if data.emptyColor not in data.board[row]:\n count += 1\n else:\n board[index] = copy.deepcopy(data.board[row])\n index -= 1\n data.board = board\n data.score += count**2\n \ndef placeFallingPiece(data):\n for row in range(data.numFallingPieceRow):\n for col in range(data.numFallingPieceCol):\n #to \"insert\" that piece into the board(so it cannot move)\n if data.fallingPiece[row][col] == True:\n data.board[data.fallingPieceRow+row][data.fallingPieceCol+col]\\\n = data.fallingPieceColor\n removeFullRows(data)\n\n'''\ndef drawCell2(canvas,data,row,col):\n #draw every cell\n boardWidth = data.width - 2*data.margin\n boardHeight = data.height - 2*data.margin\n cellBoundsWidth = 3\n canvas.create_rectangle(data.margin + data.cellSize*(data.cols+3+col), \\\n data.margin + data.cellSize*(data.rows//2+row), \\\n data.margin + data.cellSize*(data.cols+col+4), \\\n data.margin + data.cellSize*(data.rows//2+row+1), \\\n fill = data.board2[row][col], width = cellBoundsWidth)\n \ndef drawBoard2(canvas, data):\n #draw the board by filling every cells(using draw cells)\n for row in range(4):\n for col in range(4):\n drawCell2(canvas,data,row,col) \n\ndef drawNextFallingPiece(canvas, data):\n #find out the boolean value(True/False) to draw the next falling piece\n for row in range(len(data.fallingPiece)):\n for col in range(len(data.fallingPiece[0])):\n if data.fallingPiece[row][col] == True:\n color = data.fallingPieceColor\n pieceRow = data.fallingPieceRow + data.rows//2 + row\n pieceCol = data.fallingPieceCol + data.cols + col\n cellBoundsWidth = 3\n canvas.create_rectangle(data.margin + data.cellSize*pieceCol, \\\n data.margin + data.cellSize*pieceRow, \\\n data.margin + data.cellSize*(pieceCol+1), \\\n data.margin + data.cellSize*(pieceRow+1), \\\n fill = data.fallingPieceColor, width = cellBoundsWidth)\n'''\n\ndef mousePressed(event, data):\n # use event.x and event.y\n pass\n\ndef keyPressed(event, data):\n if event.keysym == \"a\":\n rotateFallingPiece(data)\n elif event.keysym == \"d\":\n rotateFallingPieceClockwise(data)\n elif event.keysym == \"Down\":\n moveFallingPiece(data, 1, 0)\n elif event.keysym == \"Left\":\n moveFallingPiece(data, 0, -1)\n elif event.keysym == \"Right\":\n moveFallingPiece(data, 0, 1)\n elif event.keysym == \"space\":\n hardDropFallingPiece(data)\n elif event.keysym == 
\"p\":\n data.paused = not data.paused#pause the game (can return back)\n elif event.keysym == \"r\":\n data = init(data)#restart the game\n\ndef timerFired(data):\n if data.paused == False:\n if data.isGameOver == True:\n return #stop generate any new piece, thus end the game\n if moveFallingPiece(data, +1, 0) == False:\n placeFallingPiece(data)\n newFallingPiece(data) #generate a new piece\n if fallingPieceIsLegal(data) == False:\n data.isGameOver = True\n \ndef redrawAll(canvas, data):\n canvas.create_rectangle(0, 0, data.width,data.height, fill = \"#29B6F6\")\n drawBoard(canvas, data)\n drawFallingPiece(canvas, data)\n '''\n drawBoard2(canvas, data)\n drawNextFallingPiece(canvas, data) \n canvas.create_text(data.width*(4/5), data.height/3, \\\n text = \"Next\\nFalling\\nPiece:\" ,\\\n font = \"Arial 23 bold\", fill = \"purple\" )\n #need to be revised\n '''\n canvas.create_text(data.width/2, data.margin/2, \\\n text = \"Score:\" + str(data.score) ,\\\n font = \"Arial 23 bold\", fill = \"purple\" )\n #if the tetris game pauses, return \"Game Paused!\" message \n if data.paused == True:\n canvas.create_rectangle(0, data.height/3, data.width, \\\n data.height*(2/3), fill = \"gold\")\n canvas.create_text(data.width/2, data.height/2, text = \"Game Paused!\",\\\n font = \"TimesNewRoman 35 bold\", fill = \"red\")\n #if the tetris game ends, return \"Game Over!\" message\n if data.isGameOver == True:\n canvas.create_rectangle(0, data.height/6, data.width, data.height/3, \\\n fill = \"gold\")\n canvas.create_text(data.width/2, data.height/4, text = \"Game Over!\",\\\n font = \"TimesNewRoman 35 bold\", fill = \"red\")\n\ndef playTetris(width, height):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 500 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n \nplayTetris(250,350)" }, { "alpha_fraction": 0.5647059082984924, "alphanum_fraction": 0.5805882215499878, "avg_line_length": 33.35353469848633, "blob_id": "832bf79cad4a6cf23177a388b0e07d68fdf50343", "content_id": "d9397f1cf1677be75f000546174d5ca75900698d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3400, "license_type": "no_license", "max_line_length": 73, "num_lines": 99, "path": "/15112-CMU/week10/sierpinskiTriangle.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": 
"UTF-8", "text": "from tkinter import *\n\ndef init(data):\n data.level = 1\n\ndef drawSierpinskiTriangle(canvas, x, y, size, level):\n # (x,y) is the lower-left corner of the triangle\n # size is the length of a side\n # Need a bit of trig to calculate the top point\n if level == 0:\n topY = y - (size**2 - (size/2)**2)**0.5\n canvas.create_polygon(x, y, x + size, y, x + size/2, topY,\n fill=\"black\")\n else:\n print(level)\n # Bottom-left triangle\n drawSierpinskiTriangle(canvas, x, y, size/2, level-1)\n # Bottom-right triangle\n drawSierpinskiTriangle(canvas, x + size/2, y, size/2, level-1)\n # Top triangle\n midY = y - ((size/2)**2 - (size/4)**2)**0.5\n drawSierpinskiTriangle(canvas, x + size/4, midY, size/2, level-1)\n\n\ndef keyPressed(event, data):\n if event.keysym in [\"Up\", \"Right\"]:\n data.level += 1\n elif (event.keysym in [\"Down\", \"Left\"]) and (data.level > 0):\n data.level -= 1\n\ndef redrawAll(canvas, data):\n margin = min(data.width, data.height)//10\n x, y = margin, data.height - margin\n size = min(data.width, data.height) - 2*margin\n drawSierpinskiTriangle(canvas, x, y, size, data.level)\n canvas.create_text(data.width / 2, 0,\n text=\"Level %d Fractal\" % (data.level),\n font=\"Arial \" + str(int(margin / 3)) + \" bold\",\n anchor=\"n\")\n canvas.create_text(data.width / 2, margin,\n text=\"Use arrows to change level\",\n font=\"Arial \" + str(int(margin / 4)),\n anchor=\"s\")\n\ndef mousePressed(event, data): pass\n\ndef timerFired(data): pass\n\n# Updated Animation Starter Code\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 400)" }, { "alpha_fraction": 0.4618736505508423, "alphanum_fraction": 0.4727668762207031, "avg_line_length": 22, "blob_id": "314976fd16a4c1f7f0665db07a6ea9ffdc9289be", "content_id": "985127c8fc1bc41711760f756f770bfbbc60196a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 38, "num_lines": 20, "path": "/15112-CMU/week9/test 1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "d = {1:[], 2:{}, 
\"Retriever\":\n {\"Golden\": \"Sammo\",\n \"Labrador\": \"Nya\"}}\nprint(type(d[1]))\nprint(type(d[2]) == dict)\nprint(type(d[\"Retriever\"]))\na = []\nprint(a + [1])\n\n# this is the load balance function\ndef loadBalance(lst):\n lstA = []\n lstB = []\n lst.sort()\n for item in reversed(lst):\n if sum(lstA) < sum(lstB):\n lstA += [item]\n else:\n lstB += [item]\n return (lstA, lstB)" }, { "alpha_fraction": 0.6516128778457642, "alphanum_fraction": 0.6903225779533386, "avg_line_length": 44.814815521240234, "blob_id": "8f50338cf74ea4d4765bee6acad49bde070b9ea8", "content_id": "87511baea63d1513ee723aeda25415f736998f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 89, "num_lines": 27, "path": "/15112-CMU/Design Proposal and TP/TP/Background.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file only contains the required background class\nimport pygame\nfrom Colors import *\npygame.init()\n\ndef getGameDimension():\n displayWidth = 1100\n displayHeight = 680\n scoreTableWidth = displayWidth\n scoreTableHeight = 60\n return (displayWidth, displayHeight, scoreTableWidth, scoreTableHeight)\n\n\nclass Background(pygame.Surface):\n def __init__(self):\n super(Background, self).__init__((getGameDimension()[0], getGameDimension()[1]))\n # the rect is the score table of this game\n pygame.draw.rect(self, Colors().FloralWhite,\n (0, 0, getGameDimension()[2], getGameDimension()[3]))\n backgroundImg = pygame.image.load('./assets/images/field/soccer-field.png')\n backgroundImg2 = pygame.image.load('./assets/images/background/WechatIMG174.png')\n backgroundImg3 = pygame.image.load('./assets/images/background/WechatIMG173.png')\n resizedbackgroundImg2 = pygame.transform.scale(backgroundImg2, (1100, 60))\n resizedbackgroundImg3 = pygame.transform.scale(backgroundImg3, (300, 60))\n self.blit(backgroundImg, (0, getGameDimension()[3]))\n self.blit(resizedbackgroundImg2, (0, 0))\n self.blit(resizedbackgroundImg3, (800, 0))\n\n\n\n" }, { "alpha_fraction": 0.5458663702011108, "alphanum_fraction": 0.5968289971351624, "avg_line_length": 28.46666717529297, "blob_id": "20f493df2a0d50ea3981622100802312db92f53b", "content_id": "0c004ad327177de0cb326b4b16a6f1fdeb348ad1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "no_license", "max_line_length": 80, "num_lines": 30, "path": "/15112-CMU/week4 cold cold/inverseLookAndSay.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import copy\n\ndef inverseLookAndSay(lst):\n result = []\n for element in lst:\n n = element[0]\n num = element[1]\n while n > 0:\n result.append(num)\n n -= 1\n return result\n\n\ndef _verifyInverseLookAndSayIsNondestructive():\n a = [(1,2), (2,3)]\n b = copy.copy(a)\n inverseLookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testInverseLookAndSay():\n print(\"Testing inverseLookAndSay()...\", end=\"\")\n assert(_verifyInverseLookAndSayIsNondestructive() == True)\n assert(inverseLookAndSay([]) == [])\n assert(inverseLookAndSay([(3,1)]) == [1,1,1])\n assert(inverseLookAndSay([(1,-1),(1,2),(1,7)]) == [-1,2,7])\n assert(inverseLookAndSay([(2,3),(1,8),(3,-10)]) == [3,3,8,-10,-10,-10])\n print(\"Passed.\")\n\ntestInverseLookAndSay()\n# print(inverseLookAndSay([(2,3),(1,8),(3,-10)]))" }, { "alpha_fraction": 0.5268199443817139, "alphanum_fraction": 
0.5440613031387329, "avg_line_length": 26, "blob_id": "8eb43563b476de4227c36837950ad3768aabf8d5", "content_id": "a3a826113ddf84bdaee372ace52427210166fc19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "no_license", "max_line_length": 95, "num_lines": 58, "path": "/15112-CMU/week9/laozizijixiang.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def generate(left, right, string, res):\n if left == 0 and right == 0:\n res.add(string)\n return res\n else:\n if left > 0:\n generate(left - 1, right, string + '(', res)\n if right > left:\n generate(left, right - 1, string + ')', res)\n\n\ndef generateValidParentheses(n):\n res = set()\n if n == 0:\n return set()\n elif n % 2 != 0:\n return set()\n left, right = (n / 2), (n / 2)\n generate(left, right, \"\", res)\n return res\n\n\ndef testGenerateValidParentheses():\n print(\"Testing generateValidParentheses...\", end=\"\")\n assert(generateValidParentheses(4) == { \"(())\", \"()()\" })\n assert(generateValidParentheses(6) == { \"((()))\", \"()(())\", \"(())()\", \"(()())\", \"()()()\" })\n assert(generateValidParentheses(5) == set())\n assert(generateValidParentheses(0) == set())\n print(\"Passed!\")\n\n# print(generateValidParentheses(4))\n# print(generateValidParentheses(6))\n# print(generateValidParentheses(2))\n\ndef generateparentheses(n):\n res = set()\n if n == 0:\n return set()\n elif n % 2 != 0:\n return set()\n left, right = 0, 0\n n = n / 2\n generator(left, right, n, \"\", res)\n return res\n\ndef generator(left, right, n, string, res):\n if right == n:\n res.add(string)\n return res\n else:\n print(string)\n if left < n:\n generator(left + 1, right, n, string + \"(\", res)\n if right < left:\n generator(left, right + 1, n, string + \")\", res)\n\nprint(generateparentheses(6))\nprint(generateparentheses(4))\n" }, { "alpha_fraction": 0.5572143793106079, "alphanum_fraction": 0.6275577545166016, "avg_line_length": 30.9887638092041, "blob_id": "5ee93fdfbe18dde2ce45df9fdc2c9f977a34872e", "content_id": "247ebe89a7cfe3e2e929608f8d563fcf4ff62607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11387, "license_type": "no_license", "max_line_length": 80, "num_lines": 356, "path": "/15112-CMU/week1/hw1/hw1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw1\n# Your andrewID:mxu2\n# Your section: 2 N\n#################################################\n\nimport cs112_s19_week1_linter\n\n# For collaborative problems, you must list your collaborators!\n# Each collaborative problem has a function which you should modify to \n# return a comma-separated string with the andrewIDs of your collaborators.\n# Here is an example which you should not modify!\ndef exampleCollaborators():\n return \"mdtaylor, krivers, acarnegie\"\n\n#################################################\n# Lab1 COLLABORATIVE LAB problems \n# (Their problem descriptions will be released Friday, Jan 18)\n#################################################\n# The problems in this section are LAB PROBLEMS, which means you MUST\n# work on these with at least one collaborator. See the collaboration\n# policy in the syllabus for more details. 
Always list your collaborators!\n# For lab problems, YOU MUST LIST AT LEAST ONE COLLABORATOR\n\n\n#### distance is a COLLABORATIVE problem ####\n# Modify the output of distanceCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef distanceCollaborators():\n return \"nobody\"\n\ndef distance(x1, y1, x2, y2):\n return\n \n#### isRightTriangle is a COLLABORATIVE problem ####\n# Modify the output of isRightTriangleCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef isRightTriangleCollaborators():\n return \"nobody\"\n \ndef isRightTriangle(x1, y1, x2, y2, x3, y3):\n return\n\n#### roundPegRectangularHole and rectangularPegRoundHole are COLLABORATIVE ####\n# Modify the output of pegProblemCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef pegProblemCollaborators():\n return \"nobody\"\n \ndef roundPegRectangularHole(r, w, h):\n return\n \ndef rectangularPegRoundHole(r, w, h):\n return\n \n \n \n \n \n \n#################################################\n# Hw1 COLLABORATIVE problem\n#################################################\n# The problems in this section are COLLABORATIVE, which means you may\n# work on them with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n \n#### nearestOdd is a COLLABORATIVE problem ####\n# Modify the output of nearestOddCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef nearestOddCollaborators():\n return \"nobody\"\n \ndef nearestOdd(n):\n return\n\n#### colorBlender is a COLLABORATIVE problem ####\n# Modify the output of colorBlenderCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef colorBlenderCollaborators():\n return \"nobody\"\n \ndef colorBlender(rgb1, rgb2, midpoints, n):\n return\n\n\n\n\n\n\n#################################################\n# Hw1 SOLO problems\n#################################################\n# These problems must be completed WITHOUT COLLABORATION. See the collaboration\n# policy in the syllabus for more details. You may always use piazza, \n# office hours, and other official 15-112 course resources for questions.\n\ndef syllabusAnswer():\n return \"\"\"\n1:\n2:\n3:\n4:\n5: \n\"\"\"\n\ndef debuggingAnswer():\n return \"Your answer here\"\n\ndef rocAnswer():\n return\n\n\n#### the following three functions go together ####\n# Note: You'll need to use distance(x1,y1,x2,y2) as a helper function!\n# Wait to do this problem until after you write distance in Friday's lab\n\ndef lineIntersection(m1, b1, m2, b2):\n return \n\ndef triangleArea(s1, s2, s3):\n return\n\ndef threeLinesArea(m1, b1, m2, b2, m3, b3):\n return\n\n#### the following two functions go together ####\n\ndef getKthDigit(n, k):\n return\n\ndef setKthDigit(n, k, d):\n return\n\n\n#### bonusFindIntRootsOfCubic is a bonus problem, and therefore optional ####\n# Note: Bonus problems are solo. Do not collaborate on bonus problems. 
\n \ndef bonusFindIntRootsOfCubic(a, b, c, d):\n return\n\n\n\n\n\n\n#################################################\n# Hw1 Test Functions\n# ignore_rest\n#################################################\n\ndef testDistance():\n import math\n print(\"Testing distance()...\", end=\"\")\n assert(math.isclose(distance(0, 0, 1, 1), 2**0.5))\n assert(math.isclose(distance(3, 3, -3, -3), 6*2**0.5))\n assert(math.isclose(distance(20, 20, 23, 24), 5))\n print(\"Passed.\")\n\ndef testIsRightTriangle():\n print('Testing isRightTriangle()... ', end='')\n assert(isRightTriangle(0, 0, 0, 3, 4, 0) == True)\n assert(isRightTriangle(1, 1.3, 1.4, 1, 1, 1) == True)\n assert(isRightTriangle(9, 9.12, 8.95, 9, 9, 9) == True)\n assert(isRightTriangle(0, 0, 0, math.pi, math.e, 0) == True)\n assert(isRightTriangle(0, 0, 1, 1, 2, 0) == True)\n assert(isRightTriangle(0, 0, 1, 2, 2, 0) == False)\n assert(isRightTriangle(1, 0, 0, 3, 4, 0) == False)\n print('Passed.')\n\n \ndef testRoundPegRectangularHole():\n print(\"Testing roundPegRectangularHole()...\", end=\"\")\n assert(roundPegRectangularHole(1,2,3)==True)\n assert(roundPegRectangularHole(4,5,6)==False)\n assert(roundPegRectangularHole(1,20,10)==True)\n assert(roundPegRectangularHole(10,2,30)==False)\n print(\"Passed.\")\n \ndef testRectangularPegRoundHole():\n print(\"Testing rectangularPegRoundHole()...\", end=\"\")\n assert(rectangularPegRoundHole(1,2,3)==False)\n assert(rectangularPegRoundHole(5,4,6)==True)\n assert(rectangularPegRoundHole(2,4,4)==False)\n assert(rectangularPegRoundHole(5,8,6)==True)\n assert(rectangularPegRoundHole(6,10,8)==False)\n print(\"Passed.\")\n \ndef testNearestOdd():\n print('Testing nearestOdd()... ', end='')\n assert(nearestOdd(13) == 13)\n assert(nearestOdd(12.001) == 13)\n assert(nearestOdd(12) == 11)\n assert(nearestOdd(11.999) == 11)\n assert(nearestOdd(-13) == -13)\n assert(nearestOdd(-12.001) == -13)\n assert(nearestOdd(-12) == -13)\n assert(nearestOdd(-11.999) == -11)\n print('Passed.')\n\ndef testColorBlender():\n print(\"Testing colorBlender()...\", end=\"\")\n # http://meyerweb.com/eric/tools/color-blend/#DC143C:BDFCC9:3:rgbd\n assert(colorBlender(220020060, 189252201, 3, -1) == None)\n assert(colorBlender(220020060, 189252201, 3, 0) == 220020060)\n assert(colorBlender(220020060, 189252201, 3, 1) == 212078095)\n assert(colorBlender(220020060, 189252201, 3, 2) == 205136131)\n assert(colorBlender(220020060, 189252201, 3, 3) == 197194166)\n assert(colorBlender(220020060, 189252201, 3, 4) == 189252201)\n assert(colorBlender(220020060, 189252201, 3, 5) == None)\n # http://meyerweb.com/eric/tools/color-blend/#0100FF:FF0280:2:rgbd\n assert(colorBlender(1000255, 255002128, 2, -1) == None)\n assert(colorBlender(1000255, 255002128, 2, 0) == 1000255)\n assert(colorBlender(1000255, 255002128, 2, 1) == 86001213)\n assert(colorBlender(1000255, 255002128, 2, 2) == 170001170)\n assert(colorBlender(1000255, 255002128, 2, 3) == 255002128)\n print(\"Passed.\")\n\ndef testSyllabusAnswer():\n print(\"Your answer to the syllabus question is:\")\n print(syllabusAnswer())\n print(\"The TAs will grade this later.\")\n print()\n\ndef testDebuggingAnswer():\n print(\"Your answer to the debugging question is:\")\n print(debuggingAnswer())\n print(\"The TAs will grade this later.\")\n print()\n\ndef roc(x):\n if type(x) != int:\n return False\n elif x <= 120:\n return False\n elif x % 100 == x - 100:\n a = x // 10\n b = x % 10\n if a != 2 * b:\n return False\n return True\n else:\n return x == 42\n\ndef testRocAnswer():\n print(\"Testing 
rocAnswer()...\", end=\"\")\n answer = rocAnswer()\n assert(roc(answer) == True)\n print(\"Passed.\")\n\ndef testLineIntersection():\n import math\n print(\"Testing lineIntersection()...\", end=\"\")\n assert(lineIntersection(2.5, 3, 2.5, 11) == None)\n assert(lineIntersection(25, 3, 25, 11) == None)\n # y=3x-5 and y=x+5 intersect at (5,10)\n assert(math.isclose(lineIntersection(3,-5,1,5), 5))\n # y=10x and y=-4x+35 intersect at (2.5,25)\n assert(math.isclose(lineIntersection(10,0,-4,35), 2.5))\n print(\"Passed.\")\n\ndef testTriangleArea():\n import math\n print(\"Testing triangleArea()...\", end=\"\")\n assert(math.isclose(triangleArea(3,4,5), 6))\n assert(math.isclose(triangleArea(2**0.5, 1, 1), 0.5))\n assert(math.isclose(triangleArea(2**0.5, 2**0.5, 2), 1))\n print(\"Passed.\")\n\ndef testThreeLinesArea():\n import math\n print(\"Testing threeLinesArea()...\", end=\"\")\n assert(math.isclose(threeLinesArea(1, 2, 3, 4, 5, 6), 0))\n assert(math.isclose(threeLinesArea(0, 7, 1, 0, -1, 2), 36))\n assert(math.isclose(threeLinesArea(0, 3, -.5, -5, 1, 3), 42.66666666666))\n assert(math.isclose(threeLinesArea(1, -5, 0, -2, 2, 2), 25))\n assert(math.isclose(threeLinesArea(0, -9.75, -6, 2.25, 1, -4.75), 21))\n print(\"Passed.\")\n\ndef testGetKthDigit():\n print(\"Testing getKthDigit()...\", end=\"\")\n assert(getKthDigit(809, 0) == 9)\n assert(getKthDigit(809, 1) == 0)\n assert(getKthDigit(809, 2) == 8)\n assert(getKthDigit(809, 3) == 0)\n assert(getKthDigit(0, 100) == 0)\n assert(getKthDigit(-809, 0) == 9)\n print(\"Passed.\")\n\ndef testSetKthDigit():\n print(\"Testing setKthDigit()...\", end=\"\")\n assert(setKthDigit(809, 0, 7) == 807)\n assert(setKthDigit(809, 1, 7) == 879)\n assert(setKthDigit(809, 2, 7) == 709)\n assert(setKthDigit(809, 3, 7) == 7809)\n assert(setKthDigit(0, 4, 7) == 70000)\n assert(setKthDigit(-809, 0, 7) == -807)\n print(\"Passed.\")\n\ndef getCubicCoeffs(k, root1, root2, root3):\n # Given roots e,f,g and vertical scale k, we can find\n # the coefficients a,b,c,d as such:\n # k(x-e)(x-f)(x-g) =\n # k(x-e)(x^2 - (f+g)x + fg)\n # kx^3 - k(e+f+g)x^2 + k(ef+fg+eg)x - kefg\n e,f,g = root1, root2, root3\n return k, -k*(e+f+g), k*(e*f+f*g+e*g), -k*e*f*g\n\ndef testFindIntRootsOfCubicCase(k, z1, z2, z3):\n import math\n a,b,c,d = getCubicCoeffs(k, z1, z2, z3)\n result1, result2, result3 = bonusFindIntRootsOfCubic(a,b,c,d)\n m1 = min(z1, z2, z3)\n m3 = max(z1, z2, z3)\n m2 = (z1+z2+z3)-(m1+m3)\n actual = (m1, m2, m3)\n assert(math.isclose(m1, result1))\n assert(math.isclose(m2, result2))\n assert(math.isclose(m3, result3))\n\ndef testBonusFindIntRootsOfCubic():\n print(\"Testing bonusFindIntRootsOfCubic()...\", end=\"\")\n testFindIntRootsOfCubicCase(5, 1, 3, 2)\n testFindIntRootsOfCubicCase(2, 5, 33, 7)\n testFindIntRootsOfCubicCase(-18, 24, 3, -8)\n testFindIntRootsOfCubicCase(1, 2, 3, 4)\n print(\"Passed.\")\n\n#################################################\n# Hw1 Main\n#################################################\n\ndef testAll():\n testDistance()\n testRoundPegRectangularHole()\n testRectangularPegRoundHole()\n testNearestOdd()\n testColorBlender()\n testSyllabusAnswer()\n testDebuggingAnswer()\n testRocAnswer()\n testLineIntersection()\n testTriangleArea()\n testThreeLinesArea()\n testGetKthDigit()\n testSetKthDigit()\n \n #Uncomment the next line if you want to try the bonus!\n #testBonusFindIntRootsOfCubic() \n\ndef main():\n cs112_s19_week1_linter.lint() # check for banned tokens\n testAll()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 
0.574457585811615, "alphanum_fraction": 0.6198225021362305, "avg_line_length": 32.81666564941406, "blob_id": "eb750bbb4d37161b52a44d2179833240f2139ed6", "content_id": "8611a50ee9edb00ab017443ed9a2eb00b6abb9be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2028, "license_type": "no_license", "max_line_length": 72, "num_lines": 60, "path": "/15112-CMU/week1/week1/week1+.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import math\n\nfrom tkinter import *\n\ndef drawClock(canvas, x0, y0, x1, y1, hour, minute):\n # draw a clock in the area bounded by (x0,y0) in\n # the top-left and (x1,y1) in the bottom-right\n # with the given time\n # draw an outline rectangle\n canvas.create_rectangle(x0, y0, x1, y1, outline=\"black\", width=1)\n\n # find relevant values for positioning clock\n width = (x1 - x0)\n height = (y1 - y0)\n r = min(width, height)/2\n cx = (x0 + x1)/2\n cy = (y0 + y1)/2\n\n # draw the clock face\n canvas.create_oval(cx-r, cy-r, cx+r, cy+r, outline=\"black\", width=2)\n\n # adjust the hour to take the minutes into account\n hour += minute/60.0\n\n # find the hourAngle and draw the hour hand\n # but we must adjust because 0 is vertical and\n # it proceeds clockwise, not counter-clockwise!\n hourAngle = math.pi/2 - 2*math.pi*hour/12\n hourRadius = r*1/2\n hourX = cx + hourRadius * math.cos(hourAngle)\n hourY = cy - hourRadius * math.sin(hourAngle)\n canvas.create_line(cx, cy, hourX, hourY, fill=\"black\", width=1)\n\n # repeat with the minuteAngle for the minuteHand\n minuteAngle = math.pi/2 - 2*math.pi*minute/60\n minuteRadius = r*9/10\n minuteX = cx + minuteRadius * math.cos(minuteAngle)\n minuteY = cy - minuteRadius * math.sin(minuteAngle)\n canvas.create_line(cx, cy, minuteX, minuteY, fill=\"black\", width=1)\n\ndef draw(canvas, width, height):\n # Draw a large clock showing 2:30\n drawClock(canvas, 25, 25, 175, 150, 2, 30)\n\n # And draw a smaller one below it showing 7:45\n drawClock(canvas, 75, 160, 125, 200, 7, 45)\n\n # Now let's have some fun and draw a whole grid of clocks!\n width = 40\n height = 40\n margin = 5\n hour = 0\n for row in range(3):\n for col in range(4):\n left = 200 + col * width + margin\n top = 50 + row * height + margin\n right = left + width - margin\n bottom = top + height - margin\n hour += 1\n drawClock(canvas, left, top, right, bottom, hour, 0)" }, { "alpha_fraction": 0.6296758055686951, "alphanum_fraction": 0.6346633434295654, "avg_line_length": 20.13157844543457, "blob_id": "dae0e1c45eee71251b0f3405784e2fcf36469f34", "content_id": "95f55dcad6c7ab02eff16bf123e921325e4f62cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/15112-CMU/week3/test.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "s = \"\"\"\\\nWe hold these truths to be\nself-evident: that all men are\ncreated equal; that they are\nendowed by their Creator with\ncertain unalienable rights;\nthat among these are life,\nliberty, and the pursuit of\nhappiness.\"\"\"\n\nL = s.split('\\n')\nprint(L)\nres = \"\"\nfor line in L:\n blanks = 30 - len(line)\n res += (\" \" * blanks + line + '\\n')\nprint(res)\n\n\ntext = \"dadadffe\"\ni = 1\nprint(text[:i] + \"\\n\" + text[i+1:])\n\n\n# a = \"fujian xuming ruicheng zuowei guofu\"\n#\n# s = a.split()\n#\n# print(\" \".join(s))\n#\n# text = \"\"\"\\\n# We hold these truths to be 
self-evident: that all men are created equal;\n# that they are endowed by their Creator with certain unalienable rights;\n# that among these are life, liberty, and the pursuit of happiness.\"\"\"\n#\n# element = text.split()\n#\n# print(\" \".join(element))" }, { "alpha_fraction": 0.5835616588592529, "alphanum_fraction": 0.6219177842140198, "avg_line_length": 28.594594955444336, "blob_id": "e9c463ada7b3b6f37aaa6c28ad2e8c8ae254678a", "content_id": "c706851fbdffd6d0fce20e74a60301e4a8f24c26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 63, "num_lines": 37, "path": "/15112-CMU/112-opencv-tutorial-master/edges.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\nwindow_name = \"Webcam!\"\ncam_index = 1 #my computer's camera is index 1, usually it's 0\ncv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\ncap = cv2.VideoCapture(cam_index)\ncap.open(cam_index)\n\nfindVertEdges = False\nfindHorzEdges = False\nfindAllEdges = False\nwhile True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.blur(gray, (3,3))\n if frame is not None:\n if findVertEdges:\n frame = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)\n if findHorzEdges:\n frame = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)\n if findAllEdges:\n frame = cv2.Canny(frame, 100, 200) \n cv2.imshow(window_name, frame)\n k = cv2.waitKey(10) & 0xFF\n if k == 27: #ESC key quits the program\n cv2.destroyAllWindows()\n cap.release()\n break\n elif k == ord('v'):\n findVertEdges = not findVertEdges\n elif k == ord('h'):\n findHorzEdges = not findHorzEdges\n elif k == ord('a'):\n findAllEdges = not findAllEdges\n" }, { "alpha_fraction": 0.5726717114448547, "alphanum_fraction": 0.5841510891914368, "avg_line_length": 33.40764236450195, "blob_id": "b8d1af667068c176ce59f05b5004d617ff8cbbac", "content_id": "aeb2e38d7df3cc6c29c0c53d13348679d6868f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5401, "license_type": "no_license", "max_line_length": 79, "num_lines": 157, "path": "/15112-CMU/week9/notes-recursion-floodFill.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# FloodFill using Tkinter\n# grid-based (not pixel-based), with animation\n# and numeric display of depth of recursion\n\nfrom tkinter import *\n\nclass Cell(object):\n def __init__(self):\n self.depth = self.ordinal = -1 # set by floodFill\n self.displayLabel = False\n self.isWall = False\n\ndef init(data):\n instructHeight = 160\n data.cellSize = 40\n data.margin = 5 # margin around grid\n data.rows = (data.height - instructHeight - 2*data.margin) // data.cellSize\n data.cols = (data.width - 2*data.margin) // data.cellSize\n data.cells = [ [ Cell() for col in range(data.cols) ] \n for row in range(data.rows) ]\n data.floodFillIndex = 0\n data.displayOrdinals = False\n\ndef clearLabels(data):\n for row in range(data.rows):\n for col in range(data.cols):\n cell = data.cells[row][col]\n cell.depth = cell.ordinal = -1\n data.floodFillOrder = [ ]\n data.floodFillIndex = 0\n data.displayOrdinals = False\n\ndef floodFill(data, row, col, depth=0):\n if ((row < 0) or (row >= data.rows) or\n (col < 0) or (col >= data.cols)):\n return # off-board!\n cell = data.cells[row][col]\n if (cell.isWall == True):\n return # hit a wall\n if (cell.depth >= 0):\n return # already been 
here\n\n # \"fill\" this cell\n cell.depth = depth\n cell.ordinal = len(data.floodFillOrder)\n data.floodFillOrder.append(cell)\n\n # then recursively fill its neighbors\n floodFill(data, row-1, col, depth+1)\n floodFill(data, row+1, col, depth+1)\n floodFill(data, row, col-1, depth+1)\n floodFill(data, row, col+1, depth+1)\n\ndef mousePressed(event, data):\n shift = ((event.state & 0x0001) != 0) # Fancy detection of shift-click!\n clearLabels(data)\n col = (event.x - data.margin) // data.cellSize\n row = (event.y - data.margin) // data.cellSize\n if 0 <= col < data.cols and 0 <= row < data.rows:\n if (shift == False):\n data.cells[row][col].isWall = not data.cells[row][col].isWall\n else:\n data.cells[row][col].isWall = False\n floodFill(data, row, col)\n\ndef keyPressed(event, data):\n if (event.keysym == \"d\"):\n data.displayOrdinals = False\n elif (event.keysym == \"o\"):\n data.displayOrdinals = True\n elif (event.keysym == \"r\"):\n init(data)\n\ndef timerFired(data):\n data.floodFillIndex += 1\n\ndef redrawAll(canvas, data):\n for row in range(data.rows):\n for col in range(data.cols):\n left = data.margin + col * data.cellSize\n top = data.margin + row * data.cellSize\n cell = data.cells[row][col]\n fill = \"pink\" if (cell.isWall) else \"cyan\"\n canvas.create_rectangle(left, top, \n left + data.cellSize, top + data.cellSize, fill=fill)\n if ((cell.depth >= 0) and (cell.ordinal < data.floodFillIndex)):\n if (data.displayOrdinals == True):\n label = \"# \" + str(cell.ordinal)\n else:\n label = str(cell.depth)\n canvas.create_text(left + data.cellSize/2, \n top + data.cellSize/2,\n text=label, font=\"Arial 12 bold\", \n fill=\"darkGreen\")\n drawHelpText(canvas, data)\n\ndef drawHelpText(canvas, data):\n message = \"\"\"\nClick to toggle walls\nShift-click to floodFill from cell\nPress 'd' to display depths\nPress 'o' to display #ordinals\nPress 'r' to reset\n\"\"\"\n canvas.create_text(data.width/2, data.cellSize * data.rows, anchor=\"n\",\n text = message, font=\"Arial 18 bold\", \n fill=\"darkBlue\", justify=\"center\")\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(610, 550)" 
}, { "alpha_fraction": 0.5599620342254639, "alphanum_fraction": 0.5867173075675964, "avg_line_length": 29.76288604736328, "blob_id": "8be4a14bb92fb683f4a0196e2031d1d0946b6193", "content_id": "ab0c794d6208f231e292acacb42baa6e35d432e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17903, "license_type": "no_license", "max_line_length": 90, "num_lines": 582, "path": "/15112-CMU/week4 cold cold/hw4 copy.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw4\n# Your andrewID:yufeiche\n# Your section: J\n#################################################\n\nimport math\nimport copy\n \n#################################################\n# Hw4 COLLABORATIVE problems\n#################################################\n# The problem in this section is COLLABORATIVE, which means you may\n# work on it with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n\ndef lookAndSayCollaborators():\n return \"nobody\"\n\n# A function that return the \"reading off\" list\ndef lookAndSay(lst):\n newList = []\n # consider an exception\n if lst == []:\n return []\n else:\n # create non-destructively function\n createLst = copy.copy(lst)\n i = 0\n while i < len(createLst):\n newValue = createLst[i]\n countNumber = 0\n while (i + countNumber < len(createLst) and lst[i + countNumber] == newValue):\n countNumber += 1\n i += countNumber\n newList += [(countNumber, newValue)]\n return newList\n\n\ndef inverseLookAndSayCollaborators():\n return \"nobody\"\n\n# a function that does the inverse of the function lookAndSay \ndef inverseLookAndSay(lst):\n # consider an exception\n if lst == []:\n return []\n else:\n # create a new list\n newList = []\n # find every tuple in list\n for i in range(len(lst)):\n (x,y) = lst[i]\n # add item y to newList x times\n for j in range(x):\n newList += [y]\n return newList\n\n#################################################\n# Hw4 SOLO problems\n#################################################\n\n# a function that non destructively remove the repeated values in list\ndef nondestructiveRemoveRepeats(lst):\n # non destructively copy the list\n copyLst = copy.copy(lst)\n newList = []\n i = 0\n while i < len(copyLst):\n newValue = copyLst[i]\n # count the number of newValue in string\n countNumber = copyLst.count(newValue)\n newList += [copyLst[i]]\n # leave only one per value in string\n for j in range(countNumber):\n copyLst.remove(newValue)\n i = 0\n return newList\n\n# a function that destructively remove the repeated values in list\ndef destructiveRemoveRepeats(lst):\n i = 0\n # reverse the list\n lst.reverse()\n while i < len(lst):\n newValue = lst[i]\n # count the number of newValue in string\n countNumber = lst.count(newValue)\n if countNumber == 1:\n i += 1\n # leave only one per value in string\n for j in range(countNumber-1):\n lst.remove(newValue)\n lst.reverse()\n\n# return the letterScore value of all the sting in a list\ndef returnValue(letterScore,lst):\n totalValue = 0\n for i in range(len(lst)):\n # order the string in alphabet\n j = ord(lst[i])-97\n totalValue += letterScore[j]\n return totalValue\n\n# return whether alphabet in list can make up word\ndef createWords(word,lst):\n newStr = \"\"\n lst = copy.copy(lst)\n i = 0\n j = 0\n while i < len(word):\n while j < len(lst):\n # a new string to add alphabets that match\n if word[i] == lst[j]:\n newStr += 
word[i]\n                lst[j] = \" \" \n                j = len(lst)\n            else:\n                j += 1\n        i += 1\n        j = 0\n    # whether the matched letters equal the original word\n    if newStr == word:\n        return True\n    else:\n        return False\n\n# return the highest score as well as the matching words from the dictionary\ndef bestScrabbleScore(dictionary,letterScore,hand):\n    highestScore = 0\n    returnWord = []\n    newStr = \"\"\n    for i in range(len(dictionary)):\n        # see if string in hand matches word in dictionary\n        if createWords(dictionary[i],hand) == True:\n            # if match, calculate the value of that word\n            totalValue = returnValue(letterScore,dictionary[i])\n            # compare its score with the highest score\n            if highestScore < totalValue:\n                highestScore = totalValue\n                returnWord = [dictionary[i]]\n            elif highestScore == totalValue:\n                returnWord += [dictionary[i]]\n    \n    if len(returnWord) == 1:\n        return(returnWord[0], highestScore)\n    # if no word matches, return None\n    elif len(returnWord) == 0:\n        return None\n    else:\n        return (returnWord,highestScore)\n    \n#################################################\n# Hw4 Graphics & Animation Functions\n# All graphics must go under here to avoid angering the autograder!\n# ignore_rest\n#################################################\nfrom tkinter import *\n\n## Tortoise Animation functions ##\n## Note - the Tortoise animation is collaborative! ##\n\n## Tortoise Animation bonus features: none ##\n\ndef tortoiseAnimationCollaborators():\n    return \"nobody\"\n\n# initialize the GUI\ndef init(data):\n    data.x = data.width/2\n    data.y = data.height/2\n    data.angle = 0\n    data.lineX = data.width/50\n    data.lineY = data.height/40\n    \n    # breaks code into series of commands\n    data.counter = 0\n    data.commands = data.code.split(\"\\n\")\n    \n    # keeps track of lines\n    data.lineCenter = []\n    \n    # keeps track of rectangles\n    data.color = \"black\"\n    data.text = \"\"\n    colorTuple = (\"red\",\"orange\",\"yellow\",\"green\",\"blue\",\"purple\",\"white\",\n    \"white\",\"white\",\"white\")\n    data.rectlist = [(i*data.width/10,data.height-data.width/10, (i+1) * \n    data.width/10,data.height, colorTuple[i]) for i in range(10)]\n\n\n# register mouse press\ndef mousePressed(event, data):\n    # checks if first 7 rectangles are clicked\n    for r in data.rectlist[:6]:\n        if (event.x > r[0] and event.x < r[2] and event.y > r[1] \n        and event.y < r[3]):\n            data.counter += 1\n            data.commands.insert(data.counter, \"color \"+ r[4])\n    if (event.x > data.rectlist[6][0] and event.x < data.rectlist[6][2] and\n    event.y > data.rectlist[6][1] and event.y < data.rectlist[6][3]):\n        data.counter += 1\n        data.commands.insert(data.counter, \"color none\")\n    # checks if last 3 rectangles are clicked\n    for i in range(3):\n        if (event.x > data.rectlist[7 + i][0] and \n        event.x < data.rectlist[7 + i][2] and \n        event.y > data.rectlist[7 + i][1] and \n        event.y < data.rectlist[7 + i][3]):\n            data.counter += 1\n            data.commands.insert(data.counter, \"move \" + str((5, 25, 50)[i] ) ) \n\n# registers keypress\ndef keyPressed(event, data):\n    if event.keysym == \"Return\":\n        data.counter += 1\n    elif event.keysym == \"Left\":\n        data.counter += 1\n        data.commands.insert(data.counter, \"left 30\")\n    elif event.keysym == \"Right\":\n        data.counter += 1\n        data.commands.insert(data.counter, \"right 30\")\n\n# helper function called by redrawAll that draws the graphics\ndef runProgram(canvas, data, currentLine):\n    if currentLine < len(data.commands):\n        data.text += (data.commands[currentLine] + \"\\n\")\n        moveDistance(canvas,data, currentLine)\n        turnAngle(canvas,data, currentLine)\n    else:\n        data.counter = 
len(data.commands) - 1\n canvas.create_text(data.lineX,data.lineY, text = data.text, fill = \"grey\", \n anchor = \"nw\")\n \n drawLines(canvas,data)\n drawArrow(canvas,data.x,data.y,data.angle)\n \n # draws squares on the bottom\n sideRectangle = data.width/10\n colorTuple = (\"red\",\"orange\",\"yellow\",\"green\",\"blue\",\"purple\",\"white\",\n \"white\",\"white\",\"white\")\n for r in data.rectlist:\n canvas.create_rectangle(r[0],r[1],r[2],r[3],fill = r[4], width = 3)\n canvas.create_text(data.width*7.5/10,data.height-data.width/20,text = \"5\")\n canvas.create_text(data.width*8.5/10,data.height-data.width/20,text = \"25\")\n canvas.create_text(data.width*9.5/10,data.height-data.width/20,text = \"50\") \n \n\n# helper function controls the movement of the graphic\ndef moveDistance(canvas,data,currentLine):\n currentContent = data.commands[currentLine]\n newList = currentContent.split(\" \")\n if newList[0] == \"move\":\n newX = data.x + int(newList[1])*math.cos(math.radians(data.angle))\n newY = data.y - int(newList[1])*math.sin(math.radians(data.angle))\n data.lineCenter += [(data.x,data.y,newX,newY,data.color)]\n data.x = newX\n data.y = newY\n if newList[0] == \"color\":\n data.color = newList[1]\n \n# helper function draws the line \ndef drawLines(canvas,data):\n for tuple in data.lineCenter:\n if(tuple[4] != 'none'):\n canvas.create_line(tuple[0],tuple[1],tuple[2],tuple[3],\n fill = tuple[4],width = 5)\n \n \n# helper function controls the angle of the graphic\ndef turnAngle(canvas,data,currentLine):\n currentContent = data.commands[currentLine]\n newList = currentContent.split(\" \")\n if newList[0] == \"left\":\n data.angle += int(newList[1])\n elif newList[0] == \"right\":\n data.angle -= int(newList[1])\n \n# main function\ndef redrawAll(canvas, data):\n runProgram(canvas, data, data.counter)\n \n\n\n\"\"\" This function is provided as part of the starter code.\nYou don't need to change it, but you should call it!\"\"\"\ndef drawArrow(canvas, x, y, angle):\n offset = 135\n r = 10\n x1 = x + r*math.cos(math.radians(angle))\n y1 = y - r*math.sin(math.radians(angle))\n x2 = x + r*math.cos(math.radians(angle + offset))\n y2 = y - r*math.sin(math.radians(angle + offset))\n x3 = x + r*math.cos (math.radians(angle - offset))\n y3 = y - r*math.sin(math.radians(angle - offset))\n canvas.create_polygon(x1, y1, x2, y2, x3, y3, fill=\"black\")\n\n### Timeline Game is a bonus problem, and therefore optional ###\n# Note: Bonus problems are solo. 
Do not collaborate on bonus problems.\n\n## Timeline Game functions ##\n\n\"\"\" This function is provided as part of the starter code.\nYou don't need to change it, but you should call it!\"\"\"\ndef starterCards():\n import random\n cards = [ (\"Domestication of the Cat\", -4500),\n (\"Creation of the Pythagorean Theorem\", -548),\n (\"Invention of Chess\", 570),\n (\"First Calculating Machine\", 1642), \n (\"Invention of the Telegraph\", 1837),\n (\"Invention of Morse Code\", 1838),\n (\"Invention of the Plastic Bottle\", 1963), \n (\"Invention of the Computer Mouse\", 1963), \n (\"Invention of the Laptop Computer\", 1981),\n (\"First Public Internet Access\", 1990)\n ]\n random.shuffle(cards)\n return cards\n\ndef initTimeline(data):\n pass\n\ndef mousePressedTimeline(event, data):\n pass\n\ndef keyPressedTimeline(event, data):\n pass\n\ndef redrawAllTimeline(canvas, data):\n pass\n\n#################################################\n# Hw4 Test Functions\n#################################################\n\ndef _verifyLookAndSayIsNondestructive():\n a = [1,2,3]\n b = copy.copy(a)\n lookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testLookAndSay():\n print(\"Testing lookAndSay()...\", end=\"\")\n assert(_verifyLookAndSayIsNondestructive() == True)\n assert(lookAndSay([]) == [])\n assert(lookAndSay([1,1,1]) == [(3,1)])\n assert(lookAndSay([-1,2,7]) == [(1,-1),(1,2),(1,7)])\n assert(lookAndSay([3,3,8,-10,-10,-10]) == [(2,3),(1,8),(3,-10)])\n print(\"Passed.\")\n\ndef _verifyInverseLookAndSayIsNondestructive():\n a = [(1,2), (2,3)]\n b = copy.copy(a)\n inverseLookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testInverseLookAndSay():\n print(\"Testing inverseLookAndSay()...\", end=\"\")\n assert(_verifyInverseLookAndSayIsNondestructive() == True)\n assert(inverseLookAndSay([]) == [])\n assert(inverseLookAndSay([(3,1)]) == [1,1,1])\n assert(inverseLookAndSay([(1,-1),(1,2),(1,7)]) == [-1,2,7])\n assert(inverseLookAndSay([(2,3),(1,8),(3,-10)]) == [3,3,8,-10,-10,-10])\n print(\"Passed.\")\n\ndef runTortoiseAnimation(code, width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.code = code\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n root.mainloop() # blocks until window is closed\n\ndef testTortoiseAnimation():\n print(\"Running Tortoise Animation...\", end=\"\")\n runTortoiseAnimation(\"\"\"\n# This is a simple tortoise program\ncolor blue\nmove 50\n\nleft 90\n\ncolor red\nmove 100\n\ncolor none # turns off drawing\nmove 50\n\nright 45\n\ncolor green # drawing is on again\nmove 50\n\nright 45\n\ncolor orange\nmove 50\n\nright 90\n\ncolor purple\nmove 100\n\"\"\")\n 
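# the second demo program below spells \"YES\", using the same color/move/left/right commands\n    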
runTortoiseAnimation(\"\"\"\n# Y\ncolor red\nright 45\nmove 50\nright 45\nmove 50\nright 180\nmove 50\nright 45\nmove 50\ncolor none # space\nright 45\nmove 25\n\n# E\ncolor green\nright 90\nmove 85\nleft 90\nmove 50\nright 180\nmove 50\nright 90\nmove 42\nright 90\nmove 50\nright 180\nmove 50\nright 90\nmove 43\nright 90\nmove 50 # space\ncolor none\nmove 25\n\n# S\ncolor blue\nmove 50\nleft 180\nmove 50\nleft 90\nmove 43\nleft 90\nmove 50\nright 90\nmove 42\nright 90\nmove 50\n\"\"\")\n print(\"Done.\")\n\ndef _verifyNondestructiveRemoveRepeatsIsNondestructive():\n a = [3, 5, 3, 3, 6]\n b = copy.copy(a)\n # ignore result, just checking for destructiveness here\n nondestructiveRemoveRepeats(a)\n return (a == b)\n\ndef testNondestructiveRemoveRepeats():\n print(\"Testing nondestructiveRemoveRepeats()\", end=\"\")\n assert(_verifyNondestructiveRemoveRepeatsIsNondestructive())\n assert(nondestructiveRemoveRepeats([1,3,5,3,3,2,1,7,5]) == [1,3,5,2,7])\n assert(nondestructiveRemoveRepeats([1,2,3,-2]) == [1,2,3,-2])\n print(\"Passed.\")\n\ndef testDestructiveRemoveRepeats():\n print(\"Testing destructiveRemoveRepeats()\", end=\"\")\n a = [1,3,5,3,3,2,1,7,5]\n assert(destructiveRemoveRepeats(a) == None)\n assert(a == [1,3,5,2,7])\n b = [1,2,3,-2]\n assert(destructiveRemoveRepeats(b) == None)\n assert(b == [1,2,3,-2])\n print(\"Passed.\")\n\ndef testBestScrabbleScore():\n print(\"Testing bestScrabbleScore()...\", end=\"\")\n def d1(): return [\"a\", \"b\", \"c\"]\n def ls1(): return [1] * 26\n def d2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"] \n def ls2(): return [1 + (i % 5) for i in range(26)]\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"a\", \"c\", \"e\"]) == ([\"a\", \"c\"], 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"z\"]) == None)\n # x = 4, y = 5, z = 1\n # [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\n # 10 10 7 10 9 -\n assert(bestScrabbleScore(d2(), ls2(), [\"x\",\"y\",\"z\"]) == ([\"xyz\",\"zxy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(), \n [\"x\", \"y\", \"z\", \"y\"]) == ([\"xyz\", \"zxy\", \"yy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(), [\"x\", \"y\", \"q\"]) == (\"yx\", 9))\n assert(bestScrabbleScore(d2(), ls2(), [\"y\", \"z\", \"z\"]) == (\"zzy\", 7))\n assert(bestScrabbleScore(d2(), ls2(), [\"w\", \"x\", \"z\"]) == None)\n print(\"Passed.\")\n\ndef runTimelineGame(width=1200, height=400):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAllTimeline(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressedTimeline(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressedTimeline(event, data)\n redrawAllWrapper(canvas, data)\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n initTimeline(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n root.mainloop() # blocks until window is 
closed\n\n#################################################\n# Hw4 Main\n#################################################\n\ndef testAll():\n ## Collaborative Functions ##\n testLookAndSay()\n testInverseLookAndSay()\n testTortoiseAnimation()\n ## Solo Functions ##\n testNondestructiveRemoveRepeats()\n testDestructiveRemoveRepeats()\n testBestScrabbleScore()\n \n # Uncomment the next line if you want to try the bonus!\n #runTimelineGame()\n\ndef main():\n testAll()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6927340030670166, "alphanum_fraction": 0.6982758641242981, "avg_line_length": 30.173076629638672, "blob_id": "85c6bcff15b659a41ab2762687d5d06c33330f60", "content_id": "64b5773cbda1db3a7a497bb5ff074c4af6773363", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1624, "license_type": "no_license", "max_line_length": 70, "num_lines": 52, "path": "/15112-CMU/FIFAworldcup copy/Text.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from const import *\n\nclass Text:\n @staticmethod\n def makeTextObject(text, color):\n surf = FONT30B.render(text, True, color)\n return surf, surf.get_rect()\n\n @staticmethod\n def makeTextPauseGame(text,color):\n surf = FONT60B.render(text, True, color)\n return surf, surf.get_rect()\n\n @staticmethod\n def showText(text, color, screen, y):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (int(GAME_WIDTH + TEXT_WIDTH / 2), int(y))\n\n screen.blit(titleSurf, titleRect)\n\n @staticmethod\n def showScore(text, color, screen, x ):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (x, TABLE_SCORE_HEIGHT / 2)\n\n screen.blit(titleSurf, titleRect)\n \n @staticmethod\n def showTextInPlayGain(text, color, screen, y):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (int(WIDTH_OF_PLAY_AGAIN_BOARDING / 2), int(y))\n\n screen.blit(titleSurf, titleRect)\n \n @staticmethod\n def showSettingText(text, color, screen, y):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (int(WIDTH_OF_PAUSE_GAME / 2), int(y))\n\n screen.blit(titleSurf, titleRect)\n\n @staticmethod\n def showTextInPauseGame(text, color, screen, y):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (int(WIDTH_OF_PAUSE_GAME / 2), int(y))\n\n screen.blit(titleSurf, titleRect)\n @staticmethod\n def showTextWiner(text, color, screen,x, y):\n titleSurf, titleRect = Text.makeTextObject(text, color)\n titleRect.center = (int(x), int(y))\n screen.blit(titleSurf, titleRect)\n\n\n\n" }, { "alpha_fraction": 0.5283757448196411, "alphanum_fraction": 0.5792563557624817, "avg_line_length": 29.68000030517578, "blob_id": "5ac4db258cf18aa88c8da5b442ef40234b7c26df", "content_id": "a5c58f7931c02fc1b2ebc8b15e93f3e1bd1b6c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 80, "num_lines": 50, "path": "/15112-CMU/week9/Binary Search.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def binarySearch1(alist, item):\n first = 0\n last = len(alist) - 1\n found = False\n while first <= last and not found:\n midpoint = (first + last) // 2\n if alist[midpoint] == item:\n found = True\n else:\n if item < alist[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return found\n# print(binarySearch1([0, 1, 2, 8, 13, 17, 19, 32, 42,], 3))\n# 
print(binarySearch1([0, 1, 2, 8, 13, 17, 19, 32, 42,], 13))\n# print(binarySearch1([1,3,4,5],3))\n# print(binarySearch1([1,3,4,5,6],7))\n\n\ndef binarySearchRecur(alist, first, last, item, lstTuple):\n if first > last:\n return lstTuple\n midpoint = (first + last) // 2\n lstTuple.append((midpoint, alist[midpoint]))\n if alist[midpoint] == item:\n return lstTuple\n else:\n if item < alist[midpoint]:\n return binarySearchRecur(alist, first, midpoint - 1, item, lstTuple)\n else:\n return binarySearchRecur(alist, midpoint + 1, last, item, lstTuple)\n\n\ndef binarySearch(lst, item):\n first = 0\n last = len(lst) - 1\n lstTuple = []\n return binarySearchRecur(lst, first, last, item, lstTuple)\n\n\ntestlist = [0, 1, 2, 8, 13, 17, 19, 32, 42,]\ntestlist2 = ['a', 'c', 'f', 'g', 'm', 'q']\n# print(binarySearch(testlist, 3))\n# print(binarySearch(testlist, 13))\nprint(binarySearch(testlist2, \"c\"))\nprint(\"\")\nprint(binarySearch(testlist2, \"n\"))\nprint(binarySearch(testlist2, \"m\"))\nprint(binarySearch(['a', 'b', 'c', 'd', 'e'], 'a'))" }, { "alpha_fraction": 0.4933333396911621, "alphanum_fraction": 0.5293333530426025, "avg_line_length": 23.606557846069336, "blob_id": "1fea315144e0819635ff6d9a81401eb839e0b18a", "content_id": "80f8624c3a3e9f44bf15bd1a387e8119332efc82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1500, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/15112-CMU/week9/lec.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def factorial(n):\n if n == 1:\n return 1\n else:\n return n * factorial(n - 1)\n\nprint(factorial(5))\n\nprint(\"aaabbcaad\")\nprint(\"abcad\")\n\n# recursive calls\ndef shortenStrings(s):\n if len(s) == 1:\n return s\n else:\n if s[1] == s[0]:\n return shortenStrings(s[1:])\n else:\n return s[0] + shortenStrings(s[1:])\n\nprint(shortenStrings(\"aaabbcaad\"))\n\n# The fibonacci sequence is a mathematical sequence where each element is equal to\n# the sum of the two elements that came before it. 
This translates nicely into recursive code!\ndef fib(n, depth = 0):\n # print(\"my n is\", n)\n # print(\"fib(\",n,\") at depth\",depth)\n print(depth*\" |\", \"fib(\", n, \")\")\n if (n < 2):\n # Base case: fib(0) and fib(1) are both 1\n return 1\n else:\n # Recursive case: fib(n) = fib(n-1) + fib(n-2)\n return fib(n-1, depth + 1) + fib(n-2, depth + 1)\n\nprint([fib(n) for n in range(15)])\n# print(fib(5))\n\ndef merge(a, b):\n if len(a) == 0 or len(b) == 0:\n return a + b\n else:\n if a[0] < b[0]:\n return [a[0]] + merge(a[1:], b)\n else:\n return [b[0]] + merge(a, b[1:])\n\ndef mergeSort(lst):\n if len(lst) < 2:\n return lst\n else:\n mid = len(lst)//2\n left = lst[:mid]\n right = lst[mid:]\n left = mergeSort(left)\n right = mergeSort(right)\n return merge(left, right)\n\nprint(merge([1,2,4,7,9],[0,1,4,6,9]))\nprint(mergeSort([83,45,21,23,5]))" }, { "alpha_fraction": 0.4120016098022461, "alphanum_fraction": 0.41361257433891296, "avg_line_length": 26.898876190185547, "blob_id": "2cbabaf5f540869881ecd31d91004ebde7de1ca3", "content_id": "a03b5a657860ce71d5200ebf5d771e800c784a9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2483, "license_type": "no_license", "max_line_length": 80, "num_lines": 89, "path": "/15112-CMU/week9/findpath.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this is the main function for findCategoryPath\n\n\n\ndef recurHelper(d, value, lst):\n for key in d:\n if (type(d[key])) != dict:\n if d[key] == value:\n lst.append(key)\n return lst\n else:\n if recurHelper(d[key], value, lst + [key]) == None:\n continue\n else:\n return recurHelper(d[key], value, lst + [key])\n\n\ndef findCategoryPath(d, value):\n lst = []\n return recurHelper(d, value, lst)\n\n\n\n\n\ndef testFindCategoryPath():\n print(\"Testing findCategoryPath...\", end=\"\")\n d = {\"Sporting\":\n {\"Spaniel\":\n {\"English Springer\": \"Betsy\"},\n \"Weimaraner\": \"Xeva\",\n \"Retriever\":\n {\"Golden\": \"Sammo\",\n \"Labrador\": \"Nya\"}\n },\n \"Working\":\n {\"Husky\": \"Stella\",\n \"Saint Bernard\": \"Rutherfurd\",\n \"Boxer\": \"Paximus\"},\n \"Herding\":\n {\"Corgi\":\n {\"Welsh\":\n {\"Cardigan\": \"Geb\",\n \"Pembroke\": \"Niinja\"}\n },\n \"Sheepdog\":\n {\"Bergamasco\": \"Samur\",\n \"Old English\": \"Duggy\",\n \"Shetland\": \"Walker\"}\n },\n \"Other\": \"Kimchee\"\n }\n value1 = \"Samur\"\n value2 = \"Weimaraner\"\n assert(findCategoryPath(d, value1) == [\"Herding\", \"Sheepdog\", \"Bergamasco\"])\n assert(findCategoryPath(d, value2) == None)\n print(\"Passed!\")\n\n# testFindCategoryPath()\n\nd = {\"Sporting\":\n {\"Spaniel\":\n {\"English Springer\": \"Betsy\"},\n \"Weimaraner\": \"Xeva\",\n \"Retriever\":\n {\"Golden\": \"Sammo\",\n \"Labrador\": \"Nya\"}\n },\n \"Working\":\n {\"Husky\": \"Stella\",\n \"Saint Bernard\": \"Rutherfurd\",\n \"Boxer\": \"Paximus\"},\n \"Herding\":\n {\"Corgi\":\n {\"Welsh\":\n {\"Cardigan\": \"Geb\",\n \"Pembroke\": \"Niinja\"}\n },\n \"Sheepdog\":\n {\"Bergamasco\": \"Samur\",\n \"Old English\": \"Duggy\",\n \"Shetland\": \"Walker\"}\n },\n \"Other\": \"Kimchee\"\n }\nprint(findCategoryPath(d, \"Samur\"))\nprint(findCategoryPath(d, \"Weimaraner\"))\nprint(findCategoryPath(d, \"Betsy\"))\nprint(findCategoryPath(d, \"Other\"))\n" }, { "alpha_fraction": 0.4432900547981262, "alphanum_fraction": 0.4753246605396271, "avg_line_length": 32.014286041259766, "blob_id": "aa4c9eaaf4070232ee40b6a4038324010e0e85e0", "content_id": "112ee1ffeb968c96cd2a7d4ee27095ec44ce13da", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2310, "license_type": "no_license", "max_line_length": 80, "num_lines": 70, "path": "/15112-CMU/week2/rec2/rec2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "###############################################################################\n# --------------- 15-112 Recitation Week 2: Loops & Graphics ---------------- #\n\n# This is a starter file of the problems we did in recitation. A good way to\n# use this file is to try to re-write problems you saw in recitation from\n# scratch. This way, you can test your understanding and ask on Piazza or\n# office hours if you have questions :)\n\n# --------------------------------------------------------------------------- #\n###############################################################################\n# Code Tracing\n###############################################################################\n\ndef ct2(n):\n k = 0\n total = 0\n while (n >= k):\n print('k =', k)\n for i in range(k):\n total += n%10\n n //= 10\n print(i, n%10, total)\n k += 1 \n print('total =', total)\n return k\n\n# print(ct2(123))\n\n###############################################################################\n# Code Tracing\n###############################################################################\n\n'''\nWrite the function longestDigitRun(n) that takes a possibly-negative int value n\nand returns the digit that has the longest consecutive run (ignoring ties).\nSo,longestDigitRun(117773732) returns 7 (because there is a run of 3 consecutive\n7's), and longestDigitRun(-677886) can return either 7 or 8 because both have\nruns of length two.\n'''\n\ndef longestDigitRun(n):\n return 42\n\n\ndef testLongestDigitRun():\n assert(longestDigitRun(117773732) == 7)\n result = longestDigitRun(-677886)\n assert(result == 7 or result == 8)\n # TODO: add more test cases here!\n\n###############################################################################\n# Draw American Flag\n###############################################################################\n\nfrom tkinter import *\n\ndef drawAmericanFlag(canvas, width, height):\n pass\n\ndef runDrawing(width=300, height=300):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n drawAmericanFlag(canvas, width, height)\n root.mainloop()\n print(\"bye!\")\n\n# runDrawing(350, 200)" }, { "alpha_fraction": 0.46074575185775757, "alphanum_fraction": 0.5763927102088928, "avg_line_length": 34.72577667236328, "blob_id": "f6c59a59936cbac55b5cf0d7d416c4a2fd11d8a1", "content_id": "642a099e104522e7dbe9f6263bc62ad16aead81f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20087, "license_type": "no_license", "max_line_length": 82, "num_lines": 547, "path": "/15112-CMU/week3/hw3.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\r\n# Hw3\r\n# Your andrewID:mxu2\r\n# Your section: 2N\r\n#################################################\r\n\r\nimport cs112_s19_week3_linter\r\n\r\n\r\n#################################################\r\n# Lab3 COLLABORATIVE LAB problem\r\n# (The problem description will be released Friday, Feb 1)\r\n#################################################\r\n# The problems in this section are LAB 
PROBLEMS, which means you MUST\r\n# work on them with at least one collaborator. See the collaboration\r\n# policy in the syllabus for more details. Always list your collaborators!\r\n# For lab problems, YOU MUST LIST AT LEAST ONE COLLABORATOR\r\n\r\n# Note: You will need extra files for gradebookSummary. These will be\r\n# released FRIDAY during lab! Comment out the tests for now!\r\ndef gradebookSummaryCollaborators():\r\n return \"nobody\"\r\n\r\n# Return the contents of the file at the given path\r\ndef readFile(path):\r\n with open(path, \"rt\") as f:\r\n return f.read()\r\n\r\n# Return a summary of the students' grades in the gradebook file\r\ndef gradebookSummary(gradebookFilename):\r\n script = readFile(gradebookFilename)\r\n script = script.split(\"\\n\")\r\n result = \"\"\r\n for line in script:\r\n if line != '':\r\n if not line.startswith(\"#\"):\r\n name = \"\"\r\n score = 0\r\n characters = line.split(\",\")\r\n for i in range(len(characters)):\r\n if i == 0:\r\n name += characters[i]\r\n else:\r\n score += int(characters[i])\r\n score = score/(len(characters) - 1)\r\n score = \"%0.2f\" % score\r\n result += name + \"\\t\" + score + \"\\n\"\r\n result = result[:-1]\r\n return result\r\n\r\ndef applyCaesarCipherCollaborators():\r\n return \"nobody\"\r\n\r\n# Return ch shifted by shiftNum, wrapping around within the alphabet\r\ndef getFinalLetter(ch, shiftNum):\r\n stayInAlphabet = ord(ch) + shiftNum\r\n if ch.isalpha() and ch.islower() and shiftNum > 0:\r\n if stayInAlphabet > ord('z'):\r\n stayInAlphabet -= 26\r\n elif ch.isalpha() and ch.islower() and shiftNum < 0:\r\n if stayInAlphabet < ord('a'):\r\n stayInAlphabet += 26\r\n elif ch.isalpha() and ch.isupper() and shiftNum > 0:\r\n if stayInAlphabet > ord('Z'):\r\n stayInAlphabet -= 26\r\n elif ch.isalpha() and ch.isupper() and shiftNum < 0:\r\n if stayInAlphabet < ord('A'):\r\n stayInAlphabet += 26\r\n finalLetter = chr(stayInAlphabet)\r\n return finalLetter\r\n\r\n# Return the ciphertext obtained by applying the Caesar cipher to message\r\ndef applyCaesarCipher(message, shiftNum):\r\n cipherText = \"\"\r\n for ch in message:\r\n if ch.isspace():\r\n finalLetter = ch\r\n cipherText += finalLetter\r\n elif ch.isalpha():\r\n finalLetter = getFinalLetter(ch, shiftNum)\r\n cipherText += finalLetter\r\n else:\r\n cipherText += ch\r\n return cipherText\r\n\r\n#################################################\r\n# Hw3 COLLABORATIVE problem\r\n#################################################\r\n# The problem in this section is COLLABORATIVE, which means you may\r\n# work on it with your classmates if you wish. See the collaboration\r\n# policy in the syllabus for more details. 
Always list your collaborators!\r\n\r\ndef rightJustifyTextCollaborators():\r\n return \"xiaoqint\"\r\n\r\n# Return a new string with all runs of whitespace in text\r\n# collapsed to single spaces\r\ndef replaceWhiteSpace(text):\r\n element = text.split()\r\n newText = \" \".join(element)\r\n return newText\r\n\r\n# Return the text right-justified to the given fixed width\r\ndef rightJustifyText(text, width):\r\n text = replaceWhiteSpace(text)\r\n lenOfStr = len(text)\r\n i = width\r\n judgeIndex = i\r\n while judgeIndex < lenOfStr:\r\n if text[i] == \" \":\r\n text = text[:i] + \"\\n\" + text[i + 1:]\r\n judgeIndex = i + width + 1\r\n i = judgeIndex\r\n else:\r\n while text[i] != \" \":\r\n i -= 1\r\n if text[i] == \" \":\r\n text = text[:i] + \"\\n\" + text[i + 1:]\r\n judgeIndex = i + width + 1\r\n i = judgeIndex\r\n if judgeIndex > lenOfStr - 1:\r\n break\r\n lines = text.split('\\n')\r\n result = \"\"\r\n for line in lines:\r\n spaces = width - len(line)\r\n if lines.index(line) != len(lines) - 1:\r\n result += (\" \" * spaces + line + \"\\n\")\r\n else:\r\n result += (\" \" * spaces + line)\r\n return result\r\n\r\n\r\n#################################################\r\n# Hw3 SOLO problems\r\n#################################################\r\n\r\n\r\n\"\"\"\r\nList your style fixes here:\r\n1: changed str to char, because str is the name of a built-in function\r\n that turns something into a string\r\n2: count_matches_1= 0 was missing a whitespace before the = sign.\r\nThe same error occurred with if count_matches_1 !=count_matches_2:\r\n To fix the style, we\r\nneed to write it as if count_matches_1 != count_matches_2:\r\n3: I put the statement print(\"bad case\") before return False.\r\n If it were after return False,\r\nthe statement print(\"bad case\") would be unreachable.\r\n4: multiple if statements should be joined into an if-elif-else chain.\r\n5: Comments should be included at the start of\r\n every function (including helper functions).\r\n\"\"\"\r\n\r\n# Return True if s1 and s2 are anagrams (that is, if they contain the same\r\n# letters in possibly-different orders)\r\ndef areAnagrams(s1, s2):\r\n if len(s1) != len(s2):\r\n print(\"bad case\")\r\n return False\r\n else:\r\n for char in s1:\r\n one = 1\r\n count_matches_1 = 0\r\n count_matches_2 = 0\r\n for i in range(len(s1)):\r\n if s1[i] == char:\r\n count_matches_1 += one\r\n for i in range(len(s2)):\r\n if s2[i] == char:\r\n count_matches_2 += one\r\n if count_matches_1 != count_matches_2:\r\n return False\r\n return True\r\n\r\n\r\n# Return all substrings of a string\r\ndef getAllSubstrings(s):\r\n substrings = \"\"\r\n lenOfS = len(s)\r\n for i in range(0, lenOfS):\r\n for j in range(i, lenOfS):\r\n string = s[i: j+1]\r\n substrings += string + \",\"\r\n substrings = substrings[:-1]\r\n return substrings\r\n\r\n\r\n# Return common substrings of two strings\r\ndef findCommonSubstrings(s1, s2):\r\n commonStrings = \"\"\r\n substringOfS1 = getAllSubstrings(s1)\r\n substringOfS2 = getAllSubstrings(s2)\r\n strings1 = substringOfS1.split(\",\")\r\n strings2 = substringOfS2.split(\",\")\r\n for string_1 in strings1:\r\n for string_2 in strings2:\r\n if string_1 == string_2:\r\n commonStrings += string_1 + \",\"\r\n commonStrings = commonStrings[:-1]\r\n return commonStrings\r\n\r\n\r\n# Return the longest of the common substrings of s1 and s2\r\ndef longestCommonSubstring(s1, s2):\r\n if s1 == \"\" or s2 == \"\":\r\n return \"\"\r\n elif s1 == s2:\r\n return s1\r\n else:\r\n commonStrings = findCommonSubstrings(s1, s2)\r\n lenOfMax = 
0\r\n for commonString in commonStrings.split(\",\"):\r\n if len(commonString) > lenOfMax:\r\n lenOfMax = len(commonString)\r\n result = \"\"\r\n for commonString in commonStrings.split(\",\"):\r\n if len(commonString) == lenOfMax:\r\n result += commonString + \",\"\r\n result = result[:-1]\r\n resultList = result.split(\",\")\r\n return min(resultList)\r\n\r\n\r\n### getEvalSteps is a bonus problem, and therefore optional ###\r\n# Note: Bonus problems are solo. Do not collaborate on bonus problems.\r\ndef getEvalSteps(expr):\r\n return\r\n\r\n#################################################\r\n# Hw3 Graphics Functions\r\n# All graphics must go under here to avoid angering the autograder!\r\n# ignore_rest\r\n#################################################\r\nfrom tkinter import *\r\n\r\n# Pad the original string so that every line has the same length\r\ndef makeUpString(text):\r\n lines = text.split(\"\\n\")\r\n maxLenOfLine = len(max(lines, key=len))\r\n result = \"\"\r\n for line in lines:\r\n blanks = maxLenOfLine - len(line)\r\n result += (line + blanks * \" \" + \"\\n\")\r\n result = result[:-1]\r\n result = \"\".join(result)\r\n return result\r\n\r\n# Return the number of rows of the canvas that need to be drawn\r\ndef findRowOfCanvas(text):\r\n text = makeUpString(text)\r\n height = 0\r\n for c in text:\r\n if c == \"\\n\":\r\n height += 1\r\n height += 1\r\n return height\r\n\r\n# Return the number of columns of the canvas that need to be drawn\r\ndef findColOfcanvas(text):\r\n text = makeUpString(text)\r\n lines = text.split(\"\\n\")\r\n column = len(lines[0])\r\n return column\r\n\r\n# Map an art-string digit character to its fill color\r\ndef getColor(line, i):\r\n color = \"\"\r\n if line[i] == \"0\":\r\n color = \"#000\"\r\n elif line[i] == \"1\":\r\n color = \"#00F\"\r\n elif line[i] == \"2\":\r\n color = \"#0F0\"\r\n elif line[i] == \"3\":\r\n color = \"#0FF\"\r\n elif line[i] == \"4\":\r\n color = \"#F00\"\r\n elif line[i] == \"5\":\r\n color = \"#F0F\"\r\n elif line[i] == \"6\":\r\n color = \"#FF0\"\r\n elif line[i] == \"7\":\r\n color = \"#FFF\"\r\n return color\r\n\r\n# This is the main function of asciiDraw\r\ndef asciiDraw(canvas, artStr, width, height):\r\n newText = makeUpString(artStr)\r\n rowOfCanvas = findRowOfCanvas(newText)\r\n colOfCanvas = findColOfcanvas(newText)\r\n heightOfRectangle = height / rowOfCanvas\r\n widthOfRectangle = width / colOfCanvas\r\n j = 0\r\n for line in newText.split(\"\\n\"):\r\n for i in range(len(line)):\r\n left = 0 + i * widthOfRectangle\r\n top = 0 + j * heightOfRectangle\r\n right = left + widthOfRectangle\r\n bottom = top + heightOfRectangle\r\n color = getColor(line, i)\r\n canvas.create_rectangle(left, top, right, bottom, fill=color, width=0)\r\n j += 1\r\n\r\n\r\n#################################################\r\n# Hw3 Test Functions\r\n#################################################\r\n\r\nimport string\r\n\r\n\r\ndef testGradebookSummary():\r\n print(\"Testing gradebookSummary()...\", end=\"\")\r\n import os\r\n if not os.path.exists(\"hw3_files\"):\r\n assert False,\"You need to unzip hw3_files.zip to test gradebookSummary\"\r\n\r\n assert(gradebookSummary(\"hw3_files/gradebook1.txt\") ==\r\n \"wilma\\t92.67\\nfred\\t90.40\\nbetty\\t88.00\")\r\n assert(gradebookSummary(\"hw3_files/gradebook2.txt\") ==\r\n \"wilma\\t92.67\\nfred\\t90.40\\nbetty\\t88.00\")\r\n assert(gradebookSummary(\"hw3_files/small1.txt\") ==\r\n \"fred\\t0.00\")\r\n assert(gradebookSummary(\"hw3_files/small2.txt\") ==\r\n \"fred\\t-1.00\\nwilma\\t-2.00\")\r\n assert(gradebookSummary(\"hw3_files/small3.txt\") ==\r\n 
\"fred\\t100.50\")\r\n assert(gradebookSummary(\"hw3_files/small4.txt\") ==\r\n \"fred\\t49.00\\nwilma\\t50.00\")\r\n print(\"Passed.\")\r\n\r\ndef testApplyCaesarCipher():\r\n print(\"Testing applyCaesarCipher()...\", end=\"\")\r\n assert(applyCaesarCipher(\"abcdefghijklmnopqrstuvwxyz\", 3) == \\\r\n \"defghijklmnopqrstuvwxyzabc\")\r\n assert(applyCaesarCipher(\"We Attack At Dawn\", 1) == \"Xf Buubdl Bu Ebxo\")\r\n assert(applyCaesarCipher(\"1234\", 6) == \"1234\")\r\n print(\"Passed.\")\r\n\r\ndef testRightJustifyText():\r\n print(\"Testing rightJustifyText()...\", end=\"\")\r\n text1 = \"\"\"\\\r\nWe hold these truths to be self-evident: that all men are created equal;\r\nthat they are endowed by their Creator with certain unalienable rights;\r\nthat among these are life, liberty, and the pursuit of happiness.\"\"\"\r\n text1Result = \"\"\"\\\r\n We hold these truths to be\r\nself-evident: that all men are\r\n created equal; that they are\r\n endowed by their Creator with\r\n certain unalienable rights;\r\n that among these are life,\r\n liberty, and the pursuit of\r\n happiness.\"\"\"\r\n assert(rightJustifyText(text1, 30) == text1Result)\r\n text2 = \"\"\"\\\r\nThough, in reviewing the incidents of my administration,\r\nI am unconscious of intentional error, I am nevertheless too sensible of my\r\ndefects not to think it probable that I may have committed many errors.\r\nI shall also carry with me the hope that my country will view them with\r\nindulgence; and that after forty-five years of my life dedicated to its service\r\nwith an upright zeal, the faults of incompetent abilities will be consigned to\r\noblivion, as I myself must soon be to the mansions of rest.\r\n\r\nI anticipate with pleasing expectation that retreat in which I promise myself\r\nto realize the sweet enjoyment of partaking, in the midst of my fellow-citizens,\r\nthe benign influence of good laws under a free government,\r\nthe ever-favorite object of my heart, and the happy reward,\r\nas I trust, of our mutual cares, labors, and dangers.\"\"\"\r\n text2Result = \"\"\"\\\r\n Though, in reviewing the incidents of my administration, I am\r\nunconscious of intentional error, I am nevertheless too sensible of my\r\n defects not to think it probable that I may have committed many\r\n errors. I shall also carry with me the hope that my country will view\r\n them with indulgence; and that after forty-five years of my life\r\n dedicated to its service with an upright zeal, the faults of\r\n incompetent abilities will be consigned to oblivion, as I myself must\r\n soon be to the mansions of rest. 
I anticipate with pleasing\r\n expectation that retreat in which I promise myself to realize the\r\n sweet enjoyment of partaking, in the midst of my fellow-citizens, the\r\n benign influence of good laws under a free government, the\r\never-favorite object of my heart, and the happy reward, as I trust, of\r\n our mutual cares, labors, and dangers.\"\"\"\r\n assert(rightJustifyText(text2, 70) == text2Result)\r\n print(\"Passed.\")\r\n\r\ndef testLongestCommonSubstring():\r\n print(\"Testing longestCommonSubstring()...\", end=\"\")\r\n assert(longestCommonSubstring(\"abcdef\", \"abqrcdest\") == \"cde\")\r\n assert(longestCommonSubstring(\"abcdef\", \"ghi\") == \"\")\r\n assert(longestCommonSubstring(\"\", \"abqrcdest\") == \"\")\r\n assert(longestCommonSubstring(\"abcdef\", \"\") == \"\")\r\n assert(longestCommonSubstring(\"abcABC\", \"zzabZZAB\") == \"AB\")\r\n print(\"Passed.\")\r\n\r\ndef runAsciiDraw(artStr, width, height):\r\n root = Tk()\r\n root.resizable(width=False, height=False) # prevents resizing window\r\n canvas = Canvas(root, width=width, height=height)\r\n canvas.configure(bd=0, highlightthickness=0)\r\n canvas.pack()\r\n asciiDraw(canvas, artStr, width, height)\r\n root.mainloop()\r\n\r\ndef testAsciiDraw():\r\n testPattern=\"0123\\n4567\"\r\n print(\"Testing asciiDraw with color pattern:\\n\",testPattern,end=\"\")\r\n runAsciiDraw(testPattern, 600, 300)\r\n\r\n diamondPattern=''' \r\n 1 2 4\r\n 111 222 444\r\n11111 22222 44444\r\n 111 222 444\r\n 1 2 4\r\n '''\r\n\r\n print(\"Testing asciiDraw with diamond pattern:\\n\",diamondPattern,end=\"\")\r\n runAsciiDraw(diamondPattern, 600, 300)\r\n\r\n facePattern = ''' \r\n 0022222222222222222\r\n 02222222222222222222222220\r\n 02222222222222222222222222222220 02 02 02\r\n 0 0 0 02222222222222222222222222222222222220 02 22 2202\r\n0 2 2 02 0222222222 2222222222222 2222222220 02202202\r\n022222202 0222222222 22222222222 22222222220 02222222\r\n 0222222 02222222222 22222222222 22222222222222222222222\r\n 02222222222222222222222 2222222222222 22222222222222 0222\r\n 022202222222222222222222222222222222222222222222222222222 0222\r\n 022 022222222222222222222222222222222222222222222222222 02220\r\n 0220 222222222222222222222222222222222222222222222222222 2220\r\n 022 222222222222222222222222222222222222222222222000222222022220\r\n 0222022222 2222222222222222222222222222222222222 022222222222222222\r\n 0222 202222 2222222222222222222222222222222222 02220\r\n 0222 0222 022222222222222222222222222220 0222\r\n 02220 02222222222222222220220 022\r\n 0220 02202222220 0222\r\n 02220 02220\r\n 022220 02222220022220 02222\r\n 0222220 022222222220 022220\r\n 0222220 022222222222220\r\n 02222222022222222222\r\n 022222222222\r\n 022222222222\r\n 02222222220\r\n 02220\r\n \r\n '''\r\n print(\"Testing asciiDraw with face pattern:\\n\",facePattern,end=\"\")\r\n runAsciiDraw(facePattern, 800, 600)\r\n\r\n hourglassPattern = ''' \r\n 0000\r\n 00000000000000000000 00\r\n00000000 0000000000000 00\r\n0 000000000000000000 000000 00\r\n0000 11111 0 0 00 00\r\n00 0 111111111111111110 0 0000 0\r\n 0 000000111111111111111111110 00000 0000\r\n 0 0 0 111111111111111111110 0 0 0\r\n 0 0 0 111111111111111555550 0 0 0\r\n 0 0 0 111111111555644644640 0 0 0\r\n 0 0 0 111115546446446446440 0 0 0\r\n 0 0 0 111546464464464464460 0 0 0\r\n 0 0 0 154644644644644646660 0 0 0\r\n 0 0 0 056446446446666666660 0 0 0\r\n 0 0 0 0 5666666666666666650 0 0 0\r\n 0 0 0 0 566666666666666510 0 0 0\r\n 0 0 0 0 16666666666661 0 0 0 0\r\n 0 0 0 0 116666666651 0 0 0 0\r\n 0 0 0 0 1156111 
0 0 0 0\r\n 0 0 0 0 55 0 0 0 0\r\n 0 0 0 0 11165111 0 0 0 0\r\n 0 0 0 0 1111156111111 0 0 0 0\r\n 0 0 0 0 111111551111111 0 0 0 0\r\n 0 0 0 0 111111165111111110 0 0 0\r\n 0 0 0 0 1111111155111111110 0 0 0\r\n 0 0 0 011111111161111111110 0 0 0\r\n 0 0 0 011111111161111111110 0 0 0\r\n 0 0 0 111111115666511111110 0 0 0\r\n 0 0 0 111111156666651111110 0 0 0\r\n 0 0 0 111111566666665111110 0 0 0\r\n 0 0 01111115666666666511110 0 0 0\r\n 0 0 0001111156666666666651110 00000 0000\r\n 0 00 1111566666666666665110 0 000 00\r\n 0 0 1111566666666666666510 0 00 0\r\n00000 0115666666666666666510 0 00 00\r\n0 00000000006666660000 000 00 00\r\n0000000000000 111111 0000000000 00\r\n 00000000000000000000000\r\n '''\r\n print(\"Testing asciiDraw with hourglass pattern:\\n\",hourglassPattern,end=\"\")\r\n runAsciiDraw(hourglassPattern, 400, 600)\r\n print(\"Done testing asciiDraw!\")\r\n\r\ndef testBonusGetEvalSteps():\r\n print(\"Testing getEvalSteps()...\", end=\"\")\r\n assert(getEvalSteps(\"0\") == \"0 = 0\")\r\n assert(getEvalSteps(\"2\") == \"2 = 2\")\r\n assert(getEvalSteps(\"3+2\") == \"3+2 = 5\")\r\n assert(getEvalSteps(\"3-2\") == \"3-2 = 1\")\r\n assert(getEvalSteps(\"3**2\") == \"3**2 = 9\")\r\n assert(getEvalSteps(\"31%16\") == \"31%16 = 15\")\r\n assert(getEvalSteps(\"31*16\") == \"31*16 = 496\")\r\n assert(getEvalSteps(\"32//16\") == \"32//16 = 2\")\r\n assert(getEvalSteps(\"2+3*4\") == \"2+3*4 = 2+12\\n = 14\")\r\n assert(getEvalSteps(\"2*3+4\") == \"2*3+4 = 6+4\\n = 10\")\r\n assert(getEvalSteps(\"2+3*4-8**3%3\") == \"\"\"\\\r\n2+3*4-8**3%3 = 2+3*4-512%3\r\n = 2+12-512%3\r\n = 2+12-2\r\n = 14-2\r\n = 12\"\"\")\r\n assert(getEvalSteps(\"2+3**4%2**4+15//3-8\") == \"\"\"\\\r\n2+3**4%2**4+15//3-8 = 2+81%2**4+15//3-8\r\n = 2+81%16+15//3-8\r\n = 2+1+15//3-8\r\n = 2+1+5-8\r\n = 3+5-8\r\n = 8-8\r\n = 0\"\"\")\r\n print(\"Passed.\")\r\n\r\n#################################################\r\n# Hw3 Main\r\n#################################################\r\n\r\ndef testAll():\r\n testGradebookSummary()\r\n testApplyCaesarCipher()\r\n testRightJustifyText()\r\n testLongestCommonSubstring()\r\n testAsciiDraw()\r\n\r\n #Uncomment the next line if you want to try the bonus!\r\n #testBonusGetEvalSteps()\r\n\r\n\r\ndef main():\r\n cs112_s19_week3_linter.lint() # check for banned tokens\r\n # testAll()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\nprint(longestCommonSubstring(\"abcdef\", \"abqrcdest\"))" }, { "alpha_fraction": 0.5795094966888428, "alphanum_fraction": 0.6159018874168396, "avg_line_length": 38.453125, "blob_id": "34277462227d2d9e0a4fdd520125196ef5873dc0", "content_id": "d9c0fb09b2ec225a5368fb5caeb026d34b7bcf99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2528, "license_type": "no_license", "max_line_length": 89, "num_lines": 64, "path": "/15112-CMU/TP/ControlBoard.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file is mainly for class GameControl\nimport pygame\nfrom Colors import *\nfrom Ball import boundaryElem\nfrom Ball import ball\n\n\nclass GameControl(object):\n def __init__(self):\n self.isBlueTeamGoal = True\n self.isRedTeamGoal = True\n self.blueTeamScore = 0\n self.redTeamScore = 0\n self.goalZoneRectForBlue = pygame.Rect(boundaryElem()[4],\n (boundaryElem()[5] + boundaryElem()[6] +\n 1 / 3 * (boundaryElem()[3])), 10, 200)\n self.goalZoneRectForRed = pygame.Rect(boundaryElem()[4] + boundaryElem()[2] - 10,\n (boundaryElem()[5] + boundaryElem()[6] +\n 1 / 3 * 
(boundaryElem()[3])), 150, 200)\n self.font1 = pygame.font.Font('assets/fonts/Muli-Regular.ttf', 30)\n self.font2 = pygame.font.Font('assets/fonts/Muli-Bold.ttf', 30)\n\n\n # this method checks whether either team has scored a goal\n def teamISGoal(self):\n if ball.rect.colliderect(self.goalZoneRectForBlue):\n self.redTeamScore += 1\n return self.isBlueTeamGoal\n elif ball.rect.colliderect(self.goalZoneRectForRed):\n self.blueTeamScore += 1\n return self.isRedTeamGoal\n else:\n return False\n\n\n # citation: I wrote the text-rendering code below based on this link's code\n # https://github.com/ilaishai/dotchaser/blob/b1e26fe62e54429\n # afd1689c1051a201fe79f5a79/venv/Lib/site-packages/pygame/tests/font_test.py\n\n\n # this renders one piece of the score text onto gameDisplay\n def generateScore(self, text, color, gameDisplay, coordinateX):\n import random\n fontLst = [self.font1, self.font2]\n font = random.choice(fontLst)\n surface = font.render(text, True, color)\n surfaceRect = surface.get_rect()\n halfHeightOfScoreTable = 30\n surfaceRect.center = (coordinateX, halfHeightOfScoreTable)\n gameDisplay.blit(surface, surfaceRect)\n\n\n # this is the main score-drawing function\n def drawScore(self, gameDisplay):\n text1 = 'Blue Team: '\n color1 = Colors.Blue\n text2 = str(self.blueTeamScore)\n text3 = 'Red Team: '\n color2 = Colors.Red\n text4 = str(self.redTeamScore)\n self.generateScore(text1, color1, gameDisplay, 450)\n self.generateScore(text2, color1, gameDisplay, 550)\n self.generateScore(text3, color2, gameDisplay, 660)\n self.generateScore(text4, color2, gameDisplay, 750)\n\n\n\n" }, { "alpha_fraction": 0.511965811252594, "alphanum_fraction": 0.526068389415741, "avg_line_length": 23.887754440307617, "blob_id": "6a176dc1b5094b005334ef830955fedca0d9b915", "content_id": "c1c0594abcf035aafb6b7ecddb6cb36eb7bd6a83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2340, "license_type": "no_license", "max_line_length": 65, "num_lines": 98, "path": "/15112-CMU/untitled folder 2/hehe.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def numberWooTriple(L):\n s = set(L)\n res = set()\n for i in range(len(L)):\n for j in range(len(L)):\n c = L[i]**2 % L[j]\n t = (L[i], L[j], c)\n t = tuple(sorted(t))\n if c in s:\n res.add(t)\n return res\n\nprint(numberWooTriple([1,3,2]))\n\n\ndef check(L):\n if len(L) == 1:\n if int(L[0]**0.5) == L[0]**0.5:\n return True\n else:\n return False\n for i in range(len(L) - 1):\n if int((L[i] + L[i + 1])**0.5) != (L[i] + L[i + 1])**0.5:\n return False\n return True\n\n\ndef getSquarefulArrangeHelper(res, L):\n if len(L) == 0 and check(res):\n return res\n if len(L) == 0:\n return\n for elem in L:\n if check(res + [elem]):\n res.append(elem)\n L.remove(elem)\n tmp = getSquarefulArrangeHelper(res, L)\n if tmp is not None:\n return tmp\n return None\n\n\ndef getSquarefulArrange(L):\n return getSquarefulArrangeHelper([], L)\n\n\nprint(getSquarefulArrange([1,17,8]))\nprint(getSquarefulArrange([1,2,3]))\n\n\ndef readFile(path):\n with open(path, \"rt\") as f:\n return f.read()\n\ndef writeFile(path, contents):\n with open(path, \"wt\") as f:\n f.write(contents)\n\nimport os\n\n\n\ndef bestNameFile(path, name):\n if os.path.isfile(path):\n return path\n else:\n numOfOcurrance = 0\n res = \"\"\n for filename in os.listdir(path):\n if filename == '.DS_Store':\n continue\n tmpPath = bestNameFile(path + \"/\" + filename, name)\n contents = readFile(tmpPath)\n if contents.count(name) >= numOfOcurrance:\n numOfOcurrance = 
contents.count(name)\n res = tmpPath\n return res\n\n\n\ndef bestNameFileHelper(path, name):\n if os.path.isfile(path):\n contents = readFile(path).lower()\n occurance = contents.count(name)\n return path, occurance\n else:\n current = (path, 0)\n for filename in os.listdir(path):\n newPath = path + os.sep + filename\n temp = bestNameFileHelper(newPath, name)\n if current[1] <= temp[1]:\n current = temp\n return current\n\n\ndef bestNameFile2(path, name):\n name = name.lower()\n return bestNameFileHelper(path, name)[0]" }, { "alpha_fraction": 0.5271936058998108, "alphanum_fraction": 0.5395213961601257, "avg_line_length": 23.64285659790039, "blob_id": "1e90ab4f3de48ac28896fd708e6e83e576cf99cd", "content_id": "4d70b9caa217e0eaad9a8aa66126f490bfaab0ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1379, "license_type": "no_license", "max_line_length": 57, "num_lines": 56, "path": "/15112-CMU/week4 cold cold/nnnn.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def perm(lst):\n if len(lst) == 0:\n return []\n elif len(lst) == 1:\n return [lst]\n else:\n result = []\n for i in range(len(lst)):\n x = lst[i]\n xs = lst[:i] + lst[i + 1:]\n for p in perm(xs):\n result.append([x] + p)\n return result\n\n\ndef getSubLists(lst):\n outPut = [[]]\n for i in range(len(lst)):\n for j in range(len(outPut)):\n outPut.append(outPut[j] + [lst[i]])\n outPut.remove([])\n return outPut\n\n\ndef findAllPossibleWordsOfHand(hand):\n if len(hand) == 1:\n return hand\n result = []\n subLists = getSubLists(hand)\n for i in range(len(subLists)):\n result += perm(subLists[i])\n answer = []\n for j in range(len(result)):\n char = \"\".join(result[j])\n answer += [char]\n return answer\n\n\ndef bestScrabbleScore(dictionary, hand):\n allPossibleWords = findAllPossibleWordsOfHand(hand)\n # valueOfWords = []\n # valueOfWord = 0\n wordLst = []\n for i in range(len(allPossibleWords)):\n if allPossibleWords[i] in dictionary:\n wordLst.append(allPossibleWords[i])\n print(wordLst)\n\n\n\n\ndef d1(): return [\"a\", \"b\", \"c\"]\ndef ls1(): return [1] * 26\ndef d2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\ndef ls2(): return [1 + (i % 5) for i in range(26)]\nprint(bestScrabbleScore(d2(),[\"x\",\"y\",\"z\"]))" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.626893937587738, "avg_line_length": 27.303571701049805, "blob_id": "18e1ae48f03a791338f14d0457c8176a9ef033c7", "content_id": "c6376ed17a4fcb00dfb64eebc7f7959cefa76956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "no_license", "max_line_length": 77, "num_lines": 56, "path": "/15112-CMU/112-opencv-tutorial-master/manualThreshold.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\n# This is going to be EXTREMELY slow, since we're using python for loops\ndef manual_threshold(image):\n # Define some constants\n WHITE = 255\n BLACK = 0\n THRESH = 127\n\n # Convert our input image to grayscale so that it's easy to threshold\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Create a new array of all zeros to store our thresholded image in\n # It will be the same size as our grey image\n thresholded = np.zeros(grey.shape, np.uint8)\n \n # Iterate over the grey image, and store results in thresholded\n for i in xrange(grey.shape[0]):\n for j in xrange(grey.shape[1]):\n # If we're over a 
certain target value, then saturate to white\n # otherwise, we're under the bar, dilute to black\n thresholded[i][j] = WHITE if grey[i][j] > THRESH else BLACK\n\n # Return our handiwork\n return thresholded\n\n# We've finally put our code in a function instead!\ndef main():\n\n window_name = \"Webcam!\"\n\n cam_index = 0\n cv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(cam_index)\n cap.open(cam_index)\n\n while True:\n\n ret, frame = cap.read()\n\n if frame is not None:\n # Instead of showing the original image, show the thresholded one\n cv2.imshow(window_name, manual_threshold(frame))\n \n k = cv2.waitKey(1) & 0xFF\n if k == 27: # Escape key\n cv2.destroyAllWindows()\n cap.release()\n break\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.579727828502655, "alphanum_fraction": 0.5804256796836853, "avg_line_length": 39.09090805053711, "blob_id": "616d5a0aa202c04b09249713e21aa3deda93c5ac", "content_id": "1e5a68031f16fc0c22cf3b490480ad83dbe3e7f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5732, "license_type": "no_license", "max_line_length": 83, "num_lines": 143, "path": "/15112-CMU/Game_AI/GameAI.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# written by Eric Clinch\n\n#####################################################\n# pseudocode for minimax with no heuristic #\n#####################################################\n\n# takes a board and returns a tuple (move, score) where move is the\n# best move for Maxie and score is the board score that results\n# from making that move. The best move is the one that maximizes\n# Maxie's score by maximizing the board score\ndef MaxieMove(board):\n if board.gameOver():\n return (None, float('inf')) if board.won(Maxie) else (None, float('-inf'))\n else:\n bestMove = None\n bestScore = float('-inf')\n for move in board.legalMoves(Maxie):\n board.makeMove(move)\n _, moveScore = MinnieMove(board)\n board.undoMove(move)\n if moveScore > bestScore:\n bestScore = moveScore\n bestMove = move\n return (bestMove, bestScore)\n\n# same as Maxie, but maximizes Minnie's score by minimizing\n# the board score\ndef MinnieMove(board):\n if board.gameOver():\n return (None, float('-inf')) if board.won(Minnie) else (None, float('inf'))\n else:\n bestMove = None\n bestScore = float('inf')\n for move in board.legalMoves(Minnie):\n board.makeMove(move)\n _, moveScore = MaxieMove(board)\n board.undoMove(move)\n if moveScore < bestScore:\n bestScore = moveScore\n bestMove = move\n return (bestMove, bestScore)\n\n#####################################################\n# pseudocode for minimax with heuristics #\n#####################################################\n\n# takes a board and depth and returns a tuple (move, score) where move is the\n# best move for Maxie and score is the board score that results\n# from making that move. 
The best move is the one that maximizes\n# Maxie's score by maximizing the board score.\n# If depth is the max depth, returns the score given by a heuristic function\ndef MaxieMoveWithHeuristics(board, depth):\n if board.gameOver():\n return (None, float('inf')) if board.won(Maxie) else (None, float('-inf'))\n elif depth == maxDepth:\n return (None, heuristic(board))\n else:\n bestMove = None\n bestScore = float('-inf')\n for move in board.legalMoves(Maxie):\n board.makeMove(move)\n _, moveScore = MinnieMoveWithHeuristics(board, depth + 1)\n board.undoMove(move)\n if moveScore > bestScore:\n bestScore = moveScore\n bestMove = move\n return (bestMove, bestScore)\n\n# same as Maxie, but maximizes Minnie's score by minimizing\n# the board score\ndef MinnieMoveWithHeuristics(board, depth):\n if board.gameOver():\n return (None, float('-inf')) if board.won(Minnie) else (None, float('inf'))\n elif depth == maxDepth:\n return (None, heuristic(board))\n else:\n bestMove = None\n bestScore = float('inf')\n for move in board.legalMoves(Minnie):\n board.makeMove(move)\n _, moveScore = MaxieMoveWithHeuristics(board, depth + 1)\n board.undoMove(move)\n if moveScore < bestScore:\n bestScore = moveScore\n bestMove = move\n return (bestMove, bestScore)\n\n#################################################################\n# pseudocode for minimax with heuristics and alpha-beta pruning #\n#################################################################\n\n# takes a board, depth, alpha, and beta where alpha and beta are \n# the best scores guaranteed for Maxie and Minnie, respectively.\n# Returns a tuple (move, score) where move is the\n# best move for Maxie and score is the board score that results\n# from making that move. The best move is the one that maximizes\n# Maxie's score by maximizing the board score.\n# Uses alpha-beta pruning to prune this part of the game tree if it\n# detects that this branch will never be relevant to the overall search.\n# If depth is the max depth, returns the score given by a heuristic function\ndef MaxieMoveAlphaBeta(board, depth, alpha, beta):\n assert(alpha < beta)\n if board.gameOver():\n return (None, float('inf')) if board.won(Maxie) else (None, float('-inf'))\n elif depth == maxDepth:\n return (None, heuristic(board))\n else:\n bestMove = None\n bestScore = float('-inf')\n for move in board.legalMoves(Maxie):\n board.makeMove(move)\n _, moveScore = MinnieMoveAlphaBeta(board, depth + 1, alpha, beta)\n board.undoMove(move)\n if moveScore > bestScore:\n bestScore = moveScore\n bestMove = move\n alpha = max(alpha, bestScore)\n if (alpha >= beta):\n return (bestMove, bestScore)\n return (bestMove, bestScore)\n\n# same as Maxie, but maximizes Minnie's score by minimizing\n# the board score\ndef MinnieMoveAlphaBeta(board, depth, alpha, beta):\n assert(alpha < beta)\n if board.gameOver():\n return (None, float('-inf')) if board.won(Minnie) else (None, float('inf'))\n elif depth == maxDepth:\n return (None, heuristic(board))\n else:\n bestMove = None\n bestScore = float('inf')\n for move in board.legalMoves(Minnie):\n board.makeMove(move)\n _, moveScore = MaxieMoveAlphaBeta(board, depth + 1, alpha, beta)\n board.undoMove(move)\n if moveScore < bestScore:\n bestScore = moveScore\n bestMove = move\n beta = min(beta, bestScore)\n if (alpha >= beta):\n return (bestMove, bestScore)\n return (bestMove, bestScore)" }, { "alpha_fraction": 0.575056791305542, "alphanum_fraction": 0.5844173431396484, "avg_line_length": 36.44845199584961, "blob_id": 
"9f276760b4da1eeefc6a615fbc15ade4ba0189a5", "content_id": "4cf6fb2d798971551cda8bb57d6214fd658abf2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14529, "license_type": "no_license", "max_line_length": 101, "num_lines": 388, "path": "/15112-CMU/week10/notes-recursion-maze-solver.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# mazeSolver.py\n\nfrom tkinter import *\nimport random\nimport math\nimport copy\n\n############################## backtracking ####################################\n\nNORTH = (-1, 0)\nSOUTH = ( 1, 0)\nEAST = ( 0, 1)\nWEST = ( 0, -1)\n\ndef isValid(data, row, col, direction):\n if not (0 <= row < len(data.maze) and 0 <= col < len(data.maze[0])):\n return False\n return direction in data.maze[row][col].bridges\n\ndef solve(data, row, col, visited, alreadySeen):\n # base case - reach the end of the map and we're done!\n if row == len(data.maze)-1 and col == len(data.maze[0])-1:\n return visited\n # recursive case - try each possible direction from the current point\n for direction in [SOUTH, EAST, NORTH, WEST]:\n drow, dcol = direction\n # It's a valid move if there is a bridge and we haven't visited it\n if isValid(data, row, col, direction) and \\\n (row + drow, col + dcol) not in alreadySeen:\n # Make the move by adding the new point to visited and updating the\n # current row/col\n visited.append((row + drow, col + dcol))\n alreadySeen.add((row + drow, col + dcol))\n tmpSolution = solve(data, row + drow, col + dcol, visited, alreadySeen)\n if tmpSolution != None:\n return tmpSolution\n # Make sure to undo the move if the solution doesn't work out!\n visited.pop()\n # We won't undo alreadySeen, because we know we can't reach the end from here.\n return None\n\ndef fullSolve(data, row, col, visited, alreadySeen, fullSolution):\n # This version keeps track of each backtracking step, so we can visualize it\n fullSolution.append(copy.deepcopy(visited))\n # base case - reach the end of the map and we're done!\n if row == len(data.maze)-1 and col == len(data.maze[0])-1:\n return visited\n # recursive case - try each possible direction from the current point\n for direction in [SOUTH, EAST, NORTH, WEST]:\n drow, dcol = direction\n # It's a valid move if there is a bridge and we haven't visited it\n if isValid(data, row, col, direction) and \\\n (row + drow, col + dcol) not in alreadySeen:\n # Make the move by adding the new point to visited and updating the\n # current row/col\n visited.append((row + drow, col + dcol))\n alreadySeen.add((row + drow, col + dcol))\n tmpSolution = fullSolve(data, row + drow, col + dcol, visited, alreadySeen, fullSolution)\n if tmpSolution != None:\n return tmpSolution\n # Make sure to undo the move if the solution doesn't work out!\n visited.pop()\n # We won't undo alreadySeen, because we know we can't reach the end from here.\n return None\n \n\ndef solveMaze(data, getFull=False):\n visited = [(0, 0)]\n alreadySeen = set()\n if getFull:\n fullSolution = []\n sol = fullSolve(data, 0, 0, visited, alreadySeen, fullSolution)\n return fullSolution\n return solve(data, 0, 0, visited, alreadySeen)\n\n############################## interactive #####################################\n\ndef keyPressed(event, data):\n row, col = data.playerSpot\n if data.inHelpScreen:\n data.inHelpScreen = False\n elif event.char == \"+\":\n init(data, data.rows+1, data.cols+1, False)\n elif event.char == \"-\":\n init(data, data.rows-1, data.cols-1, False)\n elif event.char == 
\"h\":\n data.inHelpScreen = True\n elif event.char == \"r\":\n resetGame(data)\n elif event.char == \"p\":\n data.isPolar = not data.isPolar\n elif event.char == \"c\":\n data.cycle = not data.cycle\n resetGame(data)\n elif event.char == \"s\":\n # toggle solution\n if data.solution == None:\n data.solution = solveMaze(data)\n data.inBacktrack = False\n data.fullSolution = None\n data.backtrackIndex = 0\n else:\n data.solution = None\n elif event.char == \"b\":\n # toggle backtracking!\n data.inBacktrack = not data.inBacktrack\n data.fullSolution = solveMaze(data, getFull=True)\n data.backtrackIndex = 0\n \n # Different key movement for backtrack mode vs. normal mode\n if data.inBacktrack:\n if event.keysym in [\"Up\", \"Right\"] and \\\n data.backtrackIndex < len(data.fullSolution)-1:\n data.backtrackIndex += 1\n elif event.keysym in [\"Down\", \"Left\"] and data.backtrackIndex > 0:\n data.backtrackIndex -= 1\n else:\n if event.keysym == \"Up\" and isValid(data, row, col, NORTH):\n doMove(data, row, col, NORTH)\n elif event.keysym == \"Down\" and isValid(data, row, col, SOUTH):\n doMove(data, row, col, SOUTH)\n elif event.keysym == \"Left\" and isValid(data, row, col, WEST):\n doMove(data, row, col, WEST)\n elif event.keysym == \"Right\" and isValid(data, row, col, EAST):\n doMove(data, row, col, EAST)\n\ndef resetGame(data):\n rows, cols = len(data.maze), len(data.maze[0])\n data.solution = None\n data.path = [(0, 0)]\n data.playerSpot = (0, 0)\n if data.inBacktrack:\n data.fullSolution = solveMaze(data, getFull=True)\n data.backtrackIndex = 0\n data.maze = makeBlankMaze(rows, cols)\n connectIslands(data, data.maze)\n if data.inBacktrack:\n data.fullSolution = solveMaze(data, getFull=True)\n else:\n data.fullSolution = None\n\ndef doMove(data, row, col, direction):\n (drow, dcol) = direction\n if not (0 <= row < len(data.maze) and 0 <= col < len(data.maze[0])): \n return False\n if len(data.path) >= 2 and data.path[-2] == (row + drow, col + dcol):\n data.path.pop() # undo last move\n elif (row + drow, col + dcol) in data.path:\n return False # we can't move there, it's already in the path!\n else:\n data.path.append((row + drow, col + dcol))\n data.playerSpot = (row + drow, col + dcol)\n\ndef mousePressed(event, data): pass\n\ndef timerFired(data): pass\n\n##################################### draw #####################################\n\ndef redrawAll(canvas, data):\n if data.inHelpScreen: \n return drawHelpScreen(canvas, data)\n canvas.create_rectangle(0, 0, data.width, data.height, fill = \"black\")\n drawBridges(canvas, data)\n drawIslands(canvas, data)\n if data.inBacktrack:\n highlightPath(canvas, data, data.fullSolution[data.backtrackIndex], data.solutionColor)\n elif data.solution != None: \n highlightPath(canvas, data, data.solution, data.solutionColor)\n # Draw the current player path\n highlightPath(canvas, data, data.path, data.pathColor)\n (pRow, pCol) = data.playerSpot\n drawCircle(canvas, islandCenter(data, pRow, pCol), \n data.islandR, data.playerColor)\n\ndef drawHelpScreen(canvas, data):\n message = \"\"\"\narrows to solve manually\ns to toggle solution on/off\nb to toggle backtrack visualizer on/off\nc to toggle cycles on/off\np to toggle circular (polar) on/off\nr to reset (make new maze)\n+ to increase maze size\n- to decrease maze size\nh to view this help screen\npress any key to continue\n\"\"\"\n canvas.create_text(data.width/2, 50, text=\"Maze Solver\", \n font=\"Helvetica 32 bold\")\n canvas.create_text(data.width/2, data.height/2, text=message, \n 
justify=\"center\", font=\"Helvetica 24 bold\")\n\ndef drawIslands(canvas, data):\n for row in range(len(data.maze)):\n for col in range(len(data.maze[0])):\n drawCircle(canvas, islandCenter(data, row, col), \n data.islandR, data.islandColor)\n\ndef drawCircle(canvas, position, r, color):\n (cx, cy) = position\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, fill=color, width=0)\n\ndef islandCenter(data, row, col):\n if data.isPolar:\n cx, cy = data.width/2, data.height/2\n rows, cols = len(data.maze), len(data.maze[0])\n maxR = min(cx, cy)\n r = maxR*(row+1)/(rows+1)\n theta = 2*math.pi*col/cols\n return cx + r*math.cos(theta), cy - r*math.sin(theta)\n else:\n return (col + 0.5) * data.cW, (row + 0.5) * data.cH\n\ndef drawBridges(canvas, data):\n for r in range(len(data.maze)):\n for c in range(len(data.maze[0])):\n island = data.maze[r][c]\n # Only draw East and South to avoid duplication\n if EAST in island.bridges:\n canvas.create_line(islandCenter(data, r, c),\n islandCenter(data, r, c+1),\n fill=data.bridgeColor, width=data.bridgeSize)\n if SOUTH in island.bridges:\n canvas.create_line(islandCenter(data, r, c),\n islandCenter(data, r+1, c),\n fill=data.bridgeColor, width=data.bridgeSize)\n\ndef highlightPath(canvas, data, path, color):\n for i in range(len(path)):\n (row, col) = path[i]\n # Highlight the islands and bridges in the path\n drawCircle(canvas, islandCenter(data, row, col), data.islandR, color)\n if i != len(path)-1:\n (nRow, nCol) = path[i+1]\n canvas.create_line(islandCenter(data, row, col),\n islandCenter(data, nRow, nCol),\n fill=color, width=data.bridgeSize)\n\n##################################### init #####################################\n\ndef init(data, rows=10, cols=10, inHelpScreen=True):\n if (rows < 1): rows = 1\n if (cols < 1): cols = 1\n data.inHelpScreen = inHelpScreen\n data.rows = rows\n data.cols = cols\n data.islandColor = \"dark green\"\n data.bridgeColor = \"white\"\n data.pathColor = \"blue\"\n data.playerColor = \"green\"\n data.solutionColor = \"red\"\n data.inBacktrack = False\n data.fullSolution = None\n data.backtrackIndex = 0\n data.isPolar = False\n data.cycle = False\n data.path = [(0, 0)]\n data.solution = None\n data.playerSpot = (0, 0)\n margin = 5\n data.cW = (data.width - margin) / cols\n data.cH = (data.height - margin) / rows\n data.islandR = min(data.cW, data.cH) / 6\n data.bridgeSize = min(data.cW, data.cH) / 15\n data.margin = margin\n #make the islands\n data.maze = makeBlankMaze(rows,cols)\n #connect the islands\n connectIslands(data, data.maze)\n\nclass Island(object):\n def __init__(self, number):\n self.number = number\n self.bridges = { } # start with no bridges\n\n\ndef makeBlankMaze(rows, cols):\n return [ [ Island(row*cols + col) for col in range(cols) ] \n for row in range(rows) ]\n\ndef connectIslands(data, islands):\n if data.cycle == True:\n connectCycleIslands(data, islands)\n else:\n connectRegularIslands(data, islands)\n\ndef connectCycleIslands(data, islands):\n rows, cols = len(islands), len(islands[0])\n dirs = [ NORTH, EAST, SOUTH, WEST ]\n changeCount = 0\n while solveMaze(data) == None:\n changeMade = False\n while not changeMade:\n row, col = random.randint(0, rows-1), random.randint(0, cols-1)\n start = islands[row][col]\n random.shuffle(dirs)\n (drow, dcol) = dirs[0]\n if (0 <= row + drow < rows and 0 <= col + dcol < cols) and \\\n (drow, dcol) not in start.bridges:\n target = islands[row + drow][col + dcol]\n start.bridges[(drow, dcol)] = target\n target.bridges[(-drow,-dcol)] = start\n changeMade = 
True\n changeCount += 1\n \ndef connectRegularIslands(data, islands):\n rows, cols = len(islands), len(islands[0])\n for i in range(rows*cols-1):\n makeBridge(islands)\n\ndef makeBridge(islands):\n rows, cols = len(islands), len(islands[0])\n dirs = [ NORTH, EAST, SOUTH, WEST ]\n while True:\n row, col = random.randint(0, rows-1), random.randint(0, cols-1)\n random.shuffle(dirs)\n (drow, dcol) = dirs[0]\n start = islands[row][col]\n if not (0 <= row + drow < rows and 0 <= col + dcol < cols):\n continue # out of bounds\n elif (drow, dcol) in start.bridges:\n continue # we already have that bridge!\n target = islands[row + drow][col + dcol]\n if start.number == target.number:\n continue # they're already connected- no cycles!\n start.bridges[(drow, dcol)] = target\n target.bridges[(-drow, -dcol)] = start\n renameIslands(start, target, islands)\n #only got here if a bridge was made\n return\n\ndef renameIslands(i1, i2, islands):\n lo, hi = min(i1.number, i2.number), max(i1.number, i2.number)\n for i in range(len(islands)):\n for j in range(len(islands[i])):\n if islands[i][j].number == hi: \n islands[i][j].number = lo\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(600, 600)" }, { "alpha_fraction": 0.645016074180603, "alphanum_fraction": 0.6681671738624573, "avg_line_length": 28.283018112182617, "blob_id": "18be9b1532f4694e04dd8891ebe58674fb3d250f", "content_id": "4a106eddf6a9066e7d4a345915628c21976a7188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "no_license", "max_line_length": 89, "num_lines": 53, "path": "/15112-CMU/FIFAworldcup copy2/GameInfo.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from Ball import *\nfrom const import *\nimport pygame , sys\nimport colors\nfrom Team import *\nfrom Text import *\nfrom Sounds import *\n\nMARGIN_RIGHT = BACKGROUND_WIDTH - GAP_SIZE_WIDTH -10\nMARGIN_TOP = int(TABLE_SCORE_HEIGHT + BACKGROUND_HEIGHT / 2 - GOAL_WIDTH / 2)\nDEFENDER_X = GAP_SIZE_WIDTH + 5 / 27 * GAME_WIDTH\nMIDFIELDER_X = GAP_SIZE_WIDTH + 8 / 27 * GAME_WIDTH\nSTRIKER_X = 
GAP_SIZE_WIDTH + 11 / 27 * GAME_WIDTH\nMIDDLE_Y = TABLE_SCORE_HEIGHT + GAP_SIZE_HEIGHT + GAME_HEIGHT / 2\n\nsound = Sounds()\nclass GameInfo():\n def __init__(self):\n self.redTeamScore = 0\n self.blueTeamScore = 0\n self.blueGoal = pygame.Rect(0 ,MARGIN_TOP ,GAP_SIZE_WIDTH + 10, GOAL_WIDTH )\n self.redGoal = pygame.Rect(MARGIN_RIGHT,MARGIN_TOP ,GAP_SIZE_WIDTH + 10, GOAL_WIDTH)\n \n def isGoal(self):\n if self.blueGoal.colliderect( BALL.rect ):\n self.redTeamScore += 1\n sound.isRedGoal()\n return True\n if self.redGoal.colliderect( BALL.rect ):\n self.blueTeamScore += 1\n sound.isBlueGoal()\n return True\n \n return False\n \n @staticmethod\n def initGame():\n blueTeam = BlueTeam()\n redTeam = RedTeam()\n\n return (blueTeam, redTeam)\n \n @staticmethod\n def gameAfterGoal():\n BALL.ballAfterGoal()\n return GameInfo.initGame()\n\n @staticmethod\n def renderScore(red, blue, screen):\n Text.showScore('Blue Team: ', colors.Blue, screen, 100)\n Text.showScore(str(blue), colors.Blue, screen, 240)\n Text.showScore(str(red), colors.Red, screen, 310)\n Text.showScore(': Red Team', colors.Red, screen, 450)\n\n " }, { "alpha_fraction": 0.5721004605293274, "alphanum_fraction": 0.5815588235855103, "avg_line_length": 43.27231216430664, "blob_id": "df2b97a53c484003675f1d8d369de13e61e682d8", "content_id": "80aec81f682d2e5a6e2f12ce8fb8ab1c6037cefb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19348, "license_type": "no_license", "max_line_length": 99, "num_lines": 437, "path": "/15112-CMU/Design Proposal and TP/TP/FIFAplayer.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# This file is mainly for the super class for all FIFA players\nimport pygame\nfrom enum import Enum\nfrom Ball import *\nimport os\n\n\n# helper function for loading images\ndef loadImgHelper(path, team, FIFAPlayerSize, sprites, key):\n for filename in os.listdir(path + team + \"/\" + key + \"/\"):\n if filename.endswith(\".png\"):\n image = pygame.image.load(path + team + \"/\" + key + \"/\" + filename)\n resizedImage = pygame.transform.scale(image,\n (FIFAPlayerSize, FIFAPlayerSize))\n if \"-\" not in key:\n sprites[key].append(resizedImage)\n else:\n if key == \"up-right\":\n char = 'upRight'\n sprites[char].append(resizedImage)\n elif key == \"up-left\":\n char = 'upLeft'\n sprites[char].append(resizedImage)\n elif key == \"down-right\":\n char = 'downRight'\n sprites[char].append(resizedImage)\n elif key == \"down-left\":\n char = 'downLeft'\n sprites[char].append(resizedImage)\n\n\n# helper function for add Images to sprites\ndef addImagesToSprites(sprites, team, FIFAPlayerSize):\n path = \"assets/images/player/\"\n for key in sprites:\n if key == 'up':\n loadImgHelper(path, team, FIFAPlayerSize, sprites, key)\n if key == 'down':\n loadImgHelper(path, team, FIFAPlayerSize, sprites, key)\n if key == \"right\":\n loadImgHelper(path, team, FIFAPlayerSize, sprites, key)\n if key == 'left':\n loadImgHelper(path, team, FIFAPlayerSize, sprites, key)\n if key == 'upRight':\n directory = \"up-right\"\n loadImgHelper(path, team, FIFAPlayerSize, sprites, directory)\n if key == 'upLeft':\n directory = \"up-left\"\n loadImgHelper(path, team, FIFAPlayerSize, sprites, directory)\n if key == 'downRight':\n directory = \"down-right\"\n loadImgHelper(path, team, FIFAPlayerSize, sprites, directory)\n if key == 'downLeft':\n directory = \"down-left\"\n loadImgHelper(path, team, FIFAPlayerSize, sprites, directory)\n\n\n# helper function for calculating 
distance between FIFAPlayers and the ball\ndef calculateDistanceBetweemBallAndPlayers(playerRect, ballRect):\n return (abs(playerRect[0] - ballRect[0])**2 + abs(playerRect[1] - ballRect[1])**2)\n\n\n\n# load distance values into dictionary\ndef loadDict(directions, runningSpeedX, runningSpeedY, distances, playerRect):\n runningSpeed = 6\n for direction in directions:\n playerRect1 = None\n ballRect = ball.rect\n if direction == \"up\":\n playerRect1 = (playerRect.centerx, playerRect.centery - runningSpeed)\n if direction == \"down\":\n playerRect1 = (playerRect.centerx, playerRect.centery + runningSpeed)\n if direction == \"left\":\n playerRect1 = (playerRect.centerx - runningSpeed, playerRect.centery)\n if direction == \"right\":\n playerRect1 = (playerRect.centerx + runningSpeed, playerRect.centery)\n if direction == \"upLeft\":\n playerRect1 = (playerRect.centerx - runningSpeedX,\n playerRect.centery - runningSpeedY)\n if direction == \"upRight\":\n playerRect1 = (playerRect.centerx + runningSpeedX,\n playerRect.centery - runningSpeedY)\n if direction == \"downLeft\":\n playerRect1 = (playerRect.centerx - runningSpeedX,\n playerRect.centery + runningSpeedY)\n if direction == \"downRight\":\n playerRect1 = (playerRect.centerx + runningSpeedX,\n playerRect.centery + runningSpeedY)\n distances[direction] = \\\n calculateDistanceBetweemBallAndPlayers(playerRect1, ballRect)\n\n\n# load distance values into dictionary\ndef loadDict2(directions, runningSpeedX, runningSpeedY, distances, playerRect, goalZoneRect):\n runningSpeed = 5\n for direction in directions:\n playerRect1 = None\n if direction == \"up\":\n playerRect1 = (playerRect.centerx, playerRect.centery - runningSpeed)\n if direction == \"down\":\n playerRect1 = (playerRect.centerx, playerRect.centery + runningSpeed)\n if direction == \"left\":\n playerRect1 = (playerRect.centerx - runningSpeed, playerRect.centery)\n if direction == \"right\":\n playerRect1 = (playerRect.centerx + runningSpeed, playerRect.centery)\n if direction == \"upLeft\":\n playerRect1 = (playerRect.centerx - runningSpeedX,\n playerRect.centery - runningSpeedY)\n if direction == \"upRight\":\n playerRect1 = (playerRect.centerx + runningSpeedX,\n playerRect.centery - runningSpeedY)\n if direction == \"downLeft\":\n playerRect1 = (playerRect.centerx - runningSpeedX,\n playerRect.centery + runningSpeedY)\n if direction == \"downRight\":\n playerRect1 = (playerRect.centerx + runningSpeedX,\n playerRect.centery + runningSpeedY)\n distances[direction] = \\\n calculateDistanceBetweemGoalZoneAndPlayers(playerRect1, goalZoneRect)\n\n\n# this is a small probability event generate function\ndef ballBeenCutIsSmallProb():\n import random\n if random.randint(0, 1000) <= 2:\n return True\n\n# this is the helper function to calculate distance between players and goalZone\ndef calculateDistanceBetweemGoalZoneAndPlayers(playerRect, goalZoneRect):\n return (abs(playerRect[0] - 0)**2 + abs(playerRect[1] - goalZoneRect.centery)**2)\n\n\n# this subclass of Enum is searched by google\n# from https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python\nclass GameState(Enum):\n findBall = 1\n free = 2\n attacking = 3\n computer = 4\n\n\nclass FIFAPlayer(pygame.sprite.Sprite):\n # cite the structure of my class __init__ is followed by 112 TA's slide\n # Dot class example like self.image self.rect and so on\n def __init__(self, centerx, centery, team):\n super(FIFAPlayer, self).__init__()\n self.FIFAPlayerSize = 60\n self.originalX = centerx\n self.originalY = centery\n 
self.velocity = pygame.Vector2(0, 0)\n        # These are the total eight directions\n        self.directions = ['up', 'down', 'right', 'left',\n                           'upRight', 'upLeft', 'downRight',\n                           'downLeft']\n        self.sprites = dict()\n        for direction in self.directions:\n            self.sprites[direction] = []\n        addImagesToSprites(self.sprites, team, self.FIFAPlayerSize)\n        if team == 'blue-team':\n            # this is the start direction when the game begins\n            self.direction = 'right'\n        elif team == 'red-team':\n            # this is the start direction when the game begins\n            self.direction = 'left'\n        self.images = self.sprites[self.direction]\n        # this is the start image when the game begins and\n        # it is also the first image used when the direction changes\n        self.image = self.images[0]\n        self.rect = pygame.Rect(centerx - self.FIFAPlayerSize / 2,\n                                centery - self.FIFAPlayerSize / 2,\n                                self.FIFAPlayerSize, self.FIFAPlayerSize)\n        # the purpose of playerRect is to cut the real player out of the image,\n        # since self.rect is the Rect of the image, not the real player,\n        # and the real player is smaller than the image\n        self.playerRect = pygame.Rect(self.rect.centerx - self.FIFAPlayerSize / 3.8,\n                                      self.rect.centery,\n                                      self.FIFAPlayerSize / 4,\n                                      self.FIFAPlayerSize / 4)\n        self.validFieldRect = pygame.Rect(boundaryElem()[4], boundaryElem()[5] + boundaryElem()[6],\n                                          boundaryElem()[2], boundaryElem()[3])\n        self.runningSpeed = 6\n        self.gameState = GameState.free\n        self.goalZoneRect = pygame.Rect(boundaryElem()[4],\n                                        (boundaryElem()[5]+boundaryElem()[6] +\n                                         1 / 3 * (boundaryElem()[3]) - 50), 150, 300)\n        self.goalZoneRect2 = pygame.Rect(boundaryElem()[4] + boundaryElem()[2] - 250,\n                                         (boundaryElem()[5] + boundaryElem()[6] +\n                                          1 / 3 * (boundaryElem()[3]) - 50), 150, 300)\n\n\n    # this function is used to update the direction of players\n    def updateDirect(self, direction):\n        self.direction = direction\n        self.images = self.sprites[self.direction]\n        self.image = self.images[1]\n\n\n    # this is the helper function for the move method\n    def moveHelper(self, direction, runningSpeedX, runningSpeedY):\n        if direction == 'up':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(0, -self.runningSpeed)):\n                self.rect = self.rect.move(0, -self.runningSpeed)\n        elif direction == 'down':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(0, self.runningSpeed)):\n                self.rect = self.rect.move(0, self.runningSpeed)\n        elif direction == 'left':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(-self.runningSpeed, 0)):\n                self.rect = self.rect.move(-self.runningSpeed, 0)\n        elif direction == 'right':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(self.runningSpeed, 0)):\n                self.rect = self.rect.move(self.runningSpeed, 0)\n        elif direction == 'upLeft':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(-runningSpeedX, -runningSpeedY)):\n                self.rect = self.rect.move(-runningSpeedX, -runningSpeedY)\n        elif direction == 'upRight':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(runningSpeedX, -runningSpeedY)):\n                self.rect = self.rect.move(runningSpeedX, -runningSpeedY)\n        elif direction == 'downLeft':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(-runningSpeedX, runningSpeedY)):\n                self.rect = self.rect.move(-runningSpeedX, runningSpeedY)\n        elif direction == 'downRight':\n            if self.validFieldRect.contains\\\n                (self.playerRect.move(runningSpeedX, runningSpeedY)):\n                self.rect = self.rect.move(runningSpeedX, runningSpeedY)\n\n    # I searched the move rect method like rect.move() on the pygame\n    # documentation, here is the link\n    # 
https://github.com/search?q=pygame.Rect.move&type=Code\n # this is the move method for FIFAPlayer\n def move(self, direction):\n runningSpeedX = self.runningSpeed / (2 ** 0.5)\n runningSpeedY = runningSpeedX\n self.updateDirect(direction)\n if ball.owner == self:\n self.afterPlayerMoveUpdateBall()\n self.moveHelper(direction, runningSpeedX, runningSpeedY)\n\n\n\n\n # This is the update method for FIFAPlayer\n def update(self):\n self.playerRect = pygame.Rect(self.rect.centerx - self.FIFAPlayerSize / 3.8,\n self.rect.centery,\n self.FIFAPlayerSize / 4,\n self.FIFAPlayerSize / 4)\n\n # this is the shoot method for FIFAPlayer in this method\n # since the ball is global I can call the ball's shoot method\n def shoot(self):\n self.runningSpeed = 6\n if ball.owner == self:\n if self.direction == 'left':\n vel = pygame.Vector2(-1, 0).normalize()\n ball.shoot(vel)\n elif self.direction == 'right':\n vel = pygame.Vector2(1, 0).normalize()\n ball.shoot(vel)\n elif self.direction == 'up':\n vel = pygame.Vector2(0, -1).normalize()\n ball.shoot(vel)\n elif self.direction == 'down':\n vel = pygame.Vector2(0, 1).normalize()\n ball.shoot(vel)\n elif self.direction == 'upRight':\n vel = pygame.Vector2(1, -1).normalize()\n ball.shoot(vel)\n elif self.direction == 'upLeft':\n vel = pygame.Vector2(-1, -1).normalize()\n ball.shoot(vel)\n elif self.direction == 'downRight':\n vel = pygame.Vector2(1, 1).normalize()\n ball.shoot(vel)\n elif self.direction == 'downLeft':\n vel = pygame.Vector2(-1, 1).normalize()\n ball.shoot(vel)\n\n\n # this is the cross ball method of FIFA player\n def crossBall(self):\n self.runningSpeed = 6\n if ball.owner == self:\n directionVector = pygame.Vector2(self.goalZoneRect2.centerx\n - self.rect.centerx,\n self.goalZoneRect2.centery\n - self.rect.centery)\n vel = directionVector.normalize()\n ball.shoot(vel)\n\n\n # this is the helper function for finding ball\n def changeGameState(self):\n self.gameState = GameState.computer\n self.getBall()\n\n\n # this is the helper function for find ball method\n def findBallHelper(self):\n if ball.owner != None:\n # the probability of ball been cut off by redTeam(computer)\n # when the ball is controlled by blueTeam(player)\n if ballBeenCutIsSmallProb():\n self.changeGameState()\n elif ball.owner == None:\n # the ball is controlled by nobody, computer will get the ball\n self.changeGameState()\n\n\n # this is the run to find ball method for FIFA players\n def runToFindBall(self):\n # prevent the case when blueTeam player shoot\n # been cut off by red team player\n if ball.owner == None:\n # since the min passBall speed vector's length is 15\n if (ball.speed[0] ** 2 +\n ball.speed[1] **2 ) ** 0.5 > 10:\n return None\n runningSpeedX = self.runningSpeed / (2 ** 0.5)\n runningSpeedY = runningSpeedX\n # the ball is controlled by computer\n if ball.owner == self:\n return None\n distances = dict()\n loadDict(self.directions, runningSpeedX,\n runningSpeedY, distances, self.playerRect)\n minDistance = distances[\"down\"]\n minDirection = \"down\"\n # select the direction which has the min distance to the ball\n for key in distances:\n if distances[key] < minDistance:\n minDistance = distances[key]\n minDirection = key\n self.move(minDirection)\n self.findBallHelper()\n\n\n # this is the get ball method and self is blue or red team player object\n def getBall(self):\n if ball.owner != self:\n if self.playerRect.colliderect(ball.rect):\n ball.owner = self\n\n\n # this method update ball's position after the ball was get by FIFAPlayer\n def 
afterPlayerMoveUpdateBall(self):\n        if ball.owner == self:\n            if self.direction == \"up\":\n                # this is the ball's position relative to the player;\n                # it makes the screen look better\n                ball.rect.centery = self.rect.centery + self.FIFAPlayerSize / 4\n            elif self.direction == \"down\":\n                ball.rect.centery = self.rect.centery + self.FIFAPlayerSize / 4\n            elif self.direction == \"left\":\n                ball.rect.centerx = self.rect.centerx - self.FIFAPlayerSize / 6\n            elif self.direction == \"right\":\n                ball.rect.centerx = self.rect.centerx + self.FIFAPlayerSize / 6\n            else:\n                # for directions like upRight, upLeft, downLeft and downRight\n                ball.rect.centerx = self.rect.centerx\n                ball.rect.centery = self.rect.centery + self.FIFAPlayerSize / 3\n\n\n    # this is the function to implement the game AI for\n    # blue and red team players and it is also the\n    # most difficult part of this game\n    def aiMove(self, aIMoveUp, aIMoveDown, aIMoveLeft, aIMoveRight):\n        offset = 100\n        if aIMoveUp and aIMoveLeft:\n            self.move('upLeft')\n        elif aIMoveUp and aIMoveRight:\n            self.move('upRight')\n        elif aIMoveDown and aIMoveLeft:\n            self.move('downLeft')\n        elif aIMoveDown and aIMoveRight:\n            self.move('downRight')\n        elif aIMoveLeft and self.rect.centerx > offset:\n            self.move('left')\n        elif aIMoveRight and self.rect.centerx < \\\n            boundaryElem()[7] - offset:\n            self.move('right')\n        elif aIMoveUp:\n            self.move('up')\n        elif aIMoveDown:\n            self.move('down')\n\n\n    # this is the gameAI helper function; it basically uses\n    # the difference between a player's original position and its\n    # current position on the screen to implement the game AI;\n    # the reason why I add the offset is that if offset == 0,\n    # there will be some error when the current player does not move;\n    # the following helper gives a series of bool values\n    # on whether to move up, down, right, left, upLeft, upRight, downLeft, downRight\n    def aiHelper(self, team):\n        offset = 15\n        aIMoveRight = True if self.rect.centerx - self.originalX + offset \\\n            < team.player.rect.centerx - team.player.originalX else False\n        aIMoveLeft = True if self.rect.centerx - self.originalX - offset \\\n            > team.player.rect.centerx - team.player.originalX else False\n        aIMoveUp = True if self.rect.centery - self.originalY > offset else False\n        aIMoveDown = True if - (self.rect.centery - self.originalY) > offset else False\n        self.aiMove(aIMoveUp, aIMoveDown, aIMoveLeft, aIMoveRight)\n\n\n    # this is the gameAI main function\n    def teamPlayerAI(self, team):\n        if self.gameState == GameState.computer:\n            runningSpeedX = 5 / (2 ** 0.5)\n            runningSpeedY = runningSpeedX\n            distances = dict()\n            playerRect = team.player.rect\n            loadDict2(self.directions, runningSpeedX,\n                      runningSpeedY, distances, playerRect, self.goalZoneRect)\n            minDistance = distances[\"down\"]\n            minDirection = \"down\"\n            for key in distances:\n                if distances[key] < minDistance:\n                    minDistance = distances[key]\n                    minDirection = key\n            self.move(minDirection)\n            squareOfdistance = 60000\n            if calculateDistanceBetweemGoalZoneAndPlayers\\\n                (self.rect, self.goalZoneRect) < squareOfdistance:\n                self.shoot()\n        elif self.gameState == GameState.attacking:\n            if self != team.player:\n                self.aiHelper(team)\n        elif self.gameState == GameState.findBall:\n            self.runToFindBall()\n\n" }, { "alpha_fraction": 0.7070707082748413, "alphanum_fraction": 0.7323232293128967, "avg_line_length": 27.428571701049805, "blob_id": "f26004db6a83e7781b6e46ade494d12a7a62d570", "content_id": "c6d7266103e06861ee8abf5d9a96f6cb46aca09a", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/15112-CMU/week4 cold cold/哈哈哈.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def valueOfLetter(letterScores, letter):\n indexOfLetter = ord(letter) - 97\n value = letterScores[indexOfLetter]\n return value\n\nletterScores = [1] * 26\nprint(valueOfLetter(letterScores,\"z\"))" }, { "alpha_fraction": 0.6048719882965088, "alphanum_fraction": 0.6242774724960327, "avg_line_length": 28.9135799407959, "blob_id": "21fb4d8f68f037230748f37eff734448013a9054", "content_id": "1e14ab7e256e01ee01e1d55be4c10d6e052db07d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2422, "license_type": "no_license", "max_line_length": 78, "num_lines": 81, "path": "/15112-CMU/112-opencv-tutorial-master/manualErosionAndDilation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# This file is meant to demonstrate erosions and dilations in OpenCV.\n# For some of the theory, please see wikipedia / etc.\n\nimport cv2\nimport numpy as np\n\nWHITE = 255\nBLACK = 0\nTHRESH = 127\n\nksize = 5\n\n# Dilation essentially makes white / bright areas bigger, and makes\n# black / dark images smaller. It is done by taking the max of the \n# kernel iterated over the entire image.\n# Only works for grayscale images\n# This function is SLOW\ndef manual_dilate(image):\n dilated = np.zeros(image.shape, np.uint8)\n i,j = 0,0\n offset = int(ksize / 2)\n for i in xrange(image.shape[0]):\n for j in xrange(image.shape[1]):\n a = image.take(range(i-offset, i+offset + 1), mode=\"wrap\", axis=0)\n b = a.take(range(j-offset, j+offset + 1), mode=\"wrap\", axis=1)\n dilated[i][j] = np.amax(b)\n return dilated\n\n# The opposite of dilation, erosion makes dark areas bigger, and bright\n# areas smaller. 
This is done by taking the min of the kernel, iterated\n# over the entire image.\n# Only works for grayscale images\n# This function is SLOW\ndef manual_erode(image):\n eroded = np.zeros(image.shape, np.uint8)\n i,j = 0,0\n offset = int(ksize / 2)\n for i in xrange(image.shape[0]):\n for j in xrange(image.shape[1]):\n a = image.take(range(i-offset, i+offset + 1), mode=\"wrap\", axis=0)\n b = a.take(range(j-offset, j+offset + 1), mode=\"wrap\", axis=1)\n eroded[i][j] = np.amin(b)\n return eroded\n\n# Our simple threshold function from before.\ndef threshold(image):\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, thresholded = cv2.threshold(grey, THRESH, WHITE, cv2.THRESH_BINARY)\n return thresholded\n\ndef main():\n\n window_name = \"Webcam!\"\n\n cam_index = 0\n cv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(cam_index)\n cap.open(cam_index)\n\n while True:\n\n ret, frame = cap.read()\n\n if frame is not None:\n # First we do a threshold on our image\n thresh = threshold(frame)\n cv2.imshow(window_name, thresh)\n cv2.imshow(\"Manual Dilate\", manual_dilate(thresh))\n cv2.imshow(\"Manual Erode\", manual_erode(thresh))\n \n k = cv2.waitKey(1) & 0xFF\n if k == 27: # Escape key\n cv2.destroyAllWindows()\n cap.release()\n break\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6569464206695557, "alphanum_fraction": 0.6650071144104004, "avg_line_length": 38.046295166015625, "blob_id": "e7f3e0f676be0e5c81bda7850973f5036056a4ea", "content_id": "2587fc6b574930ffca09562a6571c04bf20703e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4218, "license_type": "no_license", "max_line_length": 80, "num_lines": 108, "path": "/15112-CMU/week3/test0.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def replaceWhiteSpace(text):\n element = text.split()\n newText = \" \".join(element)\n return newText\n\n\ndef rightJustifyText(text, width):\n text = replaceWhiteSpace(text)\n lenOfStr = len(text)\n i = width\n judgeIndex = i\n while judgeIndex < lenOfStr:\n if text[i] == \" \":\n text = text[:i] + \"\\n\" + text[i + 1:]\n judgeIndex = i + width + 1\n i = judgeIndex\n else:\n while text[i] != \" \":\n i -= 1\n if text[i] == \" \":\n text = text[:i] + \"\\n\" + text[i + 1:]\n judgeIndex = i + width + 1\n i = judgeIndex\n if judgeIndex > lenOfStr - 1:\n break\n lines = text.split('\\n')\n result = \"\"\n for line in lines:\n spaces = width - len(line)\n if lines.index(line) != len(lines) - 1:\n result += (\" \" * spaces + line + \"\\n\")\n else:\n result += (\" \" * spaces + line)\n return result\n\n\n\n\ndef testRightJustifyText():\n print(\"Testing rightJustifyText()...\", end=\"\")\n text1 = \"\"\"\\\nWe hold these truths to be self-evident: that all men are created equal;\nthat they are endowed by their Creator with certain unalienable rights;\nthat among these are life, liberty, and the pursuit of happiness.\"\"\"\n text1Result = \"\"\"\\\n We hold these truths to be\nself-evident: that all men are\n created equal; that they are\n endowed by their Creator with\n certain unalienable rights;\n that among these are life,\n liberty, and the pursuit of\n happiness.\"\"\"\n assert(rightJustifyText(text1, 30) == text1Result)\n\n# testRightJustifyText()\n\n\n# rightJustifyText(text, 20)\ntext1 = \"\"\"\\\nWe hold these truths to be self-evident: that all men are created equal;\nthat they are endowed by their Creator with certain unalienable rights;\nthat among these are life, 
liberty, and the pursuit of happiness.\"\"\"\n\ntext1Result = \"\"\"\\\n We hold these truths to be\nself-evident: that all men are\n created equal; that they are\n endowed by their Creator with\n certain unalienable rights;\n that among these are life,\n liberty, and the pursuit of\n happiness.\"\"\"\n\ntext2 = \"\"\"\\\nThough, in reviewing the incidents of my administration,\nI am unconscious of intentional error, I am nevertheless too sensible of my\ndefects not to think it probable that I may have committed many errors.\nI shall also carry with me the hope that my country will view them with\nindulgence; and that after forty-five years of my life dedicated to its service\nwith an upright zeal, the faults of incompetent abilities will be consigned to\noblivion, as I myself must soon be to the mansions of rest.\n\nI anticipate with pleasing expectation that retreat in which I promise myself\nto realize the sweet enjoyment of partaking, in the midst of my fellow-citizens,\nthe benign influence of good laws under a free government,\nthe ever-favorite object of my heart, and the happy reward,\nas I trust, of our mutual cares, labors, and dangers.\"\"\"\ntext2Result = \"\"\"\\\n Though, in reviewing the incidents of my administration, I am\nunconscious of intentional error, I am nevertheless too sensible of my\n defects not to think it probable that I may have committed many\n errors. I shall also carry with me the hope that my country will view\n them with indulgence; and that after forty-five years of my life\n dedicated to its service with an upright zeal, the faults of\n incompetent abilities will be consigned to oblivion, as I myself must\n soon be to the mansions of rest. I anticipate with pleasing\n expectation that retreat in which I promise myself to realize the\n sweet enjoyment of partaking, in the midst of my fellow-citizens, the\n benign influence of good laws under a free government, the\never-favorite object of my heart, and the happy reward, as I trust, of\n our mutual cares, labors, and dangers.\"\"\"\n# print(rightJustifyText(text2, 70))\nprint(rightJustifyText(text1, 30))\n# print(text1Result)\nprint(repr(rightJustifyText(text1, 30)))\nprint(repr(text1Result))\nprint(rightJustifyText(text1, 30) == text1Result)\n\n" }, { "alpha_fraction": 0.613163948059082, "alphanum_fraction": 0.6307159066200256, "avg_line_length": 27.682119369506836, "blob_id": "826f65b7290babe92e221cdf0121ae587a31a325", "content_id": "659453c9b829c6b9dfcda989baf8db44da2306f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4330, "license_type": "no_license", "max_line_length": 98, "num_lines": 151, "path": "/15112-CMU/FIFAworldcup copy2/main.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import sys, pygame\nfrom pygame.math import Vector2\nimport time\nimport datetime\nimport colors\nfrom Sounds import *\nfrom Text import *\nfrom Background import *\nfrom Player import *\nfrom Ball import *\nfrom functions import *\nfrom const import *\nfrom SettingBoard import * \nfrom GameInfo import *\nfrom Team import *\n\n\nmillis = lambda: int(round(time.time() * 1000))\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d, %d\" % (80, 30)\n\npygame.init()\n\nsounds = Sounds()\nbackground = Background()\n \nsettingBoard = SettingBoard()\nclock = pygame.time.Clock()\n\nscreen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.display.set_caption(\"FIFAworldcup\")\n\n\ndef renderSettingButton():\n rect = pygame.Rect( 900 
, 0 , WIDTH_OF_PAUSE_GAME - 2 * MARGIN, BUTTON_HEIGHT+ 10)\n    titleSurf, titleRect = Text.makeTextObject(\"SETTING\", colors.Green)\n    pygame.draw.rect(screen, colors.Black, rect)\n    screen.blit(titleSurf, ( 930 , 0 , WIDTH_OF_PAUSE_GAME - MARGIN * 2, BUTTON_HEIGHT))\n    return rect\n\ndef renderSettingBoard(setting): \n    pygame.mouse.set_visible(True)\n    while True:\n        x = (WINDOW_WIDTH - WITH_SETTING_BOARD) /2\n        y = 0\n        screen.blit(settingBoard, (x, y))\n        for event in pygame.event.get():\n            \n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                (x, y) = pygame.mouse.get_pos()\n                check = settingBoard.click(x, y)\n                if( check == 1 ): # handle new game\n                    main()\n                if(setting.collidepoint(x,y)):\n                    return\n        \n        pygame.display.update()\n        clock.tick(FPS)\n\ndef renderTimeLeft(counter, text):\n    text = str(datetime.timedelta(seconds = counter))[3:].rjust(3) if counter > 0 else 'boom!'\n    counter -= 1\n    return counter , text\n\ndef renderWinner(red , blue):\n    myNewSurface = pygame.Surface((370, 50))\n    myNewSurface.fill(colors.Black)\n    screen.blit(myNewSurface, (WINDOW_HEIGHT / 2 + 30 , HEIGHT / 2 - 23 ))\n    if( red > blue):\n        Text.showTextWiner('Red Team Is The Winner', colors.Red, screen, WINDOW_WIDTH / 2 , HEIGHT / 2)\n    elif( red < blue):\n        Text.showTextWiner('Blue Team Is The Winner', colors.Red, screen, WINDOW_WIDTH / 2,HEIGHT / 2)\n    else: \n        Text.showTextWiner('The Two Teams Are Tied', colors.Red, screen, WINDOW_WIDTH / 2 , HEIGHT / 2)\n    \ndef main():\n    counter, text = 180, '3:00'.rjust(3)\n\n    font = pygame.font.SysFont('Consolas', 60)\n    pygame.time.set_timer(pygame.USEREVENT, 1000)\n\n\n    spriteBall = pygame.sprite.Group(BALL)\n    gameInfo = GameInfo()\n    sounds.music()\n    (blueTeam, redTeam) = GameInfo.initGame()\n\n    # setting()\n    isPause = False\n    startTime = millis()\n    while millis() - startTime <= 5000 * 60:\n        screen.blit(background, (0, 0))\n        GameInfo.renderScore(gameInfo.redTeamScore, gameInfo.blueTeamScore, screen)\n        setting = renderSettingButton()\n        \n        if isPause is False : \n            for event in pygame.event.get():\n                # QUIT\n                if event.type == pygame.QUIT:\n                    pygame.quit()\n                    sys.exit()\n                if event.type == pygame.USEREVENT and counter != -1 : \n                    counter ,text = renderTimeLeft(counter , text)\n                # Key Down\n                if event.type == pygame.KEYDOWN:\n                    if event.key == pygame.K_s:\n                        if BALL.owner!= blueTeam.player:\n                            blueTeam.changePlayer()\n                        else:\n                            blueTeam.passBall()\n                    elif event.key == pygame.K_d:\n                        blueTeam.player.shoot()\n                    elif event.key == pygame.K_SPACE:\n                        blueTeam.player.takeBall()\n                        pass\n\n                # Mouse button down\n                if event.type == pygame.MOUSEBUTTONDOWN:\n                    (x, y) = pygame.mouse.get_pos()\n                    if(setting.collidepoint(x,y)):\n                        renderSettingBoard(setting)\n\n        redTeam.handle()\n        blueTeam.handle()\n\n        #check ball in goal ?\n        if gameInfo.isGoal():\n            (blueTeam, redTeam) = gameInfo.gameAfterGoal()\n\n        #update sprite\n        blueTeam.update()\n        redTeam.update()\n        BALL.update(blueTeam, redTeam)\n\n        #draw to screen\n        blueTeam.draw(screen)\n        redTeam.draw(screen)\n        spriteBall.draw(screen)\n        if counter != -1:\n            screen.blit(font.render(text, True, (0, 0, 0)), (600, 5))\n        elif(counter == -1):\n            renderWinner(gameInfo.redTeamScore,gameInfo.blueTeamScore)\n\n        pygame.display.update()\n        clock.tick(FPS)\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.47488585114479065, "alphanum_fraction": 0.5730593800544739, "avg_line_length": 25.57575798034668, "blob_id": "cda189fc67541921f9f41f91f8e2aa53ed99402a", "content_id": "ec46cbf2fafbaa9ffc827947458e5e305adf7afd", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/15112-CMU/week9/power3ToN.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def powerHelper(n, count, lst):\n if n < 1:\n return lst\n else:\n n = n//3\n count += 1\n num = 3\n return powerHelper(n, count, lst + [num**count])\n\n\n# this is the main function for powersOf3toN(n)\ndef powersOf3ToN(n):\n if n <= 0:\n return []\n lst = []\n count = -1\n return powerHelper(n, count, lst)\n\ndef testPowersOf3ToN():\n print(\"Testing powersOf3ToN...\", end=\"\")\n assert(powersOf3ToN(10.5) == [1, 3, 9])\n assert(powersOf3ToN(9) == [1, 3, 9])\n assert(powersOf3ToN(0) == [])\n assert(powersOf3ToN(0.9876) == [])\n assert(powersOf3ToN(1) == [1])\n assert(powersOf3ToN(-10.5) == [])\n assert(powersOf3ToN(-43) == [])\n assert(powersOf3ToN(2186.5) == [1,3,9,27,81,243,729])\n assert(powersOf3ToN(2187) == [1,3,9,27,81,243,729,2187])\n print(\"Passed!\")\n\nprint(powersOf3ToN(10.5))\ntestPowersOf3ToN()" }, { "alpha_fraction": 0.5140544176101685, "alphanum_fraction": 0.5409326553344727, "avg_line_length": 33.774776458740234, "blob_id": "aae54710e30e01871481cf4e2e45917741f271a6", "content_id": "bfb6c3383ba50bc7d01d956ebfdc6cbe1da6f198", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15440, "license_type": "no_license", "max_line_length": 82, "num_lines": 444, "path": "/15112-CMU/week10/hw10.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################################\n# Hw10\n# Your Name: Ming Xu\n# Your Andrew ID:mxu2\n# Your Section:2N\n#################################################################\nimport os\nfrom functools import reduce\n\n# this is the collaborators function\ndef findLargestFileCollaborators():\n return \"yufeiche\"\n\n# this is the helper function for findLargestFile\ndef findLargestFileHelper(path, res, largestFilePath):\n if os.path.isfile(path):\n return path\n else:\n for filename in os.listdir(path):\n if filename == '.DS_Store':\n continue\n tmpPath = findLargestFileHelper(path + \"/\" + filename,\n res, largestFilePath)\n if not os.path.isfile(tmpPath):\n continue\n tmpValue = os.path.getsize(tmpPath)\n if tmpValue >= largestFilePath:\n largestFilePath = tmpValue\n res = tmpPath\n return res\n\n\n# this is the main function for findLargestFile\ndef findLargestFile(path):\n largestFilePath = 0\n res = \"\"\n return findLargestFileHelper(path, res, largestFilePath)\n\n\n# find the position of each num in board\ndef findPositionOfNum(num, board):\n rows = len(board)\n cols = rows\n rowOfNum = 0\n colOfNum = 0\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == num:\n rowOfNum = row\n colOfNum = col\n return rowOfNum, colOfNum\n\n\n# check whether one of the num in board has next step\ndef hasNextStep(num, nextNum, board):\n numRow, numCol = findPositionOfNum(num, board)\n nextNumRow, nextNumCol = findPositionOfNum(nextNum, board)\n if (nextNumRow - numRow) * (nextNumCol - numCol) == 2 or\\\n (nextNumRow - numRow) * (nextNumCol - numCol) == -2:\n return True\n else:\n return False\n\n\n# return True if board is a Knights Tour and return False otherwise\ndef isKnightsTour(board):\n if board == [[0]]:\n return False\n rows = len(board)\n cols = rows\n compareList = [i for i in range(1, cols ** 2 + 1)]\n tourList = []\n for row in range(rows):\n 
for col in range(cols):\n tourList.append(board[row][col])\n tourList.sort()\n # check whether tourList meet the requirement of Knights tour\n if compareList != tourList:\n return False\n finalNum = rows * cols\n knightsTour = True\n for num in range(1, finalNum):\n nextNum = num + 1\n if hasNextStep(num, nextNum, board):\n knightsTour = True\n continue\n else:\n knightsTour = False\n break\n return knightsTour\n\n\n# this is the create knight's board function\ndef createKnightBoard(n):\n rows, cols = n, n\n board = []\n for i in range(n):\n board += [[0]*cols]\n return board\n\n\n# this is the recursive function for createKnightsTour\ndef traverse(board, x, y, count):\n rows, cols = len(board), len(board)\n board[x][y] = count\n if count >= rows * cols:\n return board\n directionY = [2, 2, 1, -1, -2, -2, -1, 1]\n directionX = [-1, 1, 2, 2, 1, -1, -2, -2]\n numOfdirection = 8\n for i in range(numOfdirection):\n nextX = x + directionX[i]\n nextY = y + directionY[i]\n if (nextX < 0 or nextX >= cols or nextY < 0 or nextY >= rows)\\\n or board[nextX][nextY] != 0:\n continue\n tmpboard = traverse(board, nextX, nextY, count + 1)\n if tmpboard != None:\n return tmpboard\n # if tmpboard is None, then undo the move\n board[nextX][nextY] = 0\n\n\n# This is the main function for createKnightsTour\ndef createKnightsTour(n):\n if n == 1:\n return [[1]]\n if n == 2:\n return None\n board = createKnightBoard(n)\n startRow = 0\n startCol = 0\n count = 1\n traverse(board, startRow, startCol, count)\n if isKnightsTour(board):\n return board\n else:\n return None\n\n\n# this is the decorator for makeExample2DList\ndef print2DListResult(makeExample2DList):\n def printLst(n):\n lst = makeExample2DList(n)\n rows = len(lst)\n cols = len(lst[0])\n res = ''\n for row in range(rows):\n res += (\"[\" + \" \"*n)\n for col in range(cols):\n numOfspace = n\n lenOfnum = len(str(lst[row][col]))\n if lenOfnum > 1:\n numOfspace -= (lenOfnum - 1)\n res += str(lst[row][col])\n res += \" \" * numOfspace\n res += \"]\\n\"\n return res\n return printLst\n\n\n# this is the main function for make example 2D list\n@print2DListResult\ndef makeExample2DList(n):\n myList=[]\n for row in range(n):\n myList.append([col*row for col in range(n)])\n return myList\n\n\n# this is the lambda function for myjoin\nmyJoin = lambda L, sep: str(L[0])\\\n if len(L) == 1 else reduce(lambda x, y: (x + y),\n list(map(lambda x: str(x) + sep,\n L[:-1]))) + str(L[-1])\n\n\n#ignore_rest line\n###################################################\nfrom tkinter import *\n# the maximum level for this animation code is 5,\n# because I only give it five different colors for different levels\n# given more colors it can run on more higher levels\n\n# this is the init function to save data in animation\ndef init(data):\n data.level = 1\n data.depth = -1\n data.color = ['yellow', 'red', 'orange', '#00FF00', 'blue']\n data.lineColor = ['#F5FFFA', 'yellow', 'orange', '#E0FFFF', 'pink']\n data.smallCircleColor = ['#F5FFFA', 'red', 'orange', '#87CEFA', 'pink']\n\n\n# this is the helper function for drawLinesAndCircles\ndef drawCircle(canvas, data, xc, yc, r, depth):\n canvas.create_oval(xc - r / 4, (yc - 2 * r) - r / 4,\n xc + r / 4, (yc - 2 * r) + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - r / 4, (yc + 2 * r) - r / 4,\n xc + r / 4, (yc + 2 * r) + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval((xc - 2 * r) - r / 4, yc - r / 4,\n (xc - 2 * r) + r / 4, yc + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval((xc + 
2 * r) - r / 4, yc - r / 4,\n (xc + 2 * r) + r / 4, yc + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc + 2 ** 0.5 * r - r / 4, yc - 2 ** 0.5 * r - r / 4,\n xc + 2 ** 0.5 * r + r / 4, yc - 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - 2 ** 0.5 * r - r / 4, yc - 2 ** 0.5 * r - r / 4,\n xc - 2 ** 0.5 * r + r / 4, yc - 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - 2 ** 0.5 * r - r / 4, yc + 2 ** 0.5 * r - r / 4,\n xc - 2 ** 0.5 * r + r / 4, yc + 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc + 2 ** 0.5 * r - r / 4, yc + 2 ** 0.5 * r - r / 4,\n xc + 2 ** 0.5 * r + r / 4, yc + 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n\n\n# this is the drawLinesAndCircles main function\ndef drawLinesAndCircles(canvas, data, xc, yc, r, depth):\n canvas.create_line(xc, yc, xc, yc - 2 * r, fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc, yc + 2 * r, fill=data.lineColor[depth])\n canvas.create_line(xc + 2 * r, yc, xc, yc, fill=data.lineColor[depth])\n canvas.create_line(xc - 2 * r, yc, xc, yc, fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc + 2 ** 0.5 * r, yc - 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc - 2 ** 0.5 * r, yc - 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc - 2 ** 0.5 * r, yc + 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc + 2 ** 0.5 * r, yc + 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n drawCircle(canvas, data, xc, yc, r, depth)\n\n\n# this is the level0 draw text function\ndef drawText(data, canvas, margin):\n canvas.create_text(data.width / 2, 0,\n text=\"Level %d Fractal\" % (data.level),\n font=\"Arial \" + str(int(margin / 3)) + \" bold\",\n anchor=\"n\", fill='white')\n canvas.create_text(data.width / 2, margin,\n text=\"Use arrows to change level\",\n font=\"Arial \" + str(int(margin / 4)),\n anchor=\"s\", fill='white')\n\n\n# this is the helper function for drawFractalSun main function\ndef drawForLevelO(data, canvas):\n # this following code is only for state level == 0\n margin = min(data.width, data.height) // 10\n xc = data.width // 2\n yc = data.height // 2\n r = 0.6 * data.width // 5\n canvas.create_oval(xc - r, yc - r, xc + r, yc + r, fill='#FFDAB9')\n drawText(data, canvas, margin)\n\n\n# this is the main function for drawFractalSun\ndef drawFractalSun(data, canvas, xc, yc, r, level, depth):\n if level == 0:\n # this following code is only for state level == 0\n drawForLevelO(data, canvas)\n elif level == 1:\n drawLinesAndCircles(canvas, data, xc, yc, r, depth)\n canvas.create_oval(xc - r, yc - r, xc + r, yc + r, fill=data.color[depth])\n else:\n # the following code the recursive case for this problem\n drawFractalSun(data, canvas, xc, yc, r, level - 1, depth + 1)\n drawFractalSun(data, canvas, xc, yc - 2 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc, yc + 2 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 * r, yc, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 * r, yc, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 ** 0.5 * r, yc - 2 ** 0.5 * r, r / 4,\n level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 ** 0.5 * r, yc - 2 ** 0.5 * r, r / 4,\n level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 ** 0.5 * r, yc + 2 ** 0.5 * r, r / 4,\n level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 ** 0.5 * r, yc + 2 ** 0.5 
* r, r / 4,\n level - 1, depth - 1)\n\n\n# this is the keyboard event handler\ndef keyPressed(event, data):\n if event.keysym in [\"Up\", \"Right\"]:\n data.level += 1\n elif (event.keysym in [\"Down\", \"Left\"]) and (data.level > 0):\n data.level -= 1\n\n\n# this function only draw background of canvas\ndef drawCanvas(canvas, data):\n topX = 0\n topY = 0\n canvas.create_rectangle(topX, topY,\n data.width, data.height, fill='black')\n\n\n# this is the redrawAll function\ndef redrawAll(canvas, data):\n drawCanvas(canvas, data)\n margin = min(data.width, data.height)//10\n xc, yc = data.width // 2, data.height // 2\n r = 0.6*data.width // 5\n drawFractalSun(data, canvas, xc, yc, r, data.level, data.depth)\n canvas.create_text(data.width / 2, 0,\n text=\"Level %d Fractal\" % (data.level),\n font=\"Arial \" + str(int(margin / 3)) + \" bold\",\n anchor=\"n\", fill = 'white')\n canvas.create_text(data.width / 2, margin,\n text=\"Use arrows to change level\",\n font=\"Arial \" + str(int(margin / 4)),\n anchor=\"s\", fill = 'white')\n\n\n# this is the mouse event handler\ndef mousePressed(event, data): pass\n\n\n# this is the timer function\ndef timerFired(data): pass\n\n####################################\n# use the run function as-is frame\n####################################\ndef run(width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(500, 500)\n#################################################\n# Hw10 Test Functions\n#################################################\ndef testFindLargestFile():\n print(\"Testing findLargestFile...\", end=\"\")\n assert (findLargestFile(\"sampleFiles/folderA\") ==\n \"sampleFiles/folderA/folderC/giftwrap.txt\")\n assert (findLargestFile(\"sampleFiles/folderB\") ==\n \"sampleFiles/folderB/folderH/driving.txt\")\n assert (findLargestFile(\"sampleFiles/folderB/folderF\") == \"\")\n print('passed!')\n\ndef testMyJoin():\n print(\"Testing myJoin...\", end=\"\")\n assert(myJoin(['a','b','c'], '-') == 'a-b-c')\n assert(myJoin([1, 2, 3], '@@') == '1@@2@@3')\n assert(myJoin([1, 2, 'c', 'd'], ''))\n assert(myJoin([42], '') == '42')\n print(\"Passed!\")\n\ndef testCreateKnightsTour():\n print(\"Testing createKnightsTour...\", end=\"\")\n #The only n=1 board:\n board0 = [[1]]\n\n #A few 
different n=5 boards:\n board1 = [\n [ 1, 20, 9, 14, 3 ],\n [ 10, 15, 2, 19, 24 ],\n [ 21, 8, 25, 4, 13 ],\n [ 16, 11, 6, 23, 18 ],\n [ 7, 22, 17, 12, 5 ],\n ]\n\n board2 = [\n [ 1, 18, 23, 12, 7 ],\n [ 24, 13, 8, 17, 22 ],\n [ 19, 2, 25, 6, 11 ],\n [ 14, 9, 4, 21, 16 ],\n [ 3, 20, 15, 10, 5 ],\n ]\n\n board3 = createKnightsTour(5)\n board6 = createKnightsTour(1)\n\n #Our isKnightsTour function from HW5 should return True for each\n assert(isKnightsTour(board0)==True)\n assert(isKnightsTour(board1)==True)\n assert(isKnightsTour(board2)==True)\n assert(isKnightsTour(board3) ==True)\n assert(createKnightsTour(3) == None)\n assert(createKnightsTour(4) == None)\n assert(isKnightsTour(board6) == True)\n assert(createKnightsTour(2) == None)\n assert(createKnightsTour(1) == [[1]])\n print(\"Passed!\")\n\ndef testAll():\n testFindLargestFile()\n testMyJoin()\n testCreateKnightsTour()\n lst = makeExample2DList(8)\n print(lst)\n\n\ndef main():\n testAll()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6369020938873291, "alphanum_fraction": 0.6469601988792419, "avg_line_length": 33.407691955566406, "blob_id": "e14912f9a263824bb8f5392deb4711d5bdb6da86", "content_id": "0628b5ba478c8ac019a6ed9991c14e3b01217b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9002, "license_type": "no_license", "max_line_length": 160, "num_lines": 260, "path": "/15112-CMU/FIFAworldcup copy/FBPlayer.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport os\nimport time\n\nfrom const import *\nfrom functions import *\nfrom Enum import *\nfrom Ball import BALL\n\nSIZE = PLAYER_SIZE\n\nPATH = \"assets/images/player/\"\n\nMARGIN_TOP = int(TABLE_SCORE_HEIGHT + BACKGROUND_HEIGHT / 2 - GOAL_WIDTH / 2)\n\nDISTANCE_NEW_FRAME = 20\n\nINTERVAL = 10\n\nclass FBPlayer(pygame.sprite.Sprite):\n def __init__(self, team, centerx, centery):\n super(FBPlayer, self).__init__()\n\n self.defaultX = centerx\n self.defaultY = centery\n\n self.index = 1\n\n self.state = State.FREE\n\n self.sprites = {\n UP: [],\n DOWN: [],\n RIGHT: [],\n LEFT: [],\n UP_LEFT: [],\n UP_RIGHT: [],\n DOWN_LEFT: [],\n DOWN_RIGHT: [],\n }\n\n self.totalDistance = 0\n\n # load images for sprites\n loadImages(self.sprites[UP], PATH + team + '/up/')\n loadImages(self.sprites[DOWN], PATH + team + '/down/')\n loadImages(self.sprites[RIGHT], PATH + team + '/right/')\n loadImages(self.sprites[LEFT], PATH + team + '/left/')\n loadImages(self.sprites[UP_LEFT], PATH + team + '/up-left/')\n loadImages(self.sprites[UP_RIGHT], PATH + team + '/up-right/')\n loadImages(self.sprites[DOWN_LEFT], PATH + team + '/down-left/')\n loadImages(self.sprites[DOWN_RIGHT], PATH + team + '/down-right/')\n\n if team is BLUE_TEAM:\n self.direction = RIGHT\n else: \n self.direction = LEFT\n\n self.images = self.sprites[self.direction]\n self.image = self.images[1]\n\n self.rect = pygame.Rect(centerx - SIZE / 2, centery - SIZE / 2, SIZE, SIZE)\n self.controlRect = getControlRect(self.rect.centerx, self.rect.centery)\n\n self.directVector = pygame.Vector2(0, 0)\n\n def update(self):\n self.controlRect = getControlRect(self.rect.centerx, self.rect.centery)\n\n def move(self, direction, speed = SPEED_DEFAULT):\n if BALL.owner != None:\n self.updateBallPosition()\n self.updateDirection(direction)\n\n # update new frame\n if self.totalDistance > DISTANCE_NEW_FRAME:\n self.image = self.images[self.index]\n self.totalDistance = 0\n self.index += 1\n\n if self.index >= 
len(self.images):\n            self.index = 0\n\n        # self.totalDistance += speed\n\n        if direction is UP and RECT_GAME.contains(self.controlRect.move(0, -speed)):\n            self.rect = self.rect.move(0, -speed)\n        elif direction is DOWN and RECT_GAME.contains(self.controlRect.move(0, speed)):\n            self.rect = self.rect.move(0, speed)\n        elif direction is LEFT and RECT_GAME.contains(self.controlRect.move(-speed, 0)):\n            self.rect = self.rect.move(-speed, 0)\n        elif direction is RIGHT and RECT_GAME.contains(self.controlRect.move(speed, 0)):\n            self.rect = self.rect.move(speed, 0)\n        elif direction is UP_LEFT and RECT_GAME.contains(self.controlRect.move(- speed / SQRT_2, - speed / SQRT_2)):\n            self.rect = self.rect.move(- speed / SQRT_2, - speed / SQRT_2)\n        elif direction is UP_RIGHT and RECT_GAME.contains(self.controlRect.move(speed / SQRT_2, - speed / SQRT_2)):\n            self.rect = self.rect.move(speed / SQRT_2, - speed / SQRT_2)\n        elif direction is DOWN_LEFT and RECT_GAME.contains(self.controlRect.move( - speed / SQRT_2, speed / SQRT_2)):\n            self.rect = self.rect.move( - speed / SQRT_2, speed / SQRT_2)\n        elif direction is DOWN_RIGHT and RECT_GAME.contains(self.controlRect.move( speed / SQRT_2, speed / SQRT_2)):\n            self.rect = self.rect.move( speed / SQRT_2, speed / SQRT_2)\n        \n        #if BALL.owner == self:\n        #     if BALL.owner != None:\n        #     self.updateBallPosition()\n\n    def updateDirection(self, direction):\n        # if self.direction is direction:\n        #     return None\n\n        self.direction = direction\n        self.images = self.sprites[self.direction]\n        # self.totalDistance = 0\n        # self.index = 1\n        self.image = self.images[0]\n\n    def runFindBall(self):\n        if BALL.owner == self:\n            # return None\n            pass\n        \n        # if BALL.owner == None and BALL.velocity.length() > 8:\n        #     return None\n\n        distances = {\n            UP: caculateDistance((self.controlRect.centerx, self.controlRect.centery - SPEED_DEFAULT), BALL.rect),\n            DOWN: caculateDistance((self.controlRect.centerx, self.controlRect.centery + SPEED_DEFAULT), BALL.rect),\n            RIGHT: caculateDistance((self.controlRect.centerx + SPEED_DEFAULT, self.controlRect.centery), BALL.rect),\n            LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT, self.controlRect.centery), BALL.rect),\n            UP_LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT / SQRT_2, self.controlRect.centery - SPEED_DEFAULT / SQRT_2), BALL.rect),\n            UP_RIGHT: caculateDistance((self.controlRect.centerx + SPEED_DEFAULT / SQRT_2, self.controlRect.centery - SPEED_DEFAULT / SQRT_2), BALL.rect),\n            DOWN_LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT / SQRT_2, self.controlRect.centery + SPEED_DEFAULT / SQRT_2), BALL.rect),\n            DOWN_RIGHT: caculateDistance((self.controlRect.centerx + SPEED_DEFAULT / SQRT_2, self.controlRect.centery + SPEED_DEFAULT / SQRT_2), BALL.rect),\n        }\n\n        # direction = UP\n        # distance = distances[UP]\n\n        direction = DOWN\n        distance = distances[DOWN]\n\n        for key, value in distances.items():\n            if value < distance:\n                distance = value\n                direction = key\n        # print(direction)\n        # print(distance)\n        \n        # if distance < 1:\n        #     # return None\n\n        self.move(direction)\n\n\n        if BALL.owner != None:\n            # when the ball is controlled by the blue team\n            # this is the probability of it being intercepted\n            if random.randint(1, 10000) == 1:\n                self.takeBall()\n                self.state = State.COMPUTER\n            else:\n                pass\n            # else:\n        elif BALL.owner == None:\n            # if nobody controls the ball, the red team takes it\n            self.takeBall()\n            self.state = State.COMPUTER\n\n    def takeBall(self):\n        if BALL.owner == self:\n            return None\n\n        if self.controlRect.colliderect(BALL.rect):\n            BALL.owner = self\n\n    def updateBallPosition(self):\n        # if BALL.owner != self:\n        #     return None\n        if 
BALL.owner == self:\n # BALL.rect.centerx = self.rect.centerx\n # BALL.rect.centery = self.rect.centery + SIZE / 3\n\n # if (self.direction is LEFT):\n # BALL.rect.centerx = self.rect.centerx\n # elif (self.direction is RIGHT):\n # BALL.rect.centerx = self.rect.centerx\n # elif (self.direction is UP):\n # BALL.rect.centery = self.rect.centery\n # elif (self.direction is DOWN):\n # BALL.rect.centery = self.rect.centery\n\n # BALL.rect.centerx = self.rect.centerx\n # BALL.rect.centery = self.rect.centery + SIZE / 3\n #\n if (self.direction is LEFT):\n BALL.rect.centerx = self.rect.centerx - SIZE / 5\n elif (self.direction is RIGHT):\n BALL.rect.centerx = self.rect.centerx + SIZE / 5\n elif (self.direction is UP):\n BALL.rect.centery = self.rect.centery + SIZE / 4\n elif (self.direction is DOWN):\n BALL.rect.centery = self.rect.centery + SIZE / 4\n else:\n BALL.rect.centerx = self.rect.centerx\n BALL.rect.centery = self.rect.centery + SIZE / 3\n\n def shoot(self):\n if BALL.owner == self:\n BALL.shoot(convertDirectVector(self.direction))\n # else:\n # pass\n\n def performAction(self, team):\n if self.state == State.ATTACK:\n coefficient = BALL.rect.centerx / BACKGROUND_WIDTH * 2\n if coefficient < 0.8:\n coefficient = 0.8\n\n shouldMoveRight = self.rect.centerx - self.defaultX * coefficient + INTERVAL * coefficient < team.player.rect.centerx - team.player.defaultX * coefficient\n shouldMoveLeft = self.rect.centerx - self.defaultX * coefficient - INTERVAL * coefficient > team.player.rect.centerx - team.player.defaultX * coefficient\n shouldMoveUp = self.rect.centery - self.defaultY > INTERVAL * coefficient\n shouldMoveDown = self.rect.centery - self.defaultY < - INTERVAL * coefficient\n\n if shouldMoveUp and shouldMoveLeft:\n self.move(UP_LEFT)\n elif shouldMoveUp and shouldMoveRight:\n self.move(UP_RIGHT)\n elif shouldMoveDown and shouldMoveLeft:\n self.move(DOWN_LEFT)\n elif shouldMoveDown and shouldMoveRight:\n self.move(DOWN_RIGHT)\n elif shouldMoveUp:\n self.move(UP)\n elif shouldMoveDown:\n self.move(DOWN)\n elif shouldMoveRight and self.rect.centerx < BACKGROUND_WIDTH - 100:\n self.move(RIGHT)\n elif shouldMoveLeft and self.rect.centerx > 100:\n self.move(LEFT)\n elif self.state == State.FIND_BALL:\n self.runFindBall()\n elif self.state == State.COMPUTER:\n self.move(LEFT)\n\n if (caculateDistance(self.rect.center, pygame.Rect(0 ,MARGIN_TOP ,GAP_SIZE_WIDTH + 10, GOAL_WIDTH ).center) < 50000):\n self.shoot()\n # pass\n \n \n\n\ndef loadImages(list, path):\n for filename in os.listdir(path):\n if filename.endswith(\".png\"):\n image = pygame.image.load(path + filename)\n resizeImage = pygame.transform.scale(image, (SIZE, SIZE))\n list.append(resizeImage)\n\ndef getControlRect(centerx, centery):\n return pygame.Rect(centerx - SIZE / 4, centery, SIZE / 2, SIZE / 2)\n\n\n" }, { "alpha_fraction": 0.4610297679901123, "alphanum_fraction": 0.491733580827713, "avg_line_length": 21.284210205078125, "blob_id": "95eb7244d6caf6436bbe9c73b64ebbda440c46da", "content_id": "32167c7b7ee9e1e666b0b8f02c445abeac390210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2117, "license_type": "no_license", "max_line_length": 77, "num_lines": 95, "path": "/15112-CMU/paractice/ex1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def substrCount(n, s):\n counter = 0\n # following code count palindrome len of Palindrome str longer than 2\n for i in range(n):\n offset = 1\n while i - offset >= 0 and i + offset < n:\n if s[i - 
offset] == s[i + offset]:\n counter += 1\n offset += 1\n continue\n else:\n break\n # following code counter repeats like \"aa\"\n repeats = 0\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n repeats += 1\n res = counter + repeats + n # n is the palindrome with len 1\n return res\n\n\n# print(substrCount(3, \"aba\"))\n# print(substrCount(5, 'asasd'))\n# print(substrCount(7, \"abcbaba\"))\n# print(substrCount(8, \"mnonopoo\"))\n# print(substrCount(7, 'abcbaba'))\n# print(substrCount(5, \"nnnnn\"))\n# print(substrCount(4, \"nnnn\"))\n\ndef function1():\n try:\n 1/0\n print(\"Yeah!\")\n except:\n print(\"This function has run time error\")\n\n# function1()\n\n\n\ndef quicksort(arr):\n if len(arr) <= 1:\n return arr\n pivot = [arr[0]]\n left = quicksort([arr[i] for i in range(len(arr)) if arr[i] < pivot[0]])\n right = quicksort([arr[i] for i in range(len(arr)) if arr[i] > pivot[0]])\n return left + pivot + right\n\nprint(quicksort([3, 5, 2, 1]))\nprint(quicksort([13, 3434, 35, 4345]))\nprint(quicksort([0]))\nprint(quicksort([1, 2, 3, 4, 5]))\n\n\n\nprint(\" \")\nprint(\"haha merge sort !\")\n\ndef merge(arr, lo, mid, hi):\n B = [0] * (hi - lo)\n i = lo\n j = mid\n k = 0\n while (i < mid) and (j < hi):\n if arr[i] <= arr[j]:\n B[k] = arr[i]\n i += 1\n else:\n B[k] = arr[j]\n j += 1\n k += 1\n while i < mid:\n B[k] = arr[i]\n i += 1\n k += 1\n while j < hi:\n B[k] = arr[j]\n j += 1\n k += 1\n for k in range(0, hi - lo):\n arr[lo + k] = B[k]\n\n\ndef mergesort(arr, lo, hi):\n if (hi - lo) <= 1:\n return\n mid = lo + (hi - lo) // 2\n mergesort(arr, lo, mid)\n mergesort(arr, mid, hi)\n merge(arr, lo, mid, hi)\n\n\narr = [3, 2, 1]\n(mergesort(arr, 0, 3))\nprint(arr)\n" }, { "alpha_fraction": 0.6182352304458618, "alphanum_fraction": 0.6358639001846313, "avg_line_length": 37.92620086669922, "blob_id": "2735545f7573e4fa1f94bb1f6d7897d649823a4f", "content_id": "6b8fb70bd41d0d9e1627a5c3de6097f8a65e7cc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10551, "license_type": "no_license", "max_line_length": 94, "num_lines": 271, "path": "/15112-CMU/Design Proposal and TP/TP/BlueAndRedTeam.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file is mainly for class BlueAndRedTeam, class blueTeam, class redTeam\nfrom playerAndComputer import *\nimport pygame\n\n\n# this function get the element value of game\ndef getElem():\n marginWidth = 35\n marginHeight = 17\n scoreTableHeight = 60\n topLeftX = marginWidth\n topLeftY = scoreTableHeight + marginHeight\n displayWidth = 1100\n displayHeight = 620\n fieldWidth = displayWidth - 2 * marginWidth\n fieldHeight = displayHeight - 2 * marginHeight\n return topLeftX, topLeftY, fieldWidth, fieldHeight,\\\n marginWidth, marginHeight, scoreTableHeight,\\\n displayWidth, displayHeight\n\n\n# get the player's direction vector\ndef getDirectionVector(direction):\n if direction == 'left':\n vector = pygame.Vector2(-1, 0).normalize()\n return vector\n elif direction == 'right':\n vector = pygame.Vector2(1, 0).normalize()\n return vector\n elif direction == 'up':\n vector = pygame.Vector2(0, -1).normalize()\n return vector\n elif direction == 'down':\n vector = pygame.Vector2(0, 1).normalize()\n return vector\n elif direction == 'upRight':\n vector = pygame.Vector2(1, -1).normalize()\n return vector\n elif direction == 'upLeft':\n vector = pygame.Vector2(-1, -1).normalize()\n return vector\n elif direction == 'downRight':\n vector = pygame.Vector2(1, 1).normalize()\n return vector\n 
elif direction == 'downLeft':\n        vector = pygame.Vector2(-1, 1).normalize()\n        return vector\n\n\n# this is the helper function that calculates the angle between two team players\ndef calculateAngleBetweenTwoPlayers(directionVector, comparingVector):\n    return abs(directionVector.angle_to(comparingVector))\n\n\n# this is the helper function to resize the velocity\ndef reSizeVel(passVelVector):\n    passVelVectorLen = (passVelVector[0]**2\n                        + passVelVector[1]**2)**0.5\n    if passVelVectorLen > 400:\n        passVelVector = passVelVector.normalize() * 30\n        return passVelVector\n    elif passVelVectorLen > 300:\n        passVelVector = passVelVector.normalize() * 25\n        return passVelVector\n    elif passVelVectorLen > 200:\n        passVelVector = passVelVector.normalize() * 15\n        return passVelVector\n    elif passVelVectorLen > 100:\n        passVelVector = passVelVector.normalize() * 14\n        return passVelVector\n    elif passVelVectorLen <= 100:\n        passVelVector = passVelVector.normalize() * 14\n        return passVelVector\n\n\n\nclass BlueAndRedTeam(pygame.sprite.Group):\n    def __init__(self):\n        super(BlueAndRedTeam, self).__init__()\n        self.players = []\n        self.player = None\n\n    # this method chooses the team player closest to the ball\n    def chooseClosestTeamPlayer(self):\n        distanceDict = dict()\n        for player in self.players:\n            distance = calculateDistanceBetweemBallAndPlayers\\\n                (player.rect, ball.rect)\n            distanceDict[player] = distance\n        minDistance = calculateDistanceBetweemBallAndPlayers\\\n            (self.players[0].rect, ball.rect)\n        minPlayer = self.players[0]\n        for player, distance in distanceDict.items():\n            if distance <= minDistance:\n                minDistance = distance\n                minPlayer = player\n        return minPlayer\n\n    # this method changes the controlled player\n    def changePlayer(self):\n        self.player.runningSpeed = 6\n        if ball.owner != self.player:\n            self.player = self.chooseClosestTeamPlayer()\n        self.player.gameState = GameState.free\n\n\n    # this method passes the ball to its team members\n    def passBallToTeamMembers(self):\n        self.player.runningSpeed = 6\n        curDirection = self.player.direction\n        direcVect = getDirectionVector(curDirection)\n        targetPlayers = []\n        angleDict = dict()\n        for player in self.players:\n            if player != self.player:\n                targetPlayers += [player]\n        for player in targetPlayers:\n            comparingVector = pygame.Vector2(player.rect.x - self.player.rect.x,\n                                             player.rect.y - self.player.rect.y)\n            angle = calculateAngleBetweenTwoPlayers(direcVect, comparingVector)\n            angleDict[player] = angle\n        targetPlayer = targetPlayers[0]\n        vector0 = pygame.Vector2(targetPlayer.rect.x - self.player.rect.x,\n                                 targetPlayer.rect.y - self.player.rect.y)\n        minAngle = calculateAngleBetweenTwoPlayers(direcVect, vector0)\n        for player, angle in angleDict.items():\n            if angle <= minAngle:\n                minAngle = angle\n                targetPlayer = player\n        if minAngle < 90:\n            passVelVector = pygame.Vector2\\\n                (targetPlayer.rect.x - self.player.rect.x,\n                 targetPlayer.rect.y - self.player.rect.y)\n            passVelVector = reSizeVel(passVelVector)\n            self.player = targetPlayer\n            ball.passBall(passVelVector)\n            self.player.gameState = GameState.free\n        else:\n            passVelVector = direcVect * 15\n            ball.passBall(passVelVector)\n            self.player.gameState = GameState.free\n\n\n# return coordinate of middle fielder's x and y\ndef getMiddleCoordinate():\n    CoordinateOfMidfielderX = getElem()[4] + (2 / 7) * getElem()[2]\n    CoordinateOfMidMidfielderY = getElem()[6] + getElem()[5] + getElem()[3] / 2\n    return CoordinateOfMidfielderX, CoordinateOfMidMidfielderY\n\n# return coordinate of defender's x and y\ndef 
getDefendersCoordinate():\n    CoordinateOfDefenderX = getElem()[4] + (1 / 7) * getElem()[2]\n    CoordinateOfMidMidfielderY = getElem()[6] + getElem()[5] + getElem()[3] / 2\n    CoordinateOfDefenderY = CoordinateOfMidMidfielderY - (1 / 6) * getElem()[3]\n    CoordinateOfDefenderX1 = CoordinateOfDefenderX\n    CoordinateOfDefenderY1 = CoordinateOfMidMidfielderY + (1 / 6) * getElem()[3]\n    return CoordinateOfDefenderX, CoordinateOfDefenderY,\\\n           CoordinateOfDefenderX1, CoordinateOfDefenderY1\n\n# return coordinate of striker's x and y\ndef getStrikersCoordinate():\n    CoordinateOfStrikerX = getElem()[4] + (3 / 7) * getElem()[2]\n    CoordinateOfMidMidfielderY = getElem()[6] + getElem()[5] + getElem()[3] / 2\n    CoordinateOfStrikerY = CoordinateOfMidMidfielderY - (1 / 6) * getElem()[3]\n    CoordinateOfStrikerX1 = CoordinateOfStrikerX\n    CoordinateOfStrikerY1 = CoordinateOfMidMidfielderY + (1 / 6) * getElem()[3]\n    return CoordinateOfStrikerX, CoordinateOfStrikerY,\\\n           CoordinateOfStrikerX1, CoordinateOfStrikerY1\n\n\n\n# helper function to add player objects\ndef addPlayersIntoBlueTeam():\n    res = [\n        # two defenders\n        Player(getDefendersCoordinate()[0], getDefendersCoordinate()[1]),\n        Player(getDefendersCoordinate()[2], getDefendersCoordinate()[3]),\n        # three midfielders\n        Player(getMiddleCoordinate()[0],\n               getMiddleCoordinate()[1] - (1 / 3) * getElem()[3]),\n        Player(getMiddleCoordinate()[0], getMiddleCoordinate()[1]),\n        Player(getMiddleCoordinate()[0],\n               getMiddleCoordinate()[1] + (1 / 3) * getElem()[3]),\n        # two strikers\n        Player(getStrikersCoordinate()[0], getStrikersCoordinate()[1]),\n        Player(getStrikersCoordinate()[2], getStrikersCoordinate()[3]),\n    ]\n    return res\n\n\n# helper function to add player objects\ndef addPlayersIntoRedTeam():\n    res = [\n        # two defenders\n        Computer(getElem()[7] - getDefendersCoordinate()[0], getDefendersCoordinate()[1]),\n        Computer(getElem()[7] - getDefendersCoordinate()[0], getDefendersCoordinate()[3]),\n        # three midfielders\n        Computer(getElem()[7] - getMiddleCoordinate()[0],\n                 getMiddleCoordinate()[1] - (1 / 3) * getElem()[3]),\n        Computer(getElem()[7] - getMiddleCoordinate()[0], getMiddleCoordinate()[1]),\n        Computer(getElem()[7] - getMiddleCoordinate()[0],\n                 getMiddleCoordinate()[1] + (1 / 3) * getElem()[3]),\n        # two strikers\n        Computer(getElem()[7] - getStrikersCoordinate()[0], getStrikersCoordinate()[1]),\n        Computer(getElem()[7] - getStrikersCoordinate()[0], getStrikersCoordinate()[3]),\n    ]\n    return res\n\n\n# this is a group class\nclass BlueTeam(BlueAndRedTeam):\n    def __init__(self):\n        super(BlueTeam, self).__init__()\n        self.players = addPlayersIntoBlueTeam()\n        for player in self.players:\n            self.add(player)\n        self.player = self.players[-1]\n\n    # control method for blue team\n    def controlPlayer(self):\n        self.player.control()\n        self.player.gameState = GameState.free\n        for player in self.players:\n            if player != self.player:\n                player.gameState = GameState.attacking\n        aiPlayers = []\n        for player in self.players:\n            if player != self.player:\n                aiPlayers += [player]\n        for player in aiPlayers:\n            player.teamPlayerAI(self)\n\n    # this is the speed up method for blueTeam Player\n    def speedup(self):\n        for player in self.players:\n            if player == self.player:\n                player.runningSpeed = 12\n\n\n    # this method draws the arrow on the player being controlled\n    def drawArrow(self, gameDisplay):\n        sizeOfArrow = 15\n        FIFAPlayerSize = 60\n        arrowImg = pygame.image.load(\"assets/images/others/sort-down-fill.png\")\n        resizedArrowImg = pygame.transform.scale(arrowImg, (sizeOfArrow, 
sizeOfArrow))\n gameDisplay.blit(resizedArrowImg, (self.player.rect.centerx - sizeOfArrow / 2,\n self.player.rect.centery - 2 / 3 * FIFAPlayerSize))\n\n\nclass RedTeam(BlueAndRedTeam):\n def __init__(self):\n super(RedTeam, self).__init__()\n self.players = addPlayersIntoRedTeam()\n for player in self.players:\n self.add(player)\n self.player = self.players[-1]\n\n\n # control method for red team\n def controlAI(self):\n if ball.owner != self.player:\n for player in self.players:\n if player != self.player:\n player.gameState = GameState.attacking\n self.player.gameState = GameState.free\n self.player = self.chooseClosestTeamPlayer()\n self.player.gameState = GameState.findBall\n elif ball.owner == self.player:\n self.player.gameState = GameState.computer\n for player in self.players:\n player.teamPlayerAI(self)\n\n\n" }, { "alpha_fraction": 0.6272727251052856, "alphanum_fraction": 0.668181836605072, "avg_line_length": 30.428571701049805, "blob_id": "d8c80bb7a5f5240aa8727881691298c0131ce1ca", "content_id": "e187b2d892edfa7c79d7ac8aa4092c5b2704e687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/15112-CMU/112-opencv-tutorial-master/colorDetection.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\nwindow_name = \"Webcam!\"\ncam_index = 1 #my computer's camera is index 1, usually it's 0\ncv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\ncap = cv2.VideoCapture(cam_index)\ncap.open(cam_index)\n\n#initialize the range of colors you want to track\n#values will be in HSV\nlower_blue = np.array([110, 50, 50])\nupper_blue = np.array([130, 255, 255])\n\nwhile True:\n ret, frame = cap.read()\n if frame is not None:\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #convert to HSV\n mask = cv2.inRange(hsv, lower_blue, upper_blue) #find pixels in range\n frame = cv2.bitwise_and(frame, frame, mask=mask) #zero out pixels not in range\n cv2.imshow(window_name, frame)\n k = cv2.waitKey(10) & 0xFF\n if k == 27: #ESC key quits the program\n cv2.destroyAllWindows()\n cap.release()\n break\n" }, { "alpha_fraction": 0.5809788703918457, "alphanum_fraction": 0.5920950770378113, "avg_line_length": 35.38764190673828, "blob_id": "3f21134f7bd50d0048e1d4bc63c6812740ed4cdc", "content_id": "93c6affca0e1e00366c073f7466a586af7dcffce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6477, "license_type": "no_license", "max_line_length": 89, "num_lines": 178, "path": "/15112-CMU/Design Proposal and TP/TP/__init__.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this is the __init__ file for this game and main logic of this game is in this file\nfrom BlueAndRedTeam import *\nfrom FIFAplayer import *\nfrom music import *\nfrom ControlBoard import *\nimport time\nimport datetime\n\n\n# cite the structure of the framework of this game is from 15112 TA's code\n# Lukas Pygame templatem, besides the images of players and\n# balls in this game are downloaded from\n# https://pan.baidu.com/s/1Q_b6DiPq_sH870pCdONKkg and\n# the extraction code is: mz8q\n# the background music is downloaded from\n# http://fcsongs.com/UEFA_Champions_League_-_Main_Theme.html\n\n\nclass MiniFIFAOnline3(object):\n # the init function for mini FIFAonline3\n def __init__(self):\n self.blueTeam = BlueTeam()\n self.redTeam = RedTeam()\n 
self.music = BackGroundMusic()\n self.gameControl = GameControl()\n self.isGameOver = False\n self.fps = 200\n self.timeCounter = 180\n self.counterText = 'Time Left 3:00'\n # this font is from pygame documentation font example\n # https://github.com/search?q=pygame.font.SysFont&type=Code\n self.font = pygame.font.SysFont('Arial', 60)\n self.font1 = pygame.font.SysFont('comicsansms', 100)\n\n # this is the repr function\n def __repr__(self):\n return \"This game is mini-FIFAonline3!!!\"\n\n\n # new game method\n def newGame(self):\n self.blueTeam = BlueTeam()\n self.redTeam = RedTeam()\n ball.generateNewBall()\n\n\n # restart game method\n def restartGame(self):\n self.newGame()\n self.gameControl.blueTeamScore = 0\n self.gameControl.redTeamScore = 0\n self.timeCounter = 180\n\n\n # this is the keypress handler\n def keypressed(self, event):\n if event.key == pygame.K_s:\n if ball.owner == self.blueTeam.player:\n self.blueTeam.passBallToTeamMembers()\n elif ball.owner != self.blueTeam.player:\n self.blueTeam.changePlayer()\n elif event.key == pygame.K_d:\n if ball.owner == self.blueTeam.player:\n self.blueTeam.player.shoot()\n elif ball.owner != self.blueTeam.player:\n self.blueTeam.player.getBall()\n elif event.key == pygame.K_a:\n if ball.owner == self.blueTeam.player:\n self.blueTeam.player.crossBall()\n elif event.key == pygame.K_e:\n if ball.owner == self.blueTeam.player:\n self.blueTeam.speedup()\n elif event.key == pygame.K_r:\n self.restartGame()\n\n\n # this is the mouse press handler\n def mousepressed(self, event):\n pass\n\n\n # check whether team players have goal\n def checkWhetherIsGoal(self):\n if self.gameControl.teamISGoal():\n self.newGame()\n\n\n # this function generate game winner\n def generateWinner(self, blueTeamScore, redTeamScore, gameDisplay):\n for player in self.blueTeam:\n self.blueTeam.remove(player)\n for player in self.redTeam:\n self.redTeam.remove(player)\n ball.generateNewBall()\n if blueTeamScore > redTeamScore:\n text = \"BLue Team Win !!!\"\n gameDisplay.blit(self.font1.render(text,\n True, Colors.BlueViolet), (200, 300))\n elif blueTeamScore < redTeamScore:\n text = \"Red Team Win !!!\"\n gameDisplay.blit(self.font1.render(text,\n True, Colors.Coral), (200, 300))\n else:\n text = \"It Was a Tie !!!\"\n gameDisplay.blit(self.font1.render(text,\n True, Colors.GoldenRod), (200, 300))\n\n\n # this method update the timeCounter and counter text value each second\n def updateTimeCounterAndText(self):\n self.counterText = str(\"Time Left \") + \\\n str(datetime.timedelta(seconds=self.timeCounter))[3:]\n self.timeCounter -= 1\n return self.timeCounter, self.counterText\n\n\n # show How much time left for this game\n def showTimeLeft(self, gameDisplay):\n gameDisplay.blit(self.font.render(self.counterText,\n True, Colors.Black), (50, 5))\n\n # this is method is to check whether to generate winner or continue to show time left\n def checkToGenerateWinner(self, gameDisplay):\n if self.timeCounter > 0:\n self.showTimeLeft(gameDisplay)\n else:\n self.generateWinner(self.gameControl.blueTeamScore,\n self.gameControl.redTeamScore, gameDisplay)\n\n # this is the main loop\n def run(self):\n pygame.init()\n pygame.time.set_timer(pygame.USEREVENT, 1000)\n self.music.loadMusic()\n gameDisplay = pygame.display.set_mode((getGameDimension()[0],\n getGameDimension()[1]))\n pygame.display.set_caption(\"MiniFIFAOnline3\")\n spriteBall = pygame.sprite.Group(ball)\n clock = pygame.time.Clock()\n while not self.isGameOver:\n for event in pygame.event.get():\n if event.type == 
pygame.QUIT:\n self.isGameOver = True\n # the following if statement will be called once each second\n if event.type == pygame.USEREVENT:\n if self.timeCounter > 0:\n self.updateTimeCounterAndText()\n if event.type == pygame.KEYDOWN:\n self.keypressed(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.mousepressed(event)\n background = Background()\n gameDisplay.blit(background, (0, 0))\n self.blueTeam.controlPlayer()\n self.redTeam.controlAI()\n self.blueTeam.update()\n self.redTeam.update()\n ball.update()\n # check whether team player goal\n self.checkWhetherIsGoal()\n spriteBall.draw(gameDisplay)\n self.blueTeam.draw(gameDisplay)\n self.blueTeam.drawArrow(gameDisplay)\n self.redTeam.draw(gameDisplay)\n self.gameControl.drawScore(gameDisplay)\n self.checkToGenerateWinner(gameDisplay)\n # parameter background processing\n pygame.display.update()\n clock.tick(self.fps)\n\n\n# the main function\ndef main():\n game = MiniFIFAOnline3()\n game.run()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6563732028007507, "alphanum_fraction": 0.6675426959991455, "avg_line_length": 28.288461685180664, "blob_id": "77dcefdfc45f7bcde8df100398e31c1ebe4252ab", "content_id": "db345ac7a9fc76f9d2cf9930c84498a36c67b2ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1522, "license_type": "no_license", "max_line_length": 113, "num_lines": 52, "path": "/15112-CMU/FIFAworldcup copy2/PlayAgainBoarding.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport colors\nfrom const import *\nfrom Text import *\n\npygame.init()\n\nWIDTH = WIDTH_OF_PLAY_AGAIN_BOARDING\nHEIGHT = HEIGHT_OF_PLAY_AGAIN_BOARDIG\n\nMARGIN = WIDTH / 6\nBUTTON_WIDTH = 100\nBUTTON_HEIGHT = 50\n\nclass PlayAgainBoarding(pygame.Surface):\n def __init__(self):\n super(PlayAgainBoarding, self).__init__((WIDTH, HEIGHT))\n self.fill(colors.LightGreen)\n pygame.draw.rect(self, colors.LightGreen, (0, 0, WIDTH, HEIGHT))\n\n Text.showTextInPlayGain('Do you want to play again?', colors.White, self, HEIGHT / 3)\n\n y = HEIGHT * 2 / 3\n # yes\n titleSurf, titleRect = Text.makeTextObject('YES', colors.Green)\n titleRect.center = (MARGIN + BUTTON_WIDTH / 2, y)\n\n self.yes_rect = pygame.Rect(MARGIN, y - BUTTON_HEIGHT / 2, BUTTON_WIDTH, BUTTON_HEIGHT)\n \n pygame.draw.rect(self, colors.White, self.yes_rect)\n self.blit(titleSurf, titleRect)\n\n # no\n titleSurf, titleRect = Text.makeTextObject('NO', colors.Red)\n titleRect.center = (WIDTH - MARGIN - BUTTON_WIDTH / 2, y)\n \n self.no_rect = pygame.Rect(WIDTH - MARGIN - BUTTON_WIDTH, y - BUTTON_HEIGHT / 2, BUTTON_WIDTH, BUTTON_HEIGHT)\n\n pygame.draw.rect(self, colors.White, self.no_rect)\n self.blit(titleSurf, titleRect)\n\n def click(self, x, y):\n # convert to the coordinates of this surface\n x = x - (WINDOW_WIDTH - WIDTH) / 2\n y = y - (WINDOW_HEIGHT - HEIGHT) / 2\n \n if self.yes_rect.collidepoint(x, y):\n return True\n if self.no_rect.collidepoint(x, y):\n return False\n\n return None" }, { "alpha_fraction": 0.6703755259513855, "alphanum_fraction": 0.7121001482009888, "avg_line_length": 20.46268653869629, "blob_id": "a01ffceff4385dcd50b20ba91fcaa60f97259a84", "content_id": "6f01e9195f04a746a2e43a6c89df9881f0af2829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 102, "num_lines": 67, "path": "/15112-CMU/FIFAworldcup copy/const.py", "repo_name": 
"MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\n\npygame.init()\n\nBLUE_TEAM = 'blue-team'\nRED_TEAM = 'red-team'\n\nUP = 'UP'\nDOWN = 'DOWN'\nRIGHT = 'RIGHT'\nLEFT = 'LEFT'\nUP_LEFT = 'UP_LEFT'\nUP_RIGHT = 'UP_RIGHT'\nDOWN_LEFT = 'DOWN_LEFT'\nDOWN_RIGHT = 'DOWN_RIGHT'\n\nPLAYER_SIZE = 60\n\nSPEED_DEFAULT = 2\nSQRT_2 = 1.4142\n\nFPS = 60\n\nTEAMSIZE = 4\n\nBACKGROUND_WIDTH = 1100\nBACKGROUND_HEIGHT = 620\n\nGAP_SIZE_WIDTH = 35\nGAP_SIZE_HEIGHT = 17\n\nGAME_WIDTH = BACKGROUND_WIDTH - 2 * GAP_SIZE_WIDTH # 1030\nGAME_HEIGHT = BACKGROUND_HEIGHT - 2 * GAP_SIZE_HEIGHT # 586\n\nTABLE_SCORE_WIDTH = BACKGROUND_WIDTH\nTABLE_SCORE_HEIGHT = 60\n\nRECT_GAME = pygame.Rect(GAP_SIZE_WIDTH, TABLE_SCORE_HEIGHT + GAP_SIZE_HEIGHT, GAME_WIDTH, GAME_HEIGHT)\n\nTOP_LEFT = (0 + GAP_SIZE_WIDTH, TABLE_SCORE_HEIGHT + GAP_SIZE_HEIGHT)\n\nWINDOW_WIDTH = BACKGROUND_WIDTH\nWINDOW_HEIGHT = BACKGROUND_HEIGHT + TABLE_SCORE_HEIGHT\n\nTEXT_WIDTH = WINDOW_WIDTH - GAME_WIDTH\nTEXT_HEIGHT = WINDOW_HEIGHT\n\nBORDER_WIDTH = 1\n\nSIZE_ITEM = 40\n\nWIDTH_OF_PLAY_AGAIN_BOARDING = WINDOW_WIDTH / 2\nHEIGHT_OF_PLAY_AGAIN_BOARDIG = WINDOW_HEIGHT / 2\n\nWIDTH_OF_PAUSE_GAME = WINDOW_WIDTH / 3\nHEIGHT_OF_PAUSE_GAME = WINDOW_HEIGHT / 2\n\nFRAMS_TO_RERENDER_NEW_OBJECT = 50\n\nWITH_SETTING_BOARD = WINDOW_WIDTH / 3\nHEIGHT_SETTING_BOARD = WINDOW_HEIGHT\n\nGOAL_WIDTH = 132\n\nFONT30 = pygame.font.Font('assets/fonts/Muli-Regular.ttf', 30)\nFONT30B = pygame.font.Font('assets/fonts/Muli-Bold.ttf', 30)\nFONT60B = pygame.font.Font('assets/fonts/Muli-Bold.ttf', 60)\n" }, { "alpha_fraction": 0.5863757729530334, "alphanum_fraction": 0.6024042963981628, "avg_line_length": 29.76712417602539, "blob_id": "217ef9607c2ae97ce5fc4f453643419612dccc88", "content_id": "f2425ff7e1fb5a7f65d3e991b2528a58af57d452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2246, "license_type": "no_license", "max_line_length": 72, "num_lines": 73, "path": "/15112-CMU/week4 cold cold/case1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n# moves the green square using the arrow keys\ndef init(data):\n data.squareX = data.width/2\n data.squareY = data.height/2\n\ndef mousePressed(event, data):\n pass\n\ndef keyPressed(event, data):\n if event.keysym == \"Up\":\n data.squareY -= 20\n elif event.keysym == \"Down\":\n data.squareY += 20\n elif event.keysym == \"Left\":\n data.squareX -= 20\n elif event.keysym == \"Right\":\n data.squareX += 20\n\ndef redrawAll(canvas, data):\n # draw the text\n canvas.create_text(data.width/2, 20,\n text=\"Example: Arrow Key Movement\")\n canvas.create_text(data.width/2, 40,\n text=\"Pressing the arrow keys moves the square\")\n # draw the square\n size = 50\n canvas.create_rectangle(data.squareX - size, data.squareY - size,\n data.squareX + size, data.squareY + size,\n fill=\"green\")\n\n\n\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n root = Tk()\n root.resizable(width=False, height=False) # prevents 
resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 200)\n" }, { "alpha_fraction": 0.6378504633903503, "alphanum_fraction": 0.6386292576789856, "avg_line_length": 31.049999237060547, "blob_id": "05d9406e011b38a68c9e8bc11188318c66d71028", "content_id": "ed08b7d611c8ae4ce429264f8955f494226d4154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1284, "license_type": "no_license", "max_line_length": 97, "num_lines": 40, "path": "/15112-CMU/week10/demo.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\ndef printFiles(path):\n # Base Case: a file. Print the path name directly.\n if os.path.isfile(path):\n print(path)\n else:\n # Recursive Case: a directory. Iterate through its files and directories.\n # Note that we have to update the path name to access the inner files!\n for filename in os.listdir(path):\n printFiles(path + \"/\" + filename)\n\nprintFiles(\"sampleFiles\")\n\n# Note: if you see .DS_Store files in the sampleFiles folders, or in the\n# output of your function (as often happens with Macs, in particular),\n# don't worry; this is just a metadata file and can be safely ignored.\n\n\nimport os\ndef listFiles(path):\n if os.path.isfile(path):\n # Base Case: return a list of just this file\n return [ path ]\n else:\n # Recursive Case: create a list of all the recursive results from the files in the folder\n files = [ ]\n for filename in os.listdir(path):\n files += listFiles(path + \"/\" + filename)\n return files\n\nprint(listFiles(\"sampleFiles\"))\n\n\nimport os\ndef removeTmpFiles(path):\n if path.split(\"/\")[-1] == '.DS_Store':\n os.remove(path)\n elif os.path.isdir(path):\n for filename in os.listdir(path):\n removeTmpFiles(path + \"/\" + filename)\n\n\n" }, { "alpha_fraction": 0.675636351108551, "alphanum_fraction": 0.697454571723938, "avg_line_length": 32.53658676147461, "blob_id": "f63d12d6e12d407083e9c61a7e745e0cb7e557af", "content_id": "ea85ac1c3ce3ae59bfae223ece8c62046e30441d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1375, "license_type": "no_license", "max_line_length": 79, "num_lines": 41, "path": "/15112-CMU/112-opencv-tutorial-master/openingImagesResize.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# This code allows us to resize the input image in addition\n# to loading it.\n\n# Import opencv\nimport cv2\n\nwindow_name = \"Images\"\ndesired_size = 500.0 # we want the max dimension to be 500\n\n# Importantly, images are stored as BGR\n# Use the following function to read images.\nimage = cv2.imread(\"lightCat.jpg\")\n# Error checking to make sure that our image actually loaded properly\n# Might fail if we have an invalid file name (or otherwise)\nif image is not None:\n\n # Get the size of the image, which is a numpy array\n size = image.shape\n print size # Just so that we see what format it's in\n # Notice that it's a 1000 x 1000 x 3 image, where the last\n # dimension is the 3 values, BGR, per pixel.\n 
\n # We now want to resize the image to fit in our window, while\n # maintaining an aspect ratio\n fx = desired_size / size[0]\n fy = desired_size / size[1]\n scale_factor = min(fx, fy)\n\n # Get the resized image. The (0,0) parameter is desired size, which we're\n # setting to zero to let OpenCV calculate it from the scale factors instead\n resized = cv2.resize(image, (0,0), fx = scale_factor, fy = scale_factor)\n\n # Display our loaded image in a window with window_name\n cv2.imshow(window_name, resized)\n # Wait for any key to be pressed\n cv2.waitKey(0)\n\n# Clean up before we exit!\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.41738852858543396, "alphanum_fraction": 0.454924613237381, "avg_line_length": 32.89130401611328, "blob_id": "4dc4c0053ce4174d13eaf81d11fd1d66cde0ea7a", "content_id": "e9b5d779b615e29ab2b09fe35673608808c73d10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3117, "license_type": "no_license", "max_line_length": 87, "num_lines": 92, "path": "/15112-CMU/week2/test2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\ndef drawCircle(canvas, x0, y0, x1, y1, r, color):\n (cx, cy, r) = (x0 + (x1 - x0)/2, y0 + (y1 - y0)/2, r)\n while r >= 1:\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, fill=color, outline='black')\n r = (2/3) * r\n\n\ndef drawButtonPattern(canvas, size, n):\n canvas.create_rectangle(0, 0, size, size, fill=\"purple\")\n width = size / n\n r = width / 2\n for col in range(n):\n for row in range(n):\n print('col',col,'row', row)\n left = 0 + width * col\n top = 0 + width * row\n right = width * (col + 1)\n bottom = width * (row + 1)\n # print(left, top, right, bottom)\n # color = 'green'\n if (col + row) % 4 == 0:\n color = 'red'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if row % 3 == 0:\n if (col + row) % 4 != 0:\n color = 'green'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n if col % 2 == 1:\n if ((row - 1) % 12) == 0 or ((row - 5) % 12 == 0):\n if (col - 1) % 4 == 0:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n if ((row - 7) % 12) == 0 or ((row - 11) % 12 == 0):\n if (col - 3) % 4 == 0:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n if ((row - 2) % 6 == 0) or ((row - 4) % 6 == 0):\n if col % 2 == 1:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 1) % 6 == 0 or (row - 5) % 6 == 0:\n if col % 2 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 2) % 12 == 0 or (row - 10) % 12 == 0:\n if (col - 4) % 4 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 4) % 12 == 0 or (row - 8) % 12 == 0:\n if (col - 2) % 4 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n\n\n\n\n\n\n # else:\n # color = 'green'\n # drawCircle(canvas, left, top, right, bottom, r, color)\n\n\n\n\ndef runDrawButtonPattern(width, height, n):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == height)\n drawButtonPattern(canvas, width, n)\n root.mainloop()\n\n\ndef testDrawButtonPattern():\n print(\"Testing drawButtonPattern()...\", end=\"\")\n runDrawButtonPattern(400, 400, 10)\n runDrawButtonPattern(300, 300, 5)\n 
runDrawButtonPattern(250, 250, 25)\n print(\"Done.\")\n\ntestDrawButtonPattern()" }, { "alpha_fraction": 0.591304361820221, "alphanum_fraction": 0.634782612323761, "avg_line_length": 13.5, "blob_id": "0e36c014cb5ffc4e838b91557bdccd626e2635a5", "content_id": "cea6c83663cb15a54e9517cab3cf7531b33207c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 21, "num_lines": 8, "path": "/15112-CMU/FIFAworldcup copy2/Enum.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from enum import Enum\n\nclass State(Enum):\n FIND_BALL = 1\n FREE = 2\n LEAD_TO_GOAL = 3\n ATTACK = 4\n COMPUTER = 5" }, { "alpha_fraction": 0.5618638396263123, "alphanum_fraction": 0.5720425248146057, "avg_line_length": 28.677852630615234, "blob_id": "767e09ab127639b8f8f145f05edbbf388277d4a4", "content_id": "aedf1bd8167360824b3a812ec46b78fe877b05be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4421, "license_type": "no_license", "max_line_length": 73, "num_lines": 149, "path": "/15112-CMU/week8/OOP_Animation_Demo.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# oopyDotsDemo.py\n# starts with betterDotsDemo and adds:\n# * a dotCounter that counts all the instances of Dot or its subclasses\n# * a MovingDot subclass of Dot that scrolls horizontally\n# * a FlashingMovingDot subclass of MovingDot that flashes and moves\n\nimport random\nfrom tkinter import *\n\nclass Dot(object):\n dotCount = 0\n\n # Model\n def __init__(self, x, y):\n Dot.dotCount += 1\n self.x = x\n self.y = y\n self.r = random.randint(20,50)\n self.fill = random.choice([\"pink\",\"orange\",\"yellow\",\"green\",\n \"cyan\",\"purple\"])\n self.clickCount = 0\n\n # View\n def draw(self, canvas):\n canvas.create_oval(self.x-self.r, self.y-self.r,\n self.x+self.r, self.y+self.r,\n fill=self.fill)\n canvas.create_text(self.x, self.y, text=str(self.clickCount))\n\n # Controller\n def containsPoint(self, x, y):\n d = ((self.x - x)**2 + (self.y - y)**2)**0.5\n return (d <= self.r)\n\nclass MovingDot(Dot):\n # Model\n def __init__(self, x, y):\n super().__init__(x, y)\n self.speed = 5 # default initial speed\n\n # Controller\n def move(self, data):\n self.x += self.speed\n if (self.x > data.width):\n self.x = 0\n\nclass FlashingMovingDot(MovingDot):\n # Model\n def __init__(self, x, y):\n super().__init__(x, y)\n self.flashCounter = 0\n self.showFlash = True\n\n # View\n def draw(self, canvas):\n if (self.showFlash):\n canvas.create_rectangle(self.x-self.r, self.y-self.r,\n self.x+self.r, self.y+self.r,\n fill=\"black\")\n super().draw(canvas)\n\n # Controller\n def move(self, data):\n super().move(data)\n self.flashCounter += 1\n if (self.flashCounter == 5):\n self.flashCounter = 0\n self.showFlash = not self.showFlash\n\n# Core animation code\n\ndef init(data):\n data.dots = [ ]\n\ndef mousePressed(event, data):\n for dot in reversed(data.dots):\n if (dot.containsPoint(event.x, event.y)):\n dot.clickCount += 1\n return\n dotType = (len(data.dots) % 3)\n if (dotType == 0):\n data.dots.append(Dot(event.x, event.y))\n elif (dotType == 1):\n data.dots.append(MovingDot(event.x, event.y))\n else:\n data.dots.append(FlashingMovingDot(event.x, event.y))\n\ndef redrawAll(canvas, data):\n for dot in data.dots:\n dot.draw(canvas)\n canvas.create_text(data.width/2, 10, text=\"%d Dots\" % Dot.dotCount)\n\ndef keyPressed(event, data):\n pass\n\ndef 
timerFired(data):\n for dot in data.dots:\n if type(dot) != Dot:\n dot.move(data)\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n init(data)\n # create the root and the canvas\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(600, 400)" }, { "alpha_fraction": 0.650674045085907, "alphanum_fraction": 0.6645519137382507, "avg_line_length": 29.035715103149414, "blob_id": "c2384e50cded3836d7c08880465f544011d31b84", "content_id": "118fe10027dfd11cf70a4b0106a4f5d464ba39c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2522, "license_type": "no_license", "max_line_length": 74, "num_lines": 84, "path": "/15112-CMU/112-opencv-tutorial-master/erosionAndDilation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# This file is meant to demonstrate erosions and dilations in OpenCV.\n# For some of the theory, please see wikipedia / etc.\n\nimport cv2\nimport numpy as np\n\nWHITE = 255\nBLACK = 0\nTHRESH = 127\n\n# Create our erosion / dilation kernels, which are in this case\n# just a 5 x 5 array of ones.\nkernel = np.ones((5,5), np.uint8)\n\n# Dilation essentially makes white / bright areas bigger, and makes\n# black / dark images smaller. It is done by taking the max of the \n# kernel iterated over the entire image.\ndef dilate(image):\n return cv2.dilate(image, kernel)\n\n# The opposite of dilation, erosion makes dark areas bigger, and bright\n# areas smaller. This is done by taking the min of the kernel, iterated\n# over the entire image.\ndef erode(image):\n return cv2.erode(image, kernel)\n\n# An open operation is simply an erosion followed by a dilation. It\n# is very useful in removing noise, among other things. We can also\n# imagine it as being able to separate disjoint parts of an image\n# connected by only small slivers.\ndef open(image):\n return dilate(erode(image))\n\n# The opposite of open, a close operation is a dilation followed by\n# an erosion. 
This is often useful for closing small holes inside\n# various objects.\ndef close(image):\n return erode(dilate(image))\n\n# Our simple threshold function from before.\ndef threshold(image):\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, thresholded = cv2.threshold(grey, THRESH, WHITE, cv2.THRESH_BINARY)\n return thresholded\n\ndef main():\n\n window_name = \"Webcam!\"\n\n cam_index = 0\n cv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(cam_index)\n cap.open(cam_index)\n\n while True:\n\n ret, frame = cap.read()\n\n if frame is not None:\n # First we do a threshold on our image\n thresh = threshold(frame)\n cv2.imshow(window_name, thresh)\n\n # We demonstrate a couple different versions of\n # the same thresholded image.\n\n # We should notice that the erosion a lot of the noise that\n # was left over from the thresholding operation.\n cv2.imshow(\"Erode\", erode(thresh))\n cv2.imshow(\"Dilate\", dilate(thresh))\n cv2.imshow(\"Open\", open(thresh))\n cv2.imshow(\"Close\", close(thresh))\n \n k = cv2.waitKey(1) & 0xFF\n if k == 27: # Escape key\n cv2.destroyAllWindows()\n cap.release()\n break\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.43772727251052856, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 22.923913955688477, "blob_id": "ddce07f323052b688205d36c6d65b8d83cb437a5", "content_id": "17f3b84e1967ce60e9431bee677e9c4d510debdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2200, "license_type": "no_license", "max_line_length": 95, "num_lines": 92, "path": "/15112-CMU/week9/Q6.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def generateValidParentheses(n):\n pass\n\ndef testGenerateValidParentheses():\n print(\"Testing generateValidParentheses...\", end=\"\")\n assert(generateValidParentheses(4) == { \"(())\", \"()()\" })\n assert(generateValidParentheses(6) == { \"((()))\", \"()(())\", \"(())()\", \"(()())\", \"()()()\" })\n assert(generateValidParentheses(5) == set())\n assert(generateValidParentheses(0) == set())\n print(\"Passed!\")\n\n\n# print(checkStr(\"(())\"))\n# print(checkStr(\"())(\"))\n# print(checkStr(\")()(\"))\n# print(generateValidParentheses(4))\n# print(generateValidParentheses(6))\n# print(generateValidParentheses(5))\n# print(generateValidParentheses(0))\n\ndef print_all_parens(n):\n def print_parens(left, right, s):\n if right == n/2:\n print(s)\n return\n if left < n/2:\n print_parens(left + 1, right, s + \"(\")\n if right < left:\n print_parens(left, right + 1, s + \")\")\n\n print_parens(0, 0, \"\")\n\nprint(print_all_parens(6))\n\n\n\n\n\nclass Solution(object):\n def generateParenthesis(self, n):\n def generate(A = []):\n if len(A) == 2*n:\n if valid(A):\n ans.append(\"\".join(A))\n else:\n A.append('(')\n generate(A)\n A.pop()\n A.append(')')\n generate(A)\n A.pop()\n\n def valid(A):\n bal = 0\n for c in A:\n if c == '(': bal += 1\n else: bal -= 1\n if bal < 0: return False\n return bal == 0\n\n ans = []\n generate()\n return ans\n\ndef generateParenthesis(n):\n def generate(A = []):\n if len(A) == n:\n if valid(A):\n ans.append(\"\".join(A))\n else:\n A.append('(')\n generate(A)\n A.pop()\n A.append(')')\n generate(A)\n A.pop()\n\n def valid(A):\n bal = 0\n for c in A:\n if c == '(':\n bal += 1\n else: bal -= 1\n if bal < 0:\n return False\n return bal == 0\n\n ans = []\n generate()\n return ans\n\n# print(generateParenthesis(6))" }, { "alpha_fraction": 0.5448943376541138, "alphanum_fraction": 
0.5721830725669861, "avg_line_length": 22.204082489013672, "blob_id": "180fd5e6d5a3e091b779d00e52eacfed6286e88d", "content_id": "475894cd9250c0e098ed9d7c092d7606e8bdb530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 54, "num_lines": 49, "path": "/15112-CMU/week2/test isSmith.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def isPrime(n):\n if (n < 2):\n return False\n maxFactor = round(n**0.5)\n for factor in range(2, maxFactor+1):\n if (n % factor == 0):\n return False\n return True\n\n\ndef sum_digits(n):\n sum = 0\n while n:\n sum += n % 10\n n //= 10\n return sum\n\n\ndef isSmithNumber(n):\n if isPrime(n):\n return False\n sumofdigit = sum_digits(n)\n sumOfPrimeFactor = 0\n for factor in range(2, n + 1):\n if isPrime(factor):\n while n % factor == 0:\n sumOfPrimeFactor += sum_digits(factor)\n n = n / factor\n if sumOfPrimeFactor == sumofdigit:\n return True\n else:\n return False\n\n\ndef testIsSmithNumber():\n print(\"Testing isSmithNumber()...\", end=\"\")\n assert(isSmithNumber(22) == True)\n assert(isSmithNumber(21) == False)\n assert(isSmithNumber(4) == True)\n assert(isSmithNumber(378) == True)\n assert(isSmithNumber(1) == False)\n assert(isSmithNumber(27) == True)\n assert(isSmithNumber(9) == False)\n assert(isSmithNumber(7) == False)\n print(\"Passed.\")\n\n\ntestIsSmithNumber()\n# print(sum_digits(456))" }, { "alpha_fraction": 0.5876701474189758, "alphanum_fraction": 0.609287440776825, "avg_line_length": 26.173913955688477, "blob_id": "66b83b253758d17c9f9a81948df2c09d4d79d32c", "content_id": "36159aaa8ed7a4eaa7b62641cc88198b15e0cdb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1249, "license_type": "no_license", "max_line_length": 66, "num_lines": 46, "path": "/15112-CMU/week3/test2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def getAllSubstrings(s):\n substrings = \"\"\n lenOfS = len(s)\n for i in range(0, lenOfS):\n for j in range(i, lenOfS):\n string = s[i:j+1]\n substrings += string + \",\"\n return substrings\n\n\ndef findCommonSubstring(s1, s2):\n commonStrings = \"\"\n substringOfS1 = getAllSubstrings(s1)\n substringOfS2 = getAllSubstrings(s2)\n strings1 = substringOfS1.split(\",\")\n strings2 = substringOfS2.split(\",\")\n for string_1 in strings1:\n for string_2 in strings2:\n if string_1 == string_2:\n commonStrings += string_1 + \",\"\n commonStrings = commonStrings[:-1]\n return commonStrings\n\n\ndef longestCommonSubstring(s1, s2):\n if s1 == \"\" or s2 == \"\":\n return \"\"\n elif s1 == s2:\n return s1\n else:\n pass\n\n\n\n\ndef testLongestCommonSubstring():\n print(\"Testing longestCommonSubstring()...\", end=\"\")\n assert(longestCommonSubstring(\"abcdef\", \"abqrcdest\") == \"cde\")\n assert(longestCommonSubstring(\"abcdef\", \"ghi\") == \"\")\n assert(longestCommonSubstring(\"\", \"abqrcdest\") == \"\")\n assert(longestCommonSubstring(\"abcdef\", \"\") == \"\")\n assert(longestCommonSubstring(\"abcABC\", \"zzabZZAB\") == \"AB\")\n print(\"Passed.\")\n\n\ntestLongestCommonSubstring()" }, { "alpha_fraction": 0.5976340174674988, "alphanum_fraction": 0.6100783348083496, "avg_line_length": 31.91145896911621, "blob_id": "e00c7f0267a68c1178e3e9ab64d4abc5ad85c0cb", "content_id": "fc7ab0539d4ec09678393f02658774c094565855", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6509, "license_type": "no_license", "max_line_length": 104, "num_lines": 192, "path": "/15112-CMU/week8/flappyKim.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "## flappyKim: A basic flappybird-type game\r\n# Code from CMU 15-112 Lecture 2, 03/07/2019, 11/09/2018\r\n# mdtaylor\r\n\r\n##Press space to make Kimchee swim!\r\n# This is a very simple OOPy implementation so...\r\n# ...if you want some OOPy animation practice, add some features!\r\n# 1. Add a score count\r\n# 2. Add treats for Kimchee to eat :)\r\n# 3. Stop the game if Kimchee hits too many obstacles\r\n# 4. Add some difficulty levels\r\n# 5. Add a start/splash screen\r\n# 6. Add a pause screen\r\n# 7. Improve the graphics and/or draw a background\r\n# 8. Smooth out the motion and/or delete offscreen obstacles\r\n# 9. Make the up/down motion velocity-based like the real game\r\n\r\n## Note: Lec1 took a sidescrolling approach, while this code moves the pipes!\r\n# You can approach this problem many different ways! No one solution is \r\n# correct, so it's a matter of organization and preference.\r\n# What are the pros and cons of moving the pipes (this code), \r\n# versus Kimchee moving forward with the view while the pipes stay still?\r\n\r\nfrom tkinter import *\r\nimport random\r\n\r\n####################################\r\n# Define classes\r\n####################################\r\n\r\n##Define Kimchee, Prof. Taylor's axolotl\r\nclass Kimchee(object):\r\n #Establish Kimchee's initial position and radius\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n self.r = 40\r\n \r\n #Draw Kimchee as an oval with a face made of text\r\n def draw(self, canvas):\r\n canvas.create_oval(self.x-self.r, self.y-self.r, self.x+self.r, self.y+self.r, fill=\"grey\")\r\n canvas.create_text(self.x, self.y,text=\">>o u o<<\",font=\"arial 30 bold\")\r\n \r\n #Move Kimchee up and down by vSpeed pixels\r\n def move(self, vSpeed):\r\n self.y-=vSpeed #Positive vSpeed moves him up\r\n \r\n #Return True if Kimchee collides with the input obstacle\r\n def collidesWithObs(self, other):\r\n if (other.hPos<=self.x<=other.hPos+other.width and\r\n (other.top<=self.y+self.r<=other.bottom or \r\n other.top<=self.y-self.r<=other.bottom)):\r\n return True\r\n\r\n\r\n\r\n##Define the obstacle class, a series of moving boxes to avoid\r\nclass Obstacle(object):\r\n #Establish the obstacle's dimensions and location\r\n def __init__(self,top, bottom, hPos, width):\r\n self.top=top\r\n self.bottom=bottom\r\n self.width=width\r\n self.hPos=hPos #This is the left edge of the obstacle\r\n self.color=\"green\"\r\n\r\n #Move the obstacle to the left by hSpeed pixels\r\n def move(self, hSpeed):\r\n self.hPos-=hSpeed\r\n \r\n #Draw the obstacle as a rectangle\r\n def draw(self, canvas):\r\n canvas.create_rectangle(self.hPos, self.top, self.hPos+self.width, self.bottom, fill=self.color)\r\n \r\n \r\n \r\n####################################\r\n# customize these functions\r\n#################################### \r\n\r\n##Set up our animation\r\ndef init(data):\r\n data.timerDelay = 100 # Tweak this to change game speed\r\n data.kc=Kimchee(data.width/4, data.height/2) #Make our Kimchee object\r\n data.tCount=0 #Start our timer at 0\r\n data.obstacles=[] #We'll store obstacles as an empty list\r\n\r\n\r\n\r\n##Can you do something cool with mousePressed?\r\ndef mousePressed(event, data):\r\n pass\r\n\r\n\r\n\r\n##Move Kimchee up when any key is pressed.\r\n#How might you make the motion smoother / less jerky?\r\ndef 
keyPressed(event, data):\r\n upSpeed=30\r\n data.kc.move(upSpeed)\r\n\r\n\r\n\r\n##Lower Kimchee, move obstacles left, and check for collisions\r\ndef timerFired(data):\r\n data.tCount+=1\r\n \r\n #Move Kimchee down\r\n downSpeed=-10\r\n data.kc.move(downSpeed)\r\n \r\n #Make a new obstacle pair at a set interval\r\n obsInterval=20\r\n gapSize=6\r\n if data.tCount%obsInterval==0:\r\n randHeight=random.randint(0,data.height//2)\r\n gap=gapSize*data.kc.r\r\n \r\n #Our obstacles are added to the obstacle list\r\n data.obstacles+=[Obstacle(0, randHeight, data.width, 50)]\r\n data.obstacles+=[Obstacle(randHeight+gap,data.height, data.width, 50)]\r\n #Note: We aren't deleting them once they leave the screen.\r\n #This might slow down the game after a long time (but you can fix it!)\r\n \r\n #Move every obstacle in our list and then check for Kimchee collisions\r\n obsSpeed=20\r\n for obs in data.obstacles:\r\n obs.move(obsSpeed)\r\n if data.kc.collidesWithObs(obs):\r\n print(\"Oop!\")\r\n obs.color=\"red\" #Change hit obstacles red. What else could we do?\r\n\r\n\r\n\r\n##Call methods to draw Kimchee and all the obstacles in our list\r\ndef redrawAll(canvas, data):\r\n data.kc.draw(canvas)\r\n \r\n for obs in data.obstacles:\r\n obs.draw(canvas)\r\n\r\n\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 50 # milliseconds\r\n root = Tk()\r\n root.resizable(width=False, height=False) # prevents resizing window\r\n init(data)\r\n # create the root and the canvas\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.configure(bd=0, highlightthickness=0)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(600, 600)" }, { "alpha_fraction": 0.6176065802574158, "alphanum_fraction": 0.645116925239563, "avg_line_length": 28.079999923706055, "blob_id": "d557b4db4dfdf9f42cb061b1d72a4880ac940f17", "content_id": "a608f0761009a144dc892252681684105032d7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "no_license", "max_line_length": 68, "num_lines": 25, "path": "/15112-CMU/112-opencv-tutorial-master/blurring.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\n\n#set up the webcam\nwindow_name = \"Webcam!\"\ncam_index = 1 #my computer's camera is index 1, usually it's 
0\ncv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\ncap = cv2.VideoCapture(cam_index)\ncap.open(cam_index)\n\ninBlurMode = False #allows us to turn blur on and off\nwhile True:\n ret, frame = cap.read()\n if frame is not None:\n if inBlurMode:\n frame = cv2.blur(frame, (10,10)) #blur the current frame\n cv2.imshow(window_name, frame)\n k = cv2.waitKey(10) & 0xFF\n if k == 27: #ESC key quits the program\n cv2.destroyAllWindows()\n cap.release()\n break\n if k == ord('b'): #turns blurring on and off\n inBlurMode = not inBlurMode\n" }, { "alpha_fraction": 0.4438461661338806, "alphanum_fraction": 0.45461538434028625, "avg_line_length": 23.490196228027344, "blob_id": "ce3fc7d262420958cdf223d7146a6cc83ff8bd74", "content_id": "761c6471932509420f964429de400bf6beb023d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "no_license", "max_line_length": 74, "num_lines": 51, "path": "/15112-CMU/week5/hw5-driver.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#Updated 2/10/19 for HW5, s19\r\n\r\nimport os\r\nimport sys\r\nimport zipfile\r\n\r\ndef _exit():\r\n if sys.flags.interactive:\r\n try:\r\n exit()\r\n sys.exit()\r\n except:\r\n os._exit(0)\r\n else: sys.exit()\r\n\r\ndef add_to_zip(path, zipf, required=False):\r\n if not os.path.exists(path):\r\n if required:\r\n print('Fail: Unable to find file %s' % path)\r\n raise Exception\r\n return\r\n print('Adding: %s' % path)\r\n zipf.write(path)\r\n\r\ndef intro():\r\n print(\"\"\"\\\r\n _ _ ____ ____ _\r\n/ / |___ \\ | _ \\ _ __(_)_ _____ _ __\r\n| | | __) | | | | | '__| \\ \\ / / _ \\ '__|\r\n| | |/ __/ | |_| | | | |\\ V / __/ |\r\n|_|_|_____| |____/|_| |_| \\_/ \\___|_|\r\n\"\"\")\r\n\r\ndef main():\r\n intro()\r\n zip_name = 'hw5.zip'\r\n zipf = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\r\n required_files = ['hw5-tetris.py', 'hw5.py', 'hw5-ball.py']\r\n optional_files = ['hw5-tetris-bonus.py', 'readme.txt']\r\n try:\r\n for file in required_files: add_to_zip(file, zipf, required=True)\r\n for file in optional_files: add_to_zip(file, zipf, required=False)\r\n except:\r\n zipf.close()\r\n os.remove(zip_name)\r\n _exit()\r\n zipf.close()\r\n print(\"Success!\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6699702739715576, "alphanum_fraction": 0.6769078373908997, "avg_line_length": 24.225000381469727, "blob_id": "e1f80bed5aeeaa9d06de5381b962552cbfc45364", "content_id": "29f4ee7b6dd25fbb719167569bcd231f746cfa7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1009, "license_type": "no_license", "max_line_length": 58, "num_lines": 40, "path": "/15112-CMU/FIFAworldcup copy2/Sounds.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\nimport pygame\nimport random\n\npygame.init()\n\nPATH = \"assets/sounds/\"\nMUSIC_BACKGROUND = PATH + \"music_background/\"\nREDGOAL =\tPATH + \"redgoal/\"\nBLUEGOAL =\tPATH + \"bluegoal/\"\n\nclass Sounds:\n\tdef __init__(self):\n\t\tself.hits = []\n\t\tself.redgoal = None\n\t\tself.bluegoal = None\n\t\tself.music_background = None\n\t\tfor dirs in os.listdir(PATH):\n\t\t\tfor filesound in os.listdir(PATH + dirs):\n\t\t\t\tif filesound.endswith(\".wav\"):\n\t\t\t\t\tif(dirs == \"music_background\"): \n\t\t\t\t\t\tself.music_background = MUSIC_BACKGROUND + filesound\n\t\t\t\t\tif(dirs == \"bluegoal\"): \n\t\t\t\t\t\tself.bluegoal = BLUEGOAL + filesound\n\t\t\t\t\tif(dirs == 
\"redgoal\"): \n\t\t\t\t\t\tself.redgoal = REDGOAL + filesound\n\tdef music(self):\n\t\tpygame.mixer.music.load(self.music_background)\n\t\tpygame.mixer.music.play(-1)\n\t\tpygame.mixer.music.set_volume(0.6)\n\n\tdef isRedGoal(self):\n\t\tsound = pygame.mixer.Sound(self.redgoal)\n\t\tsound.set_volume(1.0)\n\t\tsound.play()\n\n\tdef isBlueGoal(self):\n\t\tsound = pygame.mixer.Sound(self.bluegoal)\n\t\tsound.set_volume(1.0)\n\t\tsound.play()\n" }, { "alpha_fraction": 0.7417218685150146, "alphanum_fraction": 0.751655638217926, "avg_line_length": 49.5, "blob_id": "fd1336061039829fd137d0e8ba0da09400ffd96d", "content_id": "c79f22de77595c69fe2f02d3c978fda2c30dbd64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 302, "license_type": "no_license", "max_line_length": 182, "num_lines": 6, "path": "/15112-CMU/FIFAworldcup copy/Notes.md", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# Notes:\n\n* Basic writing and formatting syntax: [https://help.github.com/articles/basic-writing-and-formatting-syntax/](https://help.github.com/articles/basic-writing-and-formatting-syntax/).\n* `pngcrush -ow -rem allb -reduce file.png` is used to convert png to not warning\n\n* Convert Blue -> Red: 115" }, { "alpha_fraction": 0.44593507051467896, "alphanum_fraction": 0.4695465564727783, "avg_line_length": 22.745222091674805, "blob_id": "bd66f43f62f1e16c10635600e859c41031dd0d34", "content_id": "5b437d2a256eb4bd770b15aa864abf065e5f2707", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3727, "license_type": "no_license", "max_line_length": 82, "num_lines": 157, "path": "/15112-CMU/week10/practice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def notTrivialSol(sol):\n for item in sol:\n if len(item) > 1:\n return True\n return False\n\n\ndef palindromeHelper(s, sol):\n if len(s) == 0 and notTrivialSol(sol):\n return sol\n else:\n for i in range(len(s), 0, -1):\n currStr = s[:i]\n if currStr == currStr[::-1]:\n sol += [currStr]\n tmpSol = palindromeHelper(s[i:], sol)\n if tmpSol is not None:\n return tmpSol\n sol.pop()\n return None\n\n\n# print(\"racecar\"[7:])\n\n\ndef palindromePartition(s):\n sol = []\n return palindromeHelper(s, sol)\n\nprint(palindromePartition(\"abc\"))\nprint(palindromePartition(\"racecar\"))\nprint(palindromePartition(\"geeks\"))\nprint(\"\")\n\ndef ct1(L, depth=0):\n print(\"\\t\" * depth + str(L))\n if len(L) == 1:\n result = L[0] ** 2\n elif isinstance(L[0], str):\n result = ct1(L[1:], depth + 1) + ct1([L[-1]], depth + 1)\n else:\n result = ct1(L[1:], depth + 1)\n print(\"\\t\" * depth + \"->\" + str(result))\n return result\n\nct1([\"yeet\", 112, \"yo\", 6])\n\n\nfrom functools import reduce\nL = [1,2,3,2,4,2]\ndef myCount(L, item):\n return reduce (lambda x,y: x+y, list(map(lambda x: 1 if x == item else 0, L)))\n\nprint(myCount(L, 2))\n\n\ndef ct2(d):\n for k in d:\n if k % 2 == 0:\n d[k + 1] = k + 1\n a = set()\n for k1 in d:\n for k2 in d:\n if k1 != k2 and d[k1] == d[k2]:\n a.add((k1,k2))\n return a\n\nd = dict()\nfor i in range(6):\n d[i] = i + 1\n\nprint(ct2(d))\n\n\ndef findTripletsSlow(L):\n result = set()\n for i in range(len(L)):\n for j in range(len(L)):\n for k in range(len(L)):\n if L[j] != L[i]:\n if L[k] != L[i] and L[k] != L[j]:\n if L[i] + L[j] + L[k] == 0:\n result.add((L[i], L[j], L[k]))\n if result != set():\n return result\n else:\n return None\n\n\n\ndef findTriplets(arr):\n result = set()\n n = 
len(arr)\n for i in range(n - 1):\n s = set()\n for j in range(i + 1, n):\n x = - (arr[i] + arr[j]) # represent the\n # third num you're looking for\n if x in s: # if x has been \"seen\" before\n result.add((x, arr[i], arr[j]))\n else:\n s.add(arr[j])\n return result\nprint(findTripletsSlow([1,0,-3,2,-1]))\n\n\n\ndef findTriplets2(lst):\n result = set()\n for i in range(len(lst) - 1):\n s = set()\n for j in range(i + 1, len(lst)):\n x = -(lst[i] + lst[j])\n if x in s:\n result.add((x, lst[i], lst[j]))\n else:\n s.add(lst[j])\n return result\nprint(findTriplets2([1,0,-3,2,-1]))\n\n\ndef findTripletsFast(arr):\n res = set()\n for i in range(len(arr)):\n for j in range(len(arr)):\n x = arr[i]\n if arr[j] != x:\n y = arr[j]\n else:\n continue\n z = - (x + y)\n if z in set(arr):\n res.add((x, y, z))\n return res\n\n\nprint(findTriplets([1,0]))\nprint(findTriplets([1,0,-3,2,-1]))\n# print(findTripletsFast([1,0,-3,2,-1]))\n\n\ndef palindromePartition3(s, sol = []):\n if len(s) == 0 and notTrivialSol(sol):\n return sol\n else:\n for i in range(len(s), 0, -1):\n currStr = s[:i]\n if currStr == currStr[::-1]:\n sol.append(currStr)\n tmpSol = palindromeHelper(s[i:], sol)\n if tmpSol is not None:\n return tmpSol\n sol.pop()\n\nprint(palindromePartition3(\"geeks\",[]))\nprint(palindromePartition3(\"abcde\",[]))\nprint(palindromePartition3(\"abbc\",[]))" }, { "alpha_fraction": 0.47237569093704224, "alphanum_fraction": 0.5662983655929565, "avg_line_length": 14.125, "blob_id": "8f32f633e133a2ad785d1d409851960c13791383", "content_id": "923775c5543960dc4f5cdd462ea2dec47453ceed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/15112-CMU/BeerGame/Game.pyw", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n#\n#\n#\n#\n\n\"\"\"\n#TODO: documentation!\n\"\"\"\n\n__author__ = \"$Author: DR0ID $\"\n__version__ = \"$Revision: 116 $\"\n__date__ = \"$Date: 2007-05-19 10:28:17 +0200 (Sa, 19 Mai 2007) $\"\n__license__ = ''\n__copyright__ = \"DR0ID (c) 2007\"\n__url__ = \"http://www.mypage.bluewin.ch/DR0ID/index.html\"\n__email__ = \"[email protected]\"\n\n\n\nimport _BierGame\n\n_BierGame.main()" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.5771365165710449, "avg_line_length": 31.518047332763672, "blob_id": "108f78514cd044ccfc8a8d1f89149acd7e6fe513", "content_id": "04bfcf9c5ca3c871c990c650844a5193be25394f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15317, "license_type": "no_license", "max_line_length": 79, "num_lines": 471, "path": "/15112-CMU/week8/hw8.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################################\n# Hw8\n# Your Name: Ming Xu\n# Your Andrew ID:mxu2\n# Your Section:2N\n#################################################################\n\nimport random, math\n\n#################################################\n# Hw8 Bird Classes and Subclasses\n#################################################\n\ndef birdClassCollaborators():\n return \"nobody\"\n\n\n# This is the class of bird\nclass Bird(object):\n # init function in Bird class\n def __init__(self, name):\n self.name = name\n self.eggs = 0\n\n # str function in Bird class\n def __repr__(self):\n if self.eggs == 1:\n return self.name + \" has 1 egg\"\n else:\n return self.name + \" has \" + 
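Annotation: a worked pass of the seen-set scan in findTriplets above, anchored at i = 0 for arr = [1, 0, -3, 2, -1]:

#   j=1: need x = -(1+0) = -1, unseen; remember 0
#   j=2: need x = -(1-3) =  2, unseen; remember -3
#   j=3: need x = -(1+2) = -3, already seen -> emit (-3, 1, 2)
#   j=4: need x = -(1-1) =  0, already seen -> emit (0, 1, -1)
# Each anchor index costs O(n) set lookups, so the whole scan is O(n^2),
# versus the O(n^3) triple loop in findTripletsSlow.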
str(self.eggs) + \" eggs\"\n\n # This function compare whether two objects are equal\n def __eq__(self, other):\n return isinstance(other, Bird) and (self.name == other.name)\n\n # This is the hash function in Bird class\n def __hash__(self):\n return hash(self.name)\n\n # This is the fly function in Bird class\n def fly(self):\n return \"I can fly!\"\n\n # This is the count eggs function in Bird class\n def countEggs(self):\n return self.eggs\n\n # This is the lay egg function in Bird class\n def layEgg(self):\n self.eggs += 1\n\n\n# This is the class of Penguin\nclass Penguin(Bird):\n # This is the fly function in subclass\n def fly(self):\n return \"No flying for me.\"\n\n # This is the swim function in Penguin class\n def swim(self):\n return \"I can swim!\"\n\n\n# This is the class of MessageBird\nclass MessengerBird(Bird):\n # init function in MessageBird class\n def __init__(self, name, message=\"\"):\n super().__init__(name)\n self.message = message\n\n # return the information on MessageBird\n def deliverMessage(self):\n return self.message\n\n\n#################################################\n# Hw8 Asteroid Functions\n# All graphics must go under here to avoid angering the autograder!\n# ignore_rest\n#################################################\n\ndef asteroidCollaborators():\n return \"nobody\"\n\n#### OOP Classes ####\n\n## Asteroid and its subclasses, ShrinkingAsteroid and SplittingAsteroid ##\n\nclass Asteroid(object):\n # Model\n def __init__(self, cx, cy, r, speed, direction):\n # An asteroid has a position, size, speed, and direction\n self.cx = cx\n self.cy = cy\n self.r = r\n self.speed = speed\n self.direction = direction\n\n # View\n def draw(self, canvas, color=\"purple\"):\n canvas.create_oval(self.cx - self.r, self.cy - self.r,\n self.cx + self.r, self.cy + self.r,\n fill=color)\n \n # Controller\n def moveAsteroid(self):\n self.cx += self.speed * self.direction[0]\n self.cy += self.speed * self.direction[1]\n\n # this function is to check whether asteroids collide with wall\n def collidesWithWall(self, width, height):\n # Check if the asteroid hits the wall or overlaps it at all\n return self.cx - self.r <= 0 or self.cx + self.r >= width or \\\n self.cy - self.r <= 0 or self.cy + self.r >= height\n\n # this function the reaction of asteroids collide with wall\n def reactToWallHit(self, screenWidth, screenHeight):\n if self.cx + self.r >= screenWidth:\n self.cx = self.r\n elif self.cx - self.r <= 0:\n self.cx = screenWidth - self.r\n elif self.cy - self.r <= 0:\n self.cy = screenHeight - self.r\n elif self.cy + self.r >= screenHeight:\n self.cy = self.r\n\n\nclass ShrinkingAsteroid(Asteroid):\n # Model\n def __init__(self, cx, cy, r, speed, direction):\n # Shrinking Asteroids also track how fast they shrink\n super().__init__(cx, cy, r, speed, direction)\n self.shrinkAmount = 5\n \n # View\n def draw(self, canvas):\n super().draw(canvas, color=\"pink\")\n \n # Controller\n def reactToWallHit(self, screenWidth, screenHeight):\n # this function the reaction of asteroids collide with wall\n if self.cx + self.r >= screenWidth or\\\n self.cx - self.r <= 0:\n self.direction[0] = - self.direction[0]\n self.direction[1] = - self.direction[1]\n if self.cy - self.r <= 0 or\\\n self.cy + self.r >= screenHeight:\n self.direction[0] = - self.direction[0]\n self.direction[1] = - self.direction[1]\n\n # this function shrinks the radius of asteroids\n def AfterCollidesWithBullet(self):\n self.r = self.r - self.shrinkAmount\n\n\n## Rocket class ##\n\nclass Rocket(object):\n # 
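Annotation: Bird.__eq__ and Bird.__hash__ in the record above both key on self.name, which is what keeps hashing consistent with equality (equal objects must hash equal). A quick sanity check, illustrative only, run alongside the Bird class:

nest = {Bird("Wren"), Bird("Wren")}
assert len(nest) == 1   # equal names collapse to one set entry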
Model\n def __init__(self, cx, cy):\n # A rocket has a position and a current angle it faces\n self.cx = cx\n self.cy = cy\n self.angle = 90\n\n # View\n def draw(self, canvas):\n # Draws a cool-looking triangle-ish shape\n size = 30\n angle = math.radians(self.angle)\n angleChange = 2*math.pi/3\n numPoints = 3\n points = []\n for point in range(numPoints):\n points.append((self.cx + size*math.cos(angle + point*angleChange),\n self.cy - size*math.sin(angle + point*angleChange)))\n points.insert(numPoints-1, (self.cx, self.cy))\n canvas.create_polygon(points, fill=\"green2\")\n\n # Controller\n def rotate(self, numDegrees):\n self.angle += numDegrees\n\n # this is make bullet function\n def makeBullet(self):\n # Generates a bullet heading in the direction the ship is facing\n offset = 35\n x = self.cx + offset*math.cos(math.radians(self.angle)) \n y = self.cy - offset*math.sin(math.radians(self.angle))\n speedLow, speedHigh = 20, 40\n return Bullet(x, y, self.angle, random.randint(speedLow, speedHigh))\n\n## Bullet Class ##\n\nclass Bullet(object):\n # Model\n def __init__(self, cx, cy, angle, speed):\n # A bullet has a position, a size, a direction, and a speed\n self.cx = cx\n self.cy = cy\n self.r = 5\n self.angle = angle\n self.speed = speed\n \n # View\n def draw(self, canvas):\n canvas.create_oval(self.cx - self.r, self.cy - self.r, \n self.cx + self.r, self.cy + self.r,\n fill=\"white\", outline=None)\n\n # Controller\n def moveBullet(self):\n # Move according to the original trajectory\n self.cx += math.cos(math.radians(self.angle))*self.speed\n self.cy -= math.sin(math.radians(self.angle))*self.speed\n\n # this is the function that bullets collides asteroids\n def collidesWithAsteroid(self, other):\n # Check if the bullet and asteroid overlap at all\n if(not isinstance(other, Asteroid)): # Other must be an Asteroid\n return False\n else:\n dist = ((other.cx - self.cx)**2 + (other.cy - self.cy)**2)**0.5\n return dist < self.r + other.r\n\n # this function is to check whether bullets are off screen\n def isOffscreen(self, width, height):\n # Check if the bullet has moved fully offscreen\n return (self.cx + self.r <= 0 or self.cx - self.r >= width) or \\\n (self.cy + self.r <= 0 or self.cy - self.r >= height)\n\n#### Graphics Functions ####\n\nfrom tkinter import *\n\n# this is the model init function\ndef init(data):\n data.rocket = Rocket(data.width//2, data.height//2)\n data.score = 0\n data.counter = 0\n data.bullets = []\n data.asteroids = []\n\n\n# this is the mouse event handler\ndef mousePressed(event, data):\n pass\n\n\n# this is the keyboard handler\ndef keyPressed(event, data):\n if event.keysym == \"Right\":\n data.rocket.rotate(-5)\n elif event.keysym == \"Left\":\n data.rocket.rotate(5)\n elif event.keysym == \"space\":\n data.bullets += [data.rocket.makeBullet()]\n elif event.char == \"r\":\n init(data)\n\n\n# get random element of asteroid\ndef getRandomElement(data):\n r = random.randint(15, 30)\n cx = random.randint(r, data.width - r)\n cy = random.randint(r, data.height - r)\n speed = random.randint(5, 10)\n direction = random.choice([[1, 0], [-1, 0], [0, 1], [0, -1]])\n return cx, cy, r, speed, direction\n\n\n# this function generate normal asteroid\ndef getAsteroidNormal(randomElement):\n return Asteroid(randomElement[0], randomElement[1],\n randomElement[2], randomElement[3],\n randomElement[4])\n\n\n# this function generate shrink asteroid\ndef getAsteroidShrink(randomElement):\n return ShrinkingAsteroid(randomElement[0], randomElement[1],\n randomElement[2], 
randomElement[3],\n randomElement[4])\n\n\n# this is the time handler\ndef timerFired(data):\n data.counter += data.timerDelay\n for bullet in data.bullets:\n bullet.moveBullet()\n if bullet.isOffscreen(data.width, data.height):\n data.bullets.remove(bullet)\n for asteroid in data.asteroids:\n if bullet.collidesWithAsteroid(asteroid):\n data.bullets.remove(bullet)\n if type(asteroid) == Asteroid:\n data.asteroids.remove(asteroid)\n data.score += 1\n elif type(asteroid) == ShrinkingAsteroid:\n if asteroid.r <= 15:\n data.asteroids.remove(asteroid)\n data.score += 1\n else:\n asteroid.AfterCollidesWithBullet()\n if data.counter % 2000 == 0:\n randomElement = getRandomElement(data)\n asteroidNormal = getAsteroidNormal(randomElement)\n asteroidShrink = getAsteroidShrink(randomElement)\n data.asteroids += [random.choice([asteroidNormal, asteroidShrink])]\n for asteroid in data.asteroids:\n asteroid.moveAsteroid()\n if asteroid.collidesWithWall(data.width, data.height):\n asteroid.reactToWallHit(data.width, data.height)\n\n\n# this is the main draw function\ndef redrawAll(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill=\"gray3\")\n data.rocket.draw(canvas)\n for bullet in data.bullets:\n bullet.draw(canvas)\n for asteroid in data.asteroids:\n asteroid.draw(canvas)\n canvas.create_text(data.width/2, data.height, anchor=\"s\", fill=\"yellow\",\n font=\"Arial 24 bold\", text=\"Score: \" + str(data.score))\n\n#################################################################\n# use the run function as-is\n#################################################################\n\ndef runAsteroids(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\n\n\n#################################################\n# Hw8 Test Functions\n#################################################\n\ndef getLocalMethods(clss):\n import types\n # This is a helper function for the test function below.\n # It returns a sorted list of the names of the methods\n # defined in a class. 
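Annotation: the timerFired collision pass above calls data.bullets.remove(bullet) while iterating data.bullets, a pattern that can skip elements. A cautious sketch of the same pass over snapshots (names mirror the handler above; the shrinking-asteroid branch is collapsed for brevity):

for bullet in data.bullets[:]:            # iterate a copy, mutate the real list
    for asteroid in data.asteroids[:]:
        if bullet.collidesWithAsteroid(asteroid):
            data.bullets.remove(bullet)
            data.asteroids.remove(asteroid)
            break                         # this bullet is spent; stop pairing it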
It's okay if you don't fully understand it!\n result = [ ]\n for var in clss.__dict__:\n val = clss.__dict__[var]\n if (isinstance(val, types.FunctionType)):\n result.append(var)\n return sorted(result)\n\ndef testBirdClasses():\n print(\"Testing Bird classes...\", end=\"\")\n # A basic Bird has a species name, can fly, and can lay eggs\n bird1 = Bird(\"Parrot\")\n assert(type(bird1) == Bird)\n assert(isinstance(bird1, Bird))\n assert(bird1.fly() == \"I can fly!\")\n assert(bird1.countEggs() == 0)\n assert(str(bird1) == \"Parrot has 0 eggs\")\n bird1.layEgg()\n assert(bird1.countEggs() == 1)\n assert(str(bird1) == \"Parrot has 1 egg\")\n bird1.layEgg()\n assert(bird1.countEggs() == 2)\n assert(str(bird1) == \"Parrot has 2 eggs\")\n tempBird = Bird(\"Parrot\")\n assert(bird1 == tempBird)\n tempBird = Bird(\"Wren\")\n assert(bird1 != tempBird)\n nest = set()\n assert(bird1 not in nest)\n assert(tempBird not in nest)\n nest.add(bird1)\n assert(bird1 in nest)\n assert(tempBird not in nest)\n nest.remove(bird1)\n assert(bird1 not in nest)\n assert(getLocalMethods(Bird) == ['__eq__','__hash__','__init__', \n '__repr__', 'countEggs', \n 'fly', 'layEgg'])\n \n # A Penguin is a Bird that cannot fly, but can swim\n bird2 = Penguin(\"Emperor Penguin\")\n assert(type(bird2) == Penguin)\n assert(isinstance(bird2, Penguin))\n assert(isinstance(bird2, Bird))\n assert(not isinstance(bird1, Penguin))\n assert(bird2.fly() == \"No flying for me.\")\n assert(bird2.swim() == \"I can swim!\")\n bird2.layEgg()\n assert(bird2.countEggs() == 1)\n assert(str(bird2) == \"Emperor Penguin has 1 egg\")\n assert(getLocalMethods(Penguin) == ['fly', 'swim'])\n \n # A MessengerBird is a Bird that can optionally carry a message\n bird3 = MessengerBird(\"War Pigeon\", message=\"Top-Secret Message!\")\n assert(type(bird3) == MessengerBird)\n assert(isinstance(bird3, MessengerBird))\n assert(isinstance(bird3, Bird))\n assert(not isinstance(bird3, Penguin))\n assert(not isinstance(bird2, MessengerBird))\n assert(not isinstance(bird1, MessengerBird))\n assert(bird3.deliverMessage() == \"Top-Secret Message!\")\n assert(str(bird3) == \"War Pigeon has 0 eggs\")\n assert(bird3.fly() == \"I can fly!\")\n\n bird4 = MessengerBird(\"Homing Pigeon\")\n assert(bird4.deliverMessage() == \"\")\n bird4.layEgg()\n assert(bird4.countEggs() == 1)\n assert(getLocalMethods(MessengerBird) == ['__init__', 'deliverMessage'])\n print(\"Done!\")\n\n\n\n\n#################################################\n# Hw4 Main\n#################################################\n\ndef testAll():\n testBirdClasses()\n # runAsteroids(600, 600)\n\n\ndef main():\n testAll()\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.6727272868156433, "avg_line_length": 27.314285278320312, "blob_id": "735f9bda05268ff2c08ca5db61cc809411668f37", "content_id": "4f7ca921354f40d3305ff5615da6e117f0c6049d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 990, "license_type": "no_license", "max_line_length": 75, "num_lines": 35, "path": "/15112-CMU/112-opencv-tutorial-master/openingVideo.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport cv2\n\nwindow_name = \"Webcam!\"\n\ncam_index = 0 # Default camera is at index 0.\n\n# Create a window to display to\ncv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\n\ncap = cv2.VideoCapture(cam_index) # Video capture object\ncap.open(cam_index) # Enable the camera\n\n# 
Loop indefinitely\nwhile True:\n\n # Read from the camera, getting the image and some return value\n ret, frame = cap.read()\n\n # If frame is valid, display the image to our window\n if frame is not None:\n cv2.imshow(window_name, frame)\n \n # wait for some key with a small timeout.\n # We need the & 0xFF on 64bit systems to strip just the last 8 bits.\n k = cv2.waitKey(1) & 0xFF\n\n # If we hit the escape key, destroy all windows and release the capture\n # object. If we don't release cleanly, we might still have a lock and\n # no one else could use it, which is bad.\n if k == 27: # Escape key\n cv2.destroyAllWindows()\n cap.release()\n break" }, { "alpha_fraction": 0.6844660043716431, "alphanum_fraction": 0.6917475461959839, "avg_line_length": 26.200000762939453, "blob_id": "c38263c6b97e28b963b376e4a0526b6444912721", "content_id": "707a18655d4dc8a2c00d2d1bd73d09c1440ef6ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 91, "num_lines": 15, "path": "/15112-CMU/FIFAworldcup copy/Background.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport colors\nfrom const import *\n\nPATH = './assets/images/field/soccer-field.png'\n\npygame.init()\n\nclass Background(pygame.Surface):\n def __init__(self):\n super(Background, self).__init__((WINDOW_WIDTH, WINDOW_HEIGHT))\n pygame.draw.rect(self, colors.Gainsboro, (0, 0, TABLE_SCORE_WIDTH, TABLE_SCORE_HEIGHT))\n\n img = pygame.image.load(PATH)\n self.blit(img, (0, TABLE_SCORE_HEIGHT))\n " }, { "alpha_fraction": 0.3812263309955597, "alphanum_fraction": 0.48902347683906555, "avg_line_length": 27.465517044067383, "blob_id": "4dc5057d848f920f14ba6afba09d6e43bfb56a71", "content_id": "1eb58bed35739311e929a5900d04190da865cc33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6605, "license_type": "no_license", "max_line_length": 80, "num_lines": 232, "path": "/15112-CMU/week5/hw5.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw5\n# Your andrewID:mxu2\n# Your section: 2N\n#################################################\n\n\nimport copy\n\n#################################################\n# Hw5 COLLABORATIVE problems\n#################################################\n# The problem in this section is COLLABORATIVE, which means you may\n# work on it with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. 
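Annotation: a variation on the capture loop in openingVideo.py above that guarantees the camera lock is released even if the loop body raises. A self-contained sketch, not the tutorial's code:

import cv2

cam_index = 0
window_name = "Webcam!"
cap = cv2.VideoCapture(cam_index)
try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:                        # read failed or camera unplugged
            break
        cv2.imshow(window_name, frame)
        if cv2.waitKey(1) & 0xFF == 27:    # ESC quits
            break
finally:
    cap.release()                          # always give the camera back
    cv2.destroyAllWindows()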
Always list your collaborators!\n\ndef collaborator():\n return \"xiaoqint\"\n\n# find the position of each num in board\ndef findPositionOfNum(num, board):\n rows = len(board)\n cols = rows\n rowOfNum = 0\n colOfNum = 0\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == num:\n rowOfNum = row\n colOfNum = col\n return rowOfNum, colOfNum\n\n# check whether one of the num in board has next step\ndef hasNextStep(num, nextNum, board):\n numRow, numCol = findPositionOfNum(num, board)\n nextNumRow, nextNumCol = findPositionOfNum(nextNum, board)\n if (nextNumRow - numRow) * (nextNumCol - numCol) == 2 or\\\n (nextNumRow - numRow) * (nextNumCol - numCol) == -2:\n return True\n else:\n return False\n\n# return True if board is a Knights Tour and return False otherwise\ndef isKnightsTour(board):\n if board == [[0]]:\n return False\n rows = len(board)\n cols = rows\n compareList = [i for i in range(1, cols ** 2 + 1)]\n tourList = []\n for row in range(rows):\n for col in range(cols):\n tourList.append(board[row][col])\n tourList.sort()\n # check whether tourList meet the requirement of Knights tour\n if compareList != tourList:\n return False\n finalNum = rows * cols\n knightsTour = True\n for num in range(1, finalNum):\n nextNum = num + 1\n if hasNextStep(num, nextNum, board):\n knightsTour = True\n continue\n else:\n knightsTour = False\n break\n return knightsTour\n\n\n# this is the non destructive function to remove\n# rows and cols in a list\ndef nondestructiveRemoveRowAndCol(lst, row, col):\n newLst = copy.deepcopy(lst)\n newLst = newLst[:row] + newLst[row+1:]\n result = []\n for i in range(len(newLst)):\n result += [newLst[i][:col] + newLst[i][col+1:]]\n return result\n\n\n# this is the destructive function to remove\n# rows and cols in a list\ndef destructiveRemoveRowAndCol(lst, row, col):\n lst.pop(row)\n for i in range(len(lst)):\n lst[i].pop(col)\n\n#################################################\n# Hw5 Test Functions\n#################################################\ndef testnondestructiveRemoveRowAndCol():\n print(\"Testing nondestructiveRemoveRowAndCol()...\", end=\"\")\n lst = [[2, 3, 4, 5],\n [8, 7, 6, 5],\n [0, 1, 2, 3]]\n result = [[2, 3, 5],\n [0, 1, 3]]\n # Copy the input list so we can check it later\n lstCopy = copy.deepcopy(lst)\n # The first assert is an ordinary test; the second is a non-destructive test\n assert (nondestructiveRemoveRowAndCol(lst, 1, 2) == result)\n assert (lst == lstCopy), \"input list should not be changed\"\n print(\"passed.\")\n\n\ndef testdestructiveRemoveRowAndCol():\n print(\"Testing destructiveRemoveRowAndCol()...\", end=\"\")\n lst = [[2, 3, 4, 5],\n [8, 7, 6, 5],\n [0, 1, 2, 3]]\n result = [[2, 3, 5],\n [0, 1, 3]]\n # Copy the input list so we can check it later\n lstCopy = copy.deepcopy(lst)\n # The first assert is an ordinary test; the second is a destructive test\n assert (destructiveRemoveRowAndCol(lst, 1, 2) == None)\n assert (lst == result)\n assert (lst != lstCopy), \"input list should be changed\"\n print(\"passed.\")\n\n\ndef testisKnightsTour():\n print(\"Testing isKnightsTour()...\", end=\"\")\n board = [[1, 60, 39, 34, 31, 18, 9, 64],\n\n [38, 35, 32, 61, 10, 63, 30, 17],\n\n [59, 2, 37, 40, 33, 28, 19, 8],\n\n [36, 49, 42, 27, 62, 11, 16, 29],\n\n [43, 58, 3, 50, 41, 24, 7, 20],\n\n [48, 51, 46, 55, 26, 21, 12, 15],\n\n [57, 44, 53, 4, 23, 14, 25, 6],\n\n [52, 47, 56, 45, 54, 5, 22, 13],\n\n ]\n assert (isKnightsTour(board) == True)\n board1 = [[1, 60, 39, 34, 31, 18, 9, 64],\n\n [38, 35, 32, 61, 10, 63, 30, 
17],\n\n [59, 2, 37, 40, 33, 28, 19, 8],\n\n [36, 49, 42, 27, 62, 11, 16, 29],\n\n [43, 58, 3, 50, 41, 24, 7, 20],\n\n [48, 51, 46, 55, 26, 21, 12, 15],\n\n [57, 44, 53, 4, 23, 14, 25, 6],\n\n [52, 47, 56, 45, 54, 5, 22, 1],\n\n ]\n assert (isKnightsTour(board1) == False)\n board2 = [[1, 60, 39, 34, 31, 18, 9, 64],\n\n [38, 35, 32, 61, 10, 63, 30, 17],\n\n [59, 2, 37, 40, 33, 28, 19, 8],\n\n [36, 49, 42, 27, 62, 11, 16, 29],\n\n [43, 58, 3, 50, 41, 24, 7, 20],\n\n [48, 51, 46, 55, 26, 21, 12, 15],\n\n [57, 44, 53, 4, 23, 14, 25, 6],\n\n [52, 47, 56, 45, 54, 5, 22, 13],\n\n ]\n assert (isKnightsTour(board2) == True)\n board3 = [[1, 60, 39, 34, 31, 18, 9, 64],\n\n [38, 35, 32, 61, 10, 63, 30, 17],\n\n [59, 2, 37, 40, 33, 28, 19, 8],\n\n [36, 49, 27, 42, 62, 11, 16, 29],\n\n [43, 58, 3, 50, 41, 24, 7, 20],\n\n [48, 51, 46, 55, 26, 21, 12, 15],\n\n [57, 44, 53, 4, 23, 14, 25, 6],\n\n [52, 47, 56, 45, 54, 5, 22, 13],\n\n ]\n assert (isKnightsTour(board3) == False)\n board4 = [[0, 59, 38, 33, 30, 17, 8, 63],\n\n [37, 34, 31, 60, 9, 62, 29, 16],\n\n [58, 1, 36, 39, 32, 27, 18, 7],\n\n [35, 48, 41, 26, 61, 10, 15, 28],\n\n [42, 57, 2, 49, 40, 23, 6, 19],\n\n [47, 50, 45, 54, 25, 20, 11, 14],\n\n [56, 43, 52, 3, 22, 13, 24, 5],\n\n [51, 46, 55, 44, 53, 4, 21, 12],\n ]\n assert (isKnightsTour(board4) == False)\n board5 = [[3, 6, 1],\n [8, 9, 4],\n [5, 2, 7],\n ]\n assert (isKnightsTour(board5) == False)\n board6 = [[19, 14, 3, 8, 25],\n [4, 9, 18, 13, 2],\n [15, 20, 1, 24, 7],\n [10, 5, 22, 17, 12],\n [21, 16, 11, 6, 23]\n ]\n assert (isKnightsTour(board6) == True)\n print(\"passed.\")\n\n# This is the test All function\ndef testAll():\n testisKnightsTour()\n testnondestructiveRemoveRowAndCol()\n testdestructiveRemoveRowAndCol()\n\n" }, { "alpha_fraction": 0.4085637927055359, "alphanum_fraction": 0.42729705572128296, "avg_line_length": 31.02857208251953, "blob_id": "735628c1881c15ba4cc4d67e599fe710dcdc447e", "content_id": "6682eedf201f8f9407a307a4c6cb338e808862b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 73, "num_lines": 35, "path": "/15112-CMU/week10/sampleFiles/233333.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def substrCount(n, s):\n counter = 0\n # following code count palindrome len of Palindrome str longer than 2\n for i in range(n):\n offset = 1\n while i - offset >= 0 and i + offset < n:\n if offset >= 2:\n if s[i - offset] == s[i + offset] and \\\n s[i - offset] == s[i - 1] and s[i + offset] == s[i - 1]:\n counter += 1\n offset += 1\n continue\n else:\n break\n if offset == 1:\n if s[i - offset] == s[i + offset]:\n counter += 1\n offset += 1\n continue\n else:\n break\n # following code counter repeats like \"aa\"\n repeats = 0\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n repeats += 1\n res = counter + repeats + n # n is the palindrome with len 1\n for i in range(len(s)):\n length = 0\n while s[i + length] == s[i]:\n length += 1\n\n return res\nprint(substrCount(4, \"aaaa\"))\nprint(substrCount(7, \"abcbaba\"))\n" }, { "alpha_fraction": 0.5585862398147583, "alphanum_fraction": 0.5674222111701965, "avg_line_length": 22.241071701049805, "blob_id": "0070904fb81c3a1c4ef7719e98b965030340fbf8", "content_id": "6b7c0e33571059e6e4e3f3f1c8483bd575855028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2603, "license_type": "no_license", "max_line_length": 80, "num_lines": 112, 
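Annotation: the product test in hw5's hasNextStep works because a knight move is exactly a (±1, ±2) or (±2, ±1) displacement, and those are the only integer pairs whose coordinate product has absolute value 2. A self-contained check of that equivalence:

def isKnightMove(dr, dc):
    return abs(dr * dc) == 2

assert isKnightMove(1, 2) and isKnightMove(-2, 1)
assert not isKnightMove(2, 2) and not isKnightMove(0, 2)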
"path": "/15112-CMU/untitled folder 2/practtice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\n\ndef readFile(path):\n with open(path, \"rt\") as f:\n return f.read()\n\ndef writeFile(path, contents):\n with open(path, \"wt\") as f:\n f.write(contents)\n\ndef bestNameFileHelper(path, name):\n if os.path.isfile(path):\n contents = readFile(path).lower()\n occurance = contents.count(name)\n return path, occurance\n else:\n current = (path, 0)\n for filename in os.listdir(path):\n newPath = path + os.sep + filename\n temp = bestNameFileHelper(newPath, name)\n if current[1] <= temp[1]:\n current = temp\n return current\n\n\ndef bestNameFile(path, name):\n name = name.lower()\n return bestNameFileHelper(path, name)[0]\n\n\nclass Circle(object):\n numCircles = 0\n def __init__(self, x, y, r):\n self.x = x\n self.y = y\n self.r = r\n Circle.numCircles += 1\n\n def __eq__(self, other):\n if not isinstance(other, Circle):\n return False\n return self.r == other.r\n\n\n def __repr__(self):\n return \"Circle at(%d, %d) with radius %s\" % (self.x, self.y, self.r)\n\n\n def move(self):\n self.y += 10\n\n\n def __hash__(self):\n return hash(self.r) # unchange self.r\n\n\n def draw(self, canvas):\n canvas.create__oval()\n\n\nclass Bullseye(Circle):\n def __init__(self, x, y, r, numRings):\n super().__init__(x, y, r)\n self.numRings = numRings\n\n def __eq__(self, other):\n return self.r == other.r and self.numRings == other.Rings\n\n def __repr__(self):\n return \"Bullseye at (%d, %d)\"\n\n def draw(self, canvas):\n radius = self.r\n for i in range(self.numRings):\n canvas.create__rectangle()\n radius -= 2\n\n\n #\n # def __hash__(self):\n # return hash((self.x, self.y))\n\nimport random\ndef init(data):\n data.circles = set()\n data.timer = 0\n\ndef keyPressed(event, data):\n newX, newY = (random.randint(0, data.width), random.randint(0, data.height))\n r = random.randint(10, 30)\n circle = Circle(newX, newY, r)\n data.circles.add(circle)\n\ndef mousePressed(event , data):\n r = random.randint(15, 30)\n rings = random.randint(2, 5)\n x, y = event.x, event.y\n data.circles.add(Bullseye(x, y, r, rings))\n\ndef timerFired(data):\n data.timer += 1\n data.circles = set()\n newCircles = set()\n for circle in data.circles:\n if isinstance(circle, Bullseye):\n newCircles.add(circle)\n data.circles = newCircles\n \n\ndef redrawAll(canvas, data):\n for circle in data.circles:\n circle.draw(canvas)\n" }, { "alpha_fraction": 0.5373531579971313, "alphanum_fraction": 0.5495455265045166, "avg_line_length": 24.34831428527832, "blob_id": "8adb90aa11fecbc1a37ff9339532bdfd07805461", "content_id": "d3d1c397b92affefabed5d509b12ba83bc156975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4511, "license_type": "no_license", "max_line_length": 72, "num_lines": 178, "path": "/15112-CMU/week10/midterm practice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def palindromepartition(s):\n sol = []\n return helper(s, sol)\n\n\ndef nonTrivialSol(sol):\n for item in sol:\n if len(item) > 1:\n return True\n return False\n\n\ndef helper(s, sol):\n if len(s) == 0 and nonTrivialSol(sol):\n return sol\n else:\n for i in range(len(s), 0, -1):\n currSubStr = s[0:i]\n if currSubStr == currSubStr[::-1]:\n sol.append(currSubStr)\n temp = helper(s[i:], sol)\n if temp != None:\n return temp\n sol.pop() # undo the move sol.append\n return None\n\n\n\ndef visualizeRecursion(f):\n depth = 0\n def g(*args, 
**kwargs):\n nonlocal depth\n # global depth\n depth += 1\n res = f(*args, **kwargs)\n depth -= 1\n s = \"\\t\" * depth + \"recursion depth: \" \\\n + str(depth) + ', result: ' + str(res)\n print(s)\n return res\n return g\n\n\n@visualizeRecursion\ndef fact(n):\n if n == 0: return 1\n return n * fact(n-1)\nfact(4)\n\n\ndef findTriplets(arr):\n result = set()\n n = len(arr)\n for i in range(n-1):\n s = set()\n for j in range(i+1, n):\n x = - (arr[i] + arr[j]) # represent the\n # third num you're looking for\n if x in s: # if x has been \"seen\" before\n result.add((x, arr[i], arr[j]))\n else:\n s.add(arr[j])\n return result\n\nprint(findTriplets([1,0,-3,2,-1]))\n\nfrom tkinter import *\n\nclass Circle(object):\n def __init__(self, **kwargs):\n self.x = kwargs['x']\n self.y = kwargs['y']\n self.radius = kwargs['r']\n self.color = kwargs['c']\n\n def __eq__(self, other):\n return isinstance(other, Circle) and self.color == other.color \\\n and self.radius == other.radius\n\n def __repr__(self):\n return \"%s circles of radius %d, at position (%d,%d)\" % \\\n (self.color, self.radius, self.x, self.y)\n\n def __hash__(self):\n return hash((self.radius, self.color))\n\n def draw(self, canvas):\n canvas.create_oval(.....)\n pass\n\n\nclass MC(Circle):\n def __init__(self, Vx, Vy, **kwargs):\n self.velocityX = Vx\n self.velocityY = Vy\n super().__init__(**kwargs)\n\n def move(self):\n self.x += self.velocityX\n self.y += self.velocityY\n\ndef init(data):\n data.circles = []\n\n\ndef timerFired(data):\n import random\n data.time += 1\n x = random.randInt(0, data.width)\n y = random.randInt(5, data.height)\n r = random.randInt(5, 40)\n c = random.Choice([\"red\",\"blue\",\"green\"])\n C = Circle(x= x, y= y, r=r,c=c)\n if (data.time % 100 == 0):\n data.add(createCircle(data))\n\n if (data.time % 500 == 0):\n createMovingCircle()\n\n for c in data.circles:\n if type((c) == MC):\n\n\n\n\n\ndef mousePressed(event, data): pass\n\ndef timerFired(data): pass\n\n\n\n####################################\ndef run(width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(500, 500)" }, { "alpha_fraction": 0.5365490913391113, "alphanum_fraction": 0.5699107646942139, "avg_line_length": 26.846153259277344, "blob_id": 
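Annotation: one hygiene tweak for the visualizeRecursion decorator above, illustrative only: copying the wrapped function's metadata with functools.wraps keeps fact.__name__ meaningful after decoration. Same closure-over-depth idea otherwise:

import functools

def visualizeRecursion(f):
    depth = 0
    @functools.wraps(f)                 # preserve f's name and docstring
    def g(*args, **kwargs):
        nonlocal depth                  # one depth counter per decorated f
        depth += 1
        res = f(*args, **kwargs)
        depth -= 1
        print("\t" * depth + "depth %d -> %r" % (depth, res))
        return res
    return g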
"07997917d241a04039d5e2438d7f1f628d8932ef", "content_id": "db81cb4510678a647a27c552bf2f0b4ba3681b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4706, "license_type": "no_license", "max_line_length": 70, "num_lines": 169, "path": "/15112-CMU/week10/Knight tour.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this is the decorator for makeExample2DList\ndef print2DListResult(makeExample2DList):\n def printLst(n):\n lst = makeExample2DList(n)\n rows = len(lst)\n cols = len(lst[0])\n res = ''\n for row in range(rows):\n res += (\"[\" + \" \"*n)\n for col in range(cols):\n numOfspace = n\n lenOfnum = len(str(lst[row][col]))\n if lenOfnum > 1:\n numOfspace -= (lenOfnum - 1)\n res += str(lst[row][col])\n res += \" \" * numOfspace\n res += \"]\\n\"\n return res\n return printLst\n\n# this is the main function for make example 2D list\n@print2DListResult\ndef makeExample2DList(n):\n myList=createKnightBoard(n)\n return myList\n\n# find the position of each num in board\ndef findPositionOfNum(num, board):\n rows = len(board)\n cols = rows\n rowOfNum = 0\n colOfNum = 0\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == num:\n rowOfNum = row\n colOfNum = col\n return rowOfNum, colOfNum\n\n# check whether one of the num in board has next step\ndef hasNextStep(num, nextNum, board):\n numRow, numCol = findPositionOfNum(num, board)\n nextNumRow, nextNumCol = findPositionOfNum(nextNum, board)\n if (nextNumRow - numRow) * (nextNumCol - numCol) == 2 or\\\n (nextNumRow - numRow) * (nextNumCol - numCol) == -2:\n return True\n else:\n return False\n\n# return True if board is a Knights Tour and return False otherwise\ndef isKnightsTour(board):\n if board == None:\n return False\n if board == [[0]]:\n return False\n rows = len(board)\n cols = rows\n compareList = [i for i in range(1, cols ** 2 + 1)]\n tourList = []\n for row in range(rows):\n for col in range(cols):\n tourList.append(board[row][col])\n tourList.sort()\n # check whether tourList meet the requirement of Knights tour\n if compareList != tourList:\n return False\n finalNum = rows * cols\n knightsTour = True\n for num in range(1, finalNum):\n nextNum = num + 1\n if hasNextStep(num, nextNum, board):\n knightsTour = True\n continue\n else:\n knightsTour = False\n break\n return knightsTour\n\n\n# this is the create knight's board function\ndef createKnightBoard(n):\n rows, cols = n, n\n board = []\n for i in range(n):\n board += [[0]*cols]\n return board\n\n\ndef traverse(board, x, y, count):\n rows, cols = len(board), len(board)\n board[x][y] = count\n if count >= rows * cols:\n return board\n directionY = [2, 2, 1, -1, -2, -2, -1, 1]\n directionX = [-1, 1, 2, 2, 1, -1, -2, -2]\n numOfdirection = 8\n for i in range(numOfdirection):\n nextX = x + directionX[i]\n nextY = y + directionY[i]\n if (nextX < 0 or nextX >= cols or nextY < 0 or nextY >= rows)\\\n or board[nextX][nextY] != 0:\n continue\n tmpboard = traverse(board, nextX, nextY, count + 1)\n if tmpboard != None:\n return tmpboard\n # if tmpboard is None, then undo the move\n board[nextX][nextY] = 0\n\n\n# This is the main function for createKnightsTour\ndef createKnightsTour(n):\n if n == 1:\n return [[1]]\n if n == 2:\n return None\n board = createKnightBoard(n)\n startRow = 0\n startCol = 0\n count = 1\n traverse(board, startRow, startCol, count)\n if isKnightsTour(board):\n return board\n else:\n return None\n\n\n\ndef testCreateKnightsTour():\n #The only n=1 board:\n 
board0 = [[1]]\n\n #A few different n=5 boards:\n board1 = [\n [ 1, 20, 9, 14, 3 ],\n [ 10, 15, 2, 19, 24 ],\n [ 21, 8, 25, 4, 13 ],\n [ 16, 11, 6, 23, 18 ],\n [ 7, 22, 17, 12, 5 ],\n ]\n\n board2 = [\n [ 1, 18, 23, 12, 7 ],\n [ 24, 13, 8, 17, 22 ],\n [ 19, 2, 25, 6, 11 ],\n [ 14, 9, 4, 21, 16 ],\n [ 3, 20, 15, 10, 5 ],\n ]\n\n board3 = createKnightsTour(5)\n board6 = createKnightsTour(1)\n\n #Our isKnightsTour function from HW5 should return True for each\n assert(isKnightsTour(board0)==True)\n assert(isKnightsTour(board1)==True)\n assert(isKnightsTour(board2)==True)\n assert(isKnightsTour(board3) ==True)\n assert(createKnightsTour(3) == None)\n assert(createKnightsTour(4) == None)\n assert(isKnightsTour(board6) == True)\n assert(createKnightsTour(2) == None)\n assert (createKnightsTour(1) == [[1]])\n print(\"Passed!\")\n\ntestCreateKnightsTour()\n# print(createKnightsTour(5))\n# print(createKnightsTour(3))\n# print(createKnightsTour(4))\n# print(createKnightsTour(6))\n# print(createKnightsTour(2))\n" }, { "alpha_fraction": 0.5522589087486267, "alphanum_fraction": 0.5569791197776794, "avg_line_length": 36.51948165893555, "blob_id": "973a84b8edd1b9f5c653586d2f482cfb3a8dd23b", "content_id": "1b0721dcb340ae56aa2882ae6714a56f4c0f28d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2966, "license_type": "no_license", "max_line_length": 79, "num_lines": 77, "path": "/15112-CMU/Game_AI/MCTS.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "\r\n# written by Eric Clinch\r\n\r\n###########################################################\r\n# psuedocode for Monte Carlo Tree Search (MCTS) algorithm #\r\n###########################################################\r\n\r\nimport math\r\n\r\nclass MCT(object):\r\n\r\n # creates an unexpanded node\r\n def __init__(self, player):\r\n self.score = 0\r\n self.visits = 0\r\n self.player = player\r\n\r\n # will take moves as keys and have children Monte Carlo Trees as values\r\n self.children = dict()\r\n\r\n # runs a round of the MCTS algorithm and returns the result of the playout\r\n def MCRound(self, board):\r\n if self.visits == 0:\r\n # base case / node expansion stage\r\n moves = board.legalMoves(self.player)\r\n opponent = Minnie if self.player == Maxie else Maxie\r\n for move in moves: # create all the children nodes\r\n self.children[move] = MCT(opponent)\r\n # play a random playout starting from the current board,\r\n # with it starting on self.player's turn.\r\n # Note that randomPlayout must be a nondestructive function,\r\n # and should return 1 if the starting player wins and 0 otherwise\r\n playoutResult = randomPlayout(board, self.player)\r\n self.score += playoutResult\r\n self.visits += 1\r\n return playoutResult\r\n\r\n else:\r\n # recursive case / selection stage\r\n selectedMove = self.selectMove()\r\n selectedChild = self.children[selectedMove]\r\n board.makeMove(selectedMove)\r\n playoutResult = 1 - selectedChild.MCRound(board)\r\n board.undoMove(selectedMove)\r\n self.score += playoutResult\r\n self.visits += 1\r\n return playoutResult\r\n\r\n def getUCB1Score(self, parentVisits):\r\n exploitationTerm = self.score / self.visits\r\n explorationTerm = math.sqrt(2 * math.log(parentVisits) / self.visits)\r\n return exploitationTerm + explorationTerm\r\n\r\n # returns the next move to select in the search\r\n def selectMove(self):\r\n bestScore = 0\r\n bestMove = None\r\n for move in self.children:\r\n child = self.children[move]\r\n if (child.visits == 
0):\r\n # this child has never been visited, so select it\r\n return move\r\n moveScore = child.getUCB1Score(self.visits)\r\n if (bestMove == None or moveScore > bestScore):\r\n bestScore = moveScore\r\n bestMove = move\r\n return bestMove\r\n\r\n def getResultMove(self):\r\n # we'll return the move that we have visited the most\r\n mostVisits = 0\r\n bestMove = None\r\n for move in self.children:\r\n child = self.children[move]\r\n if (bestMove == None or child.visits > mostVisits):\r\n mostVisits = child.visits\r\n bestMove = move\r\n return bestMove" }, { "alpha_fraction": 0.6265791058540344, "alphanum_fraction": 0.6415697932243347, "avg_line_length": 28.690000534057617, "blob_id": "1f5015790ca0ced7a4a54c8a5817d949cbe7d965", "content_id": "b86775a8e136a709e0094900074a3e1e1bd357ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5982, "license_type": "no_license", "max_line_length": 86, "num_lines": 200, "path": "/15112-CMU/week8/recitation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport math, random\n\n'''\nIn a mystical land (The Land of the Kannons), there lives a Kannon named Kerry.\nShe is an aspiring broccoli farmer who hopes to someday make it big in the\norganics industry. Alas, on the 6th of March, 2019, the land is attacked by a\nswarm of enemy missiles and Kerry must protect her family by flinging broccoli\nat the missiles to destroy them (a surprisingly effective tactic)\n(she’s also a vegan pacifist like Sammie the Snek, so she doesn’t have modern\nweaponry).\nWill she succeed?\n\nKerry is in the bottom left corner of the screen, represented by a blue\nrectangle. Kerry has a Kannon (represented by a thick black line of length 100)\nthat shoots broccoli. The kannon can rotate between 0 and 90 degrees. Kerry\nstarts out with 10 family members which is displayed in white text on top of her.\nPressing “Up” and “Down” moves the cannon up/down by 5 degrees.\n\nBroccoli are represented by a circle of radius 10, and move at a speed of 40.\nThey also follow gravity (dy decrease by 1 every timer fired). If the user\nclicks, a new broccoli is shot by kerry in the direction of her kannon.\nMissiles are red circles of random radius between 20 and 50 and spawn every\nhalf second on the top half of the screen to the right. 
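Annotation: a quick numeric check of the UCB1 formula in MCTS.getUCB1Score above. A child with 6 wins in 10 visits under a parent visited 100 times scores 6/10 + sqrt(2 * ln(100) / 10), roughly 0.6 + 0.96:

import math
assert abs((6/10 + math.sqrt(2 * math.log(100) / 10)) - 1.5597) < 1e-3
# The first term rewards strong children; the second decays as a child is
# visited more, which is what balances exploitation against exploration.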
They move at a speed of\n10 to the left.\n\nIf a broccoli collides with a missile, the missile gets destroyed\n(not the broccoli tho, bc broccoli is indestructible)\n\nEvery time a missile gets past Kerry, family decreases by 1 and the\nmissile is removed\n\nOnce Kerry’s family shrinks to 5 people, all new thrown Broccoli becomes purple\n\nOnce Kerry’s health becomes 0, the game ends (time stops).\nYou can press ‘r’ to restart\n'''\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n\n\nclass Kerry(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.familyMember = 10\n self.angle = 0\n\n def rotate(self, dangle):\n maxAngle = 0\n minAngle = 0\n newAngle = self.angle + dangle\n if newAngle > maxAngle:\n newAngle = maxAngle\n self.angle = max(newAngle, maxAngle)\n\n def draw(self, canvas):\n # TODO: draw cannon (the rest have been done for you :)\n cannon = 100\n cx = self.x + cannon * math.cos(math.radians(self.angle))\n cy = self.y - cannon * math.sin(math.radians(self.angle))\n canvas.create_line(self.x, self.y, cx, cy, width=20)\n # draw body\n height = 40\n thiccness = 50\n canvas.create_rectangle(self.x - thiccness, self.y - height,\n self.x + thiccness, self.y + height, fill=\"blue\")\n\n # draw family\n canvas.create_text(self.x, self.y, text=self.familyMembers, fill=\"white\")\n\n def shoot(self):\n # TODO: return a Broccoli\n pass\n\n\nclass Projectile(object):\n def __init__(self, x, y, r, dx, dy, color):\n # TODO: initialize attributes\n pass\n\n def move(self):\n # TODO: move self in direction\n pass\n\n def draw(self, canvas):\n canvas.create_oval(self.x - self.r, self.y - self.r,\n self.x + self.r, self.y + self.r, fill=self.color)\n\n def collidesWith(self, other):\n # TODO: return true if self and other are colliding\n return False\n\n\nclass Missile(Projectile):\n color = \"red\"\n\n def __init__(self, x, y, r):\n # TODO: initialize a missile! You may find it helpful to us Projectiles' init\n pass\n\n def collidesWithAnyBroccoli(self, broccolis):\n # TODO: implement\n return False\n\n\nclass Broccoli(Projectile):\n color = \"green\"\n\n def __init__(self, x, y, angle):\n # TODO: initialize a broccoli! 
You may find it helpful to us Projectiles' init\n pass\n\n def move(self):\n # TODO: add gravity!\n pass\n\n\ndef init(data):\n data.kerry = Kerry(50, data.height - 50)\n data.missles = []\n data.Broccoli = []\n data.count = 0\n\n\ndef mousePressed(event, data):\n pass\n\n\ndef keyPressed(event, data):\n if event.keysym == \"Up\":\n data.kerry.rotate(10)\n elif event.keysym == \"Down\":\n data.kerry.rotate(-10)\n\n\ndef timerFired(data):\n pass\n\n\ndef redrawAll(canvas, data):\n data.kerry.draw(canvas)\n\n\n####################################\n# use the run function as-is\n####################################\n\n# from the 15-112 website\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n # Set up data and call init\n class Struct(object): pass\n\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\n\nrun(800, 800)" }, { "alpha_fraction": 0.6384592652320862, "alphanum_fraction": 0.6518821120262146, "avg_line_length": 25.56589126586914, "blob_id": "f34f12fadad882784413bc4fcb92703c3ebccc16", "content_id": "743faec9c6f8170b3b9611a73c3db60c8b0c58d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3427, "license_type": "no_license", "max_line_length": 77, "num_lines": 129, "path": "/15112-CMU/112-opencv-tutorial-master/opencvTkinterTemplate.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import time\nimport sys\n\n# Tkinter selector\nif sys.version_info[0] < 3:\n from Tkinter import *\n import Tkinter as tk\nelse:\n from tkinter import *\n import tkinter as tk\n\nimport numpy as np\nimport cv2\nfrom PIL import Image, ImageTk\n\ndef opencvToTk(frame):\n \"\"\"Convert an opencv image to a tkinter image, to display in canvas.\"\"\"\n rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(rgb_image)\n tk_image = ImageTk.PhotoImage(image=pil_img)\n return tk_image\n\n\ndef mousePressed(event, data):\n pass\n\n\ndef keyPressed(event, data):\n if event.keysym == \"q\":\n data.root.destroy()\n pass\n\n\ndef timerFired(data):\n pass\n\n\ndef cameraFired(data):\n \"\"\"Called whenever new camera frames are available.\n\n Camera frame is available in data.frame. You could, for example, blur the\n image, and then store that back in data. 
Then, in drawCamera, draw the\n blurred frame (or choose not to).\n \"\"\"\n\n # For example, you can blur the image.\n data.frame = cv2.GaussianBlur(data.frame, (11, 11), 0)\n \n\ndef drawCamera(canvas, data):\n data.tk_image = opencvToTk(data.frame)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tk_image)\n\n\ndef redrawAll(canvas, data):\n drawCamera(canvas, data)\n\n\ndef run(width=300, height=300):\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.camera_index = 0\n\n data.timer_delay = 100 # ms\n data.redraw_delay = 50 # ms\n \n # Initialize the webcams\n camera = cv2.VideoCapture(data.camera_index)\n data.camera = camera\n\n # Make tkinter window and canvas\n data.root = Tk()\n canvas = Canvas(data.root, width=data.width, height=data.height)\n canvas.pack()\n\n # Basic bindings. Note that only timer events will redraw.\n data.root.bind(\"<Button-1>\", lambda event: mousePressed(event, data))\n data.root.bind(\"<Key>\", lambda event: keyPressed(event, data))\n\n # Timer fired needs a wrapper. This is for periodic events.\n def timerFiredWrapper(data):\n # Ensuring that the code runs at roughly the right periodicity\n start = time.time()\n timerFired(data)\n end = time.time()\n diff_ms = (end - start) * 1000\n delay = int(max(data.timer_delay - diff_ms, 0))\n data.root.after(delay, lambda: timerFiredWrapper(data))\n\n # Wait a timer delay before beginning, to allow everything else to\n # initialize first.\n data.root.after(data.timer_delay, \n lambda: timerFiredWrapper(data))\n\n def redrawAllWrapper(canvas, data):\n start = time.time()\n\n # Get the camera frame and get it processed.\n _, data.frame = data.camera.read()\n cameraFired(data)\n\n # Redrawing code\n canvas.delete(ALL)\n redrawAll(canvas, data)\n\n # Calculate delay accordingly\n end = time.time()\n diff_ms = (end - start) * 1000\n\n # Have at least a 5ms delay between redraw. 
Ideally higher is better.\n delay = int(max(data.redraw_delay - diff_ms, 5))\n\n data.root.after(delay, lambda: redrawAllWrapper(canvas, data))\n\n # Start drawing immediately\n data.root.after(0, lambda: redrawAllWrapper(canvas, data))\n\n # Loop tkinter\n data.root.mainloop()\n\n # Once the loop is done, release the camera.\n print(\"Releasing camera!\")\n data.camera.release()\n\nif __name__ == \"__main__\":\n run(800, 800)\n" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.53125, "avg_line_length": 27.125, "blob_id": "1b16f732177010a361704357af1493217c1def8d", "content_id": "617dbb9dad6d3a4e397c365270ef1ffaa127bb3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 47, "num_lines": 8, "path": "/15112-CMU/week4 cold cold/sub.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def subsets(lst):\n output = [[]]\n for i in range(len(lst)):\n for j in range(len(output)):\n output.append(output[j] + [lst[i]])\n output.remove([])\n return output\nprint(subsets([\"a\",\"b\",\"c\",\"d\"]))" }, { "alpha_fraction": 0.5594059228897095, "alphanum_fraction": 0.5724335312843323, "avg_line_length": 27.399999618530273, "blob_id": "1ba8d4a513173499483a63a44920852b60a466de", "content_id": "61fdf403830ec45755fa55aded303041aeaf88fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3838, "license_type": "no_license", "max_line_length": 79, "num_lines": 135, "path": "/15112-CMU/week10/practice2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\nclass Circle(object):\n def __init__(self, **kwargs):\n self.x = kwargs[\"x\"]\n self.y = kwargs[\"y\"]\n self.r = kwargs[\"r\"]\n self.color = kwargs[\"c\"]\n\n def hash(self):\n return hash((self.r, self.color))\n\n def __eq__(self, other):\n return isinstance(other, Circle) and \\\n self.r == other.r\n\n def __repr__(self):\n return \"%s circle of radius %d, at position(%d, %d)\"\\\n % (self.color, self.r, self.x, self.y)\n\n def draw(self, canvas):\n canvas.create_oval(self.x - self.r, self.y - self.r,\n self.x + self.r, self.y + self.r, fill = self.color)\n\n\nclass MC(Circle):\n def __init__(self, *args, **kwargs):\n super.__init__(**kwargs)\n self.velocityX = args[0]\n self.velocityY = args[1]\n\n def move(self):\n self.x += self.velocityX\n self.y += self.velocityY\n\n\n####################################\n# customize these functions\n####################################\ndef generateCircle(data):\n import random\n x = random.randint(0, data.width)\n y = random.randint(0, data.height)\n r = random.randint(5, 40)\n color = random.choice([\"red\",\"green\",\"blue\"])\n c = Circle(x = x, y =y, r =r, c = color)\n if c not in data.circles:\n return c\n\ndef movingCircle(data):\n import random\n x = random.randint(0, data.width)\n y = random.randint(0, data.height)\n r = random.randint(5, 40)\n color = random.choice([\"red\", \"green\", \"blue\"])\n vx = random.randint(5, 10)\n vy = random.randint(5, 10)\n c = MC(vx, vy, x=x, y=y, r=r, c=color)\n if c not in data.circles:\n return c\n\ndef init(data):\n data.circles = set()\n data.timer = 0\n\n\ndef mousePressed(event, data):\n # use event.x and event.y\n pass\n\ndef keyPressed(event, data):\n # use event.char and event.keysym\n pass\n\ndef timerFired(data):\n data.timer += data.timerDelay\n if data.timer % 1000 == 0:\n data.circles.add(generateCircle(data))\n if 
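Annotation: the pacing code above subtracts the measured work time from the period so callbacks fire at a roughly fixed rate. Worked numbers for the redraw case:

#   budget 50 ms, drawing took 12 ms -> delay = int(max(50 - 12, 5)) == 38
#   a slow 60 ms frame -> delay = int(max(50 - 60, 5)) == 5
# so a long frame is clamped to the 5 ms floor instead of going negative.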
data.timer % 5000 == 0:\n data.circles.add(movingCircle(data))\n\n\ndef redrawAll(canvas, data):\n # draw in canvas\n pass\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 200)\n\n\n\n\n" }, { "alpha_fraction": 0.3752345144748688, "alphanum_fraction": 0.399624764919281, "avg_line_length": 23.136363983154297, "blob_id": "abea8fe67176026572bedd08bd83a578799dcf36", "content_id": "60bd931b92228d506932020f8429441023da69c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 51, "num_lines": 22, "path": "/15112-CMU/week7/practice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def helper(n, lst):\n if 0 not in lst:\n return lst\n for i in range(len(lst)):\n if i + n + 1 <= len(lst):\n if lst[i] == 0 and lst[i + n + 1] == 0:\n lst[i] = n\n lst[i + n + 1] = n\n tmp = helper(n - 1, lst)\n if tmp is not None:\n return tmp\n lst[i] = 0\n lst[i + n + 1] = 0\n return None\n\n\ndef distList(n):\n lenOflst = 2*n\n lst = [0]*lenOflst\n return helper(n, lst)\n\nprint(distList(4))\n\n\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 19, "blob_id": "7d26871d6499d06d84fdd9ff172679eb60086de9", "content_id": "7492d72dc53e9c4dc81c1e38faf89568a0435e4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "no_license", "max_line_length": 19, "num_lines": 1, "path": "/15112-CMU/week2/rec2/testtt.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "print (\"I am here\")" }, { "alpha_fraction": 0.49834325909614563, "alphanum_fraction": 0.5056328773498535, "avg_line_length": 24.576271057128906, "blob_id": "028c1487bc81ae580fbd7dc6b07b23769fa0a025", "content_id": "2d31d046affddd2156a09d36d7c96af2eed86df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3018, "license_type": "no_license", "max_line_length": 95, "num_lines": 118, "path": "/15112-CMU/week9/balanced 
parent.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def checkStr(s):\n stack = []\n for char in s:\n if char in ['(']:\n stack.append(char)\n else:\n if stack == []:\n return False\n top = stack.pop()\n if (top == \"(\" and char != \")\"):\n return False\n return True\n\n\ndef permutations(a):\n if (len(a) == 0):\n return [ [] ]\n else:\n allPerms = [ ]\n for i in range(len(a)):\n partialPermutations = permutations(a[:i] + a[i+1:])\n for subPermutation in partialPermutations:\n allPerms.append([ a[i] ] + subPermutation)\n return allPerms\n\n\ndef generateValidParentheses(n):\n res = set()\n if n == 0:\n return set()\n elif n % 2 != 0:\n return set()\n numOfleft = int(n / 2)\n numOfRight = int(n / 2)\n s = []\n for i in range(numOfleft):\n s += [\"(\"]\n for j in range(numOfRight):\n s += [\")\"]\n allPerms = permutations(s)\n allPermsStr = []\n for lst in allPerms:\n string = \"\"\n for char in lst:\n string += char\n allPermsStr += [string]\n for parent in allPermsStr:\n if checkStr(parent):\n res.add(parent)\n return res\n\n\ndef testGenerateValidParentheses():\n print(\"Testing generateValidParentheses...\", end=\"\")\n assert(generateValidParentheses(4) == { \"(())\", \"()()\" })\n assert(generateValidParentheses(6) == { \"((()))\", \"()(())\", \"(())()\", \"(()())\", \"()()()\" })\n assert(generateValidParentheses(5) == set())\n assert(generateValidParentheses(0) == set())\n print(\"Passed!\")\n\n# print(checkStr(\"(())\"))\n# print(checkStr(\"())(\"))\n# print(checkStr(\")()(\"))\nprint(generateValidParentheses(4))\nprint(generateValidParentheses(6))\nprint(generateValidParentheses(5))\nprint(generateValidParentheses(0))\n\ndef checkStr(s):\n stack = []\n for char in s:\n if char in ['(']:\n stack.append(char)\n else:\n if stack == []:\n return False\n top = stack.pop()\n if (top == \"(\" and char != \")\"):\n return False\n return True\n\n\ndef permutations(a):\n if (len(a) == 0):\n return [ [] ]\n else:\n allPerms = [ ]\n for i in range(len(a)):\n partialPermutations = permutations(a[:i] + a[i+1:])\n for subPermutation in partialPermutations:\n allPerms.append([ a[i] ] + subPermutation)\n return allPerms\n\n\ndef generateValidParentheses(n):\n res = set()\n if n == 0:\n return set()\n elif n % 2 != 0:\n return set()\n numOfleft = int(n / 2)\n numOfRight = int(n / 2)\n s = []\n for i in range(numOfleft):\n s += [\"(\"]\n for j in range(numOfRight):\n s += [\")\"]\n allPerms = permutations(s)\n allPermsStr = []\n for lst in allPerms:\n string = \"\"\n for char in lst:\n string += char\n allPermsStr += [string]\n for parent in allPermsStr:\n if checkStr(parent):\n res.add(parent)\n return res\n" }, { "alpha_fraction": 0.5867311954498291, "alphanum_fraction": 0.5877677798271179, "avg_line_length": 30.45652198791504, "blob_id": "63af5ba0e8ca388b66a271e118e4accc696917e4", "content_id": "0787996fe61a66dd7330810f1b2f7cf4d570f0f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2894, "license_type": "no_license", "max_line_length": 97, "num_lines": 92, "path": "/15112-CMU/demo copy.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\ndef printFiles(path):\n # Base Case: a file. Print the path name directly.\n if os.path.isfile(path):\n print(path)\n else:\n # Recursive Case: a directory. 
Iterate through its files and directories.\n # Note that we have to update the path name to access the inner files!\n print(os.listdir(path))\n for filename in os.listdir(path):\n printFiles(path + \"/\" + filename)\n\nprintFiles(\"sampleFiles\")\n\n# Note: if you see .DS_Store files in the sampleFiles folders, or in the\n# output of your function (as often happens with Macs, in particular),\n# don't worry; this is just a metadata file and can be safely ignored.\nprint(\"###########################\")\n\ndef listFiles(path):\n if os.path.isfile(path):\n # Base Case: return a list of just this file\n return [ path ]\n else:\n # Recursive Case: create a list of all the recursive results from the files in the folder\n files = [ ]\n for filename in os.listdir(path):\n # print(filename)\n files += listFiles(path + \"/\" + filename)\n return files\n\nprint(listFiles(\"sampleFiles\"))\n\n\nimport os\ndef removeTmpFiles(path):\n if path.split(\"/\")[-1] == '.DS_Store':\n os.remove(path)\n elif os.path.isdir(path):\n for filename in os.listdir(path):\n removeTmpFiles(path + \"/\" + filename)\n\n\n# def findLargestFile(path):\n# if os.path.isfile(path):\n# return path\n# else:\n# largestFile = 0\n# largestFilePath = \"\"\n# for filename in os.listdir(path):\n# if filename.startswith('.'):\n# continue\n# tempPath = findLargestFile(path + \"/\" + filename)\n# if os.path.isfile(tempPath):\n# temp = os.path.getsize(tempPath)\n# if largestFile < temp:\n# largestFile = temp\n# largestFilePath = tempPath\n# return largestFilePath\n\n\n# print(removeTmpFiles(\"sampleFiles\"))\nprint(\"\")\n\ndef findLargestFileHelper(path, res, largestFilePath):\n if os.path.isfile(path):\n return path\n else:\n for filename in os.listdir(path):\n if filename == '.DS_Store':\n continue\n tmpPath = findLargestFileHelper(path + \"/\" + filename,\n res, largestFilePath)\n if not os.path.isfile(tmpPath):\n continue\n tmpValue = os.path.getsize(tmpPath)\n if tmpValue >= largestFilePath:\n largestFilePath = tmpValue\n res = tmpPath\n return res\n\n\ndef findLargestFile(path):\n largestFilePath = 0\n res = \"\"\n return findLargestFileHelper(path, res, largestFilePath)\n\n\n\nprint(findLargestFile(\"sampleFiles/folderA\"))\nprint(findLargestFile(\"sampleFiles/folderB\"))\nprint(findLargestFile(\"sampleFiles/folderB/folderF\"))\n" }, { "alpha_fraction": 0.5032967329025269, "alphanum_fraction": 0.5098901391029358, "avg_line_length": 14.724138259887695, "blob_id": "a057c385a865311a9ec288fc944790b354ec1cb9", "content_id": "dcfd57ca5cdd7b4716caad6bfbdef4a374ac8cc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 44, "num_lines": 29, "path": "/15112-CMU/week4 cold cold/permutation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# function to generate all the sub lists\ndef getSubLists(lst):\n # store all the sub lists\n subLists = []\n # first loop\n for i in range(len(lst)):\n # second loop\n for j in range(i + 1, len(lst) + 1):\n # slice the sub array\n sub = lst[i:j]\n print(sub)\n subLists.append(sub)\n # return subLists\n\n\nprint(getSubLists([\"a\",\"b\",\"c\",\"d\"]))\n\n\n\n\n\n\n\n\n\n\n\n#\n# print(combine([\"a\",\"b\",\"c\",\"d\"], 3))" }, { "alpha_fraction": 0.5171042084693909, "alphanum_fraction": 0.5274463295936584, "avg_line_length": 25.16666603088379, "blob_id": "8832824fd2e709f1ba81bd9f27aa7695491aa2de", "content_id": "f5fa3b16ba481aeb4c0e8d505a52f1d6e966ae90", 
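Editorial aside, not part of the dataset records around it: the findLargestFile helpers in the demo copy.py record above thread res and largestFilePath through the recursion by hand. A minimal sketch of the same search using os.walk, assuming path names an existing directory (find_largest_file is a hypothetical name, not from the record); it returns "" when no regular files are found.

import os

def find_largest_file(path):
    # walk every subdirectory, tracking the biggest file seen so far
    largest_size, largest_path = -1, ""
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            if name == ".DS_Store":  # skip the Mac metadata files the record warns about
                continue
            full = os.path.join(dirpath, name)
            size = os.path.getsize(full)
            if size > largest_size:
                largest_size, largest_path = size, full
    return largest_path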
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 60, "num_lines": 48, "path": "/15112-CMU/week10/ex1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def isNotTrivialSol(L):\n for c in L:\n if len(c) != 1:\n return True\n return False\n\ndef palinidromePartitionHelper(res, s):\n if len(s) == 0 and isNotTrivialSol(res):\n return res\n if len(s) == 0:\n return None\n else:\n for i in range(len(s), 0, -1):\n if s[:i] == s[:i][::-1]:\n res.append(s[:i])\n tmp = palinidromePartitionHelper(res, s[i:])\n if tmp is not None:\n return tmp\n res.pop()\n return None\n\n\ndef palinidromePartition(s):\n res = []\n return palinidromePartitionHelper(res, s)\n\nprint(palinidromePartition(\"geeks\"))\nprint(palinidromePartition(\"abc\"))\nprint(palinidromePartition(\"racecar\"))\nprint(palinidromePartition(\"abba\"))\nprint(palinidromePartition(\"abbc\"))\n\n\ndef findTriplets(arr):\n result = set()\n n = len(arr)\n for i in range(n-1):\n s = set()\n for j in range(i+1, n):\n x = - (arr[i] + arr[j]) # represent the\n # third num you're looking for\n if x in s: # if x has been \"seen\" before\n result.add((x, arr[i], arr[j]))\n else:\n s.add(arr[j])\n return result\n\nprint(findTriplets([1, 0, -3, 2, -1]))\n\n" }, { "alpha_fraction": 0.5168595910072327, "alphanum_fraction": 0.5269250273704529, "avg_line_length": 24.151899337768555, "blob_id": "98e40554de5eb8fd5f2d44a7f83bb9d2a3404d60", "content_id": "26276a2847f3f0366a312ca0a0c4b183c73781bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1987, "license_type": "no_license", "max_line_length": 81, "num_lines": 79, "path": "/15112-CMU/week9/solutions.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def loadBalance(lst):\n pass\n\nimport os\ndef printFiles(path):\n # Base Case: a file. Print the path name directly.\n if os.path.isfile(path):\n print(path)\n else:\n # Recursive Case: a directory. 
Iterate through its files and directories.\n # Note that we have to update the path name to access the inner files!\n for filename in os.listdir(path):\n printFiles(path + \"/\" + filename)\n\n\n\ndef generateValidParentheses(n):\n if n == 0 or n % 2 != 0:\n return set()\n return gvpr(n, set())\n\ndef gvpr(n, parens):\n if n == 0:\n return parens\n if parens == set():\n return gvpr(n - 1, {\"(\"})\n else:\n newParens = set()\n for s in parens:\n openP = s.count(\"(\")\n closedP = s.count(\")\")\n if openP > closedP:\n newParens.add(s + \")\")\n if openP - closedP < n:\n newParens.add(s + \"(\")\n return gvpr(n - 1, newParens)\n\nprint(generateValidParentheses(4))\n\n\n\na = [\"wow\", [ [ ] ], [True, 'gosh']]\nb = ['a', ['b'], ['c',['d','e'],'f']]\ndef flattenString(lst):\n res = []\n for item in lst:\n if not (isinstance(item, str) or isinstance(item, list)):\n continue\n elif isinstance(item, str):\n res += [item]\n elif isinstance(item, list):\n res += (flattenString(item))\n return res\n\n\n# return flatten integers\ndef flatten(lst):\n res = []\n for i in lst:\n if not lst:\n return []\n if not (isinstance(i, int) or isinstance(i, list)):\n continue\n if type(i) == int:\n res += [i]\n elif isinstance(i, list):\n res += flatten(i)\n return res\n\n\nprint(flatten([[[[]]],[[\"1\"]],\"1\",[[True]],2,[[2],[3]]]))\nprint(flatten([True]))\nprint(flattenString(a))\nprint(flattenString(b))\nprint(flattenString(([[[[]]],[[\"1\"]],\"1\",[[True]],2,[[2],[3]]])))\nprint(type(True))\nprint(isinstance(True, int))\nprint(hash(True))\nprint(hash(112))\n" }, { "alpha_fraction": 0.5326228737831116, "alphanum_fraction": 0.5982829928398132, "avg_line_length": 33.24543762207031, "blob_id": "da031763205d996504b06d31eeca0d82fd6e1ccb", "content_id": "c565f348a381c7cf5f931289b4e61b1b361ba19e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16890, "license_type": "no_license", "max_line_length": 136, "num_lines": 493, "path": "/15112-CMU/week1/week1/hw1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw1\n# Your andrewID:mxu2\n# Your section: 2 N\n#################################################\n\nimport cs112_s19_week1_linter\n\n# For collaborative problems, you must list your collaborators!\n# Each collaborative problem has a function which you should modify to \n# return a comma-separated string with the andrewIDs of your collaborators.\n# Here is an example which you should not modify!\ndef exampleCollaborators():\n return \"yiqizhou, afu1, justinau\"\n\n#################################################\n# Lab1 COLLABORATIVE LAB problems \n# (Their problem descriptions will be released Friday, Jan 18)\n#################################################\n# The problems in this section are LAB PROBLEMS, which means you MUST\n# work on these with at least one collaborator. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n# For lab problems, YOU MUST LIST AT LEAST ONE COLLABORATOR\n\n\n#### distance is a COLLABORATIVE problem ####\n# Modify the output of distanceCollaborators to return the andrewIDs\n# of your collaborators as a string. 
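Editorial aside: the closing prints in the solutions.py record above probe a gotcha its flatten() quietly depends on: bool is a subclass of int, so isinstance(True, int) is True and hash(True) == hash(1). A sketch of a flatten that rejects booleans by checking the exact type instead (flatten_ints is a hypothetical name, not from the record):

def flatten_ints(lst):
    res = []
    for item in lst:
        if type(item) is int:  # unlike isinstance, this rejects True/False
            res.append(item)
        elif isinstance(item, list):
            res.extend(flatten_ints(item))
    return res

assert flatten_ints([[[[]]], [["1"]], "1", [[True]], 2, [[2], [3]]]) == [2, 2, 3]
assert flatten_ints([True]) == []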
Separate each with a comma.\n\nimport math # import math module\n\ndef distanceCollaborators():\n return \"afu1\"\n\ndef distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n \n#### isRightTriangle is a COLLABORATIVE problem ####\n# Modify the output of isRightTriangleCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef isRightTriangleCollaborators():\n return \"yiqizhou\"\n\n# The following helper function is from lecture notes\ndef almostEqual(x, y):\n return abs(x - y) < 10**-9\n\n\ndef isRightTriangle(x1, y1, x2, y2, x3, y3):\n return almostEqual(distance(x1,y1,x2,y2)**2 + distance(x2,y2,x3,y3)**2, distance(x1,y1,x3,y3)**2)\\\n or almostEqual(distance(x1,y1,x3,y3)**2 + distance(x2,y2,x3,y3)**2, distance(x1,y1,x2,y2)**2)\\\n or almostEqual(distance(x1,y1,x3,y3)**2 + distance(x2,y2,x1,y1)**2, distance(x2,y2,x3,y3)**2)\n\n\n#### roundPegRectangularHole and rectangularPegRoundHole are COLLABORATIVE ####\n# Modify the output of pegProblemCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef pegProblemCollaborators():\n return \"justinau\"\n \ndef roundPegRectangularHole(r, w, h):\n minsideofrec = min(w, h)\n if 2 * r <= minsideofrec:\n return True\n else:\n return False\n \ndef rectangularPegRoundHole(r, w, h):\n #halfdiag = ((w/2)**2 + (h/2)**2)**0.5\n halfdiag = (w ** 2 + h ** 2) ** 0.5 * 0.5\n if halfdiag <= r:\n return True\n else:\n return False\n\n \n#################################################\n# Hw1 COLLABORATIVE problem\n#################################################\n# The problems in this section are COLLABORATIVE, which means you may\n# work on them with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n \n#### nearestOdd is a COLLABORATIVE problem ####\n# Modify the output of nearestOddCollaborators to return the andrewIDs\n# of your collaborators as a string. Separate each with a comma.\ndef nearestOddCollaborators():\n return \"nobody\"\n\n\n# The following function is from this week's lecture notes\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n # You do not need to understand how this function works.\n import decimal\n rounding = decimal.ROUND_HALF_UP\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n\ndef nearestOdd(n):\n if type(n) == int:\n if n % 2 == 1:\n return n\n else:\n return n - 1\n elif type(n) == float:\n if n < 0:\n n = abs(n)\n if roundHalfUp(n) % 2 == 1:\n return roundHalfUp(n) * -1\n elif roundHalfUp(n) % 2 == 0:\n if abs(roundHalfUp(n) + 1 - n) < abs(n - (roundHalfUp(n) - 1)):\n return (roundHalfUp(n) + 1) * -1\n elif abs(roundHalfUp(n) + 1 - n) == abs(n - (roundHalfUp(n) - 1)):\n return (roundHalfUp(n) + 1) * -1\n elif abs(roundHalfUp(n) + 1 - n) > abs(n - (roundHalfUp(n) - 1)):\n return (roundHalfUp(n) - 1) * -1\n elif n > 0:\n if roundHalfUp(n) % 2 == 1:\n return roundHalfUp(n)\n elif roundHalfUp(n) % 2 == 0:\n if abs(roundHalfUp(n) + 1 - n) < abs(n - (roundHalfUp(n) - 1)):\n return roundHalfUp(n) + 1\n elif abs(roundHalfUp(n) + 1 - n) == abs(n - (roundHalfUp(n) - 1)):\n return roundHalfUp(n) - 1\n elif abs(roundHalfUp(n) + 1 - n) > abs(n - (roundHalfUp(n) - 1)):\n return roundHalfUp(n) - 1\n\n\n#### colorBlender is a COLLABORATIVE problem ####\n# Modify the output of colorBlenderCollaborators to return the andrewIDs\n# of your collaborators as a string. 
Separate each with a comma.\ndef colorBlenderCollaborators():\n return \"nobody\"\n\n\ndef transformRGB(rgb1, rgb2, midpoints, n):\n blue1 = rgb1%10**3\n green1 = (rgb1//10**3)%(10**3)\n red1 = (rgb1//10**3)//10**3\n blue2 = rgb2%10**3\n green2 = (rgb2//10**3)%(10**3)\n red2 = (rgb2//10**3)//10**3\n equalblue = (blue1 - blue2)/(midpoints + 1)\n equalgreen = (green1 - green2)/(midpoints + 1)\n equalred = (red1 - red2)/(midpoints + 1)\n targetblue = roundHalfUp(blue1 - equalblue * n)\n targetgreen = roundHalfUp(green1 - equalgreen * n)\n targetred = roundHalfUp(red1 - equalred * n)\n targetred = str(targetred)\n if len(str(targetgreen)) == 1:\n targetgreen = \"00\" + str(targetgreen)\n elif len(str(targetgreen)) == 2:\n targetgreen = \"0\" + str(targetgreen)\n else:\n targetgreen = str(targetgreen)\n\n if len(str(targetblue)) == 1:\n targetblue = \"00\" + str(targetblue)\n elif len(str(targetblue)) == 2:\n targetblue = \"0\" + str(targetblue)\n else:\n targetblue = str(targetblue)\n\n return targetred + targetgreen + targetblue\n\ndef colorBlender(rgb1, rgb2, midpoints, n):\n if n < 0 or n > (midpoints + 1):\n return None\n elif 0 <= n <= (midpoints + 1):\n return int(transformRGB(rgb1, rgb2, midpoints, n))\n\n\n\n\n#################################################\n# Hw1 SOLO problems\n#################################################\n# These problems must be completed WITHOUT COLLABORATION. See the collaboration\n# policy in the syllabus for more details. You may always use piazza, \n# office hours, and other official 15-112 course resources for questions.\n\n\ndef syllabusAnswer():\n return \"\"\"\n1: Family/Personal Emergencies\n2: Students may only use electronic devices in lecture during learning activities which involve those devices\n3: No I can not!\n4: If I missing my laptop/phone on a specific lecture day, or if the form does not load for me,\n I may approach the instructors personally at the end of class to be marked as attending\n5: I submit a 15-112 Regrade Request form on line within three weeks of the time that the contested grade was released.\n\"\"\"\n\n\ndef debuggingAnswer():\n return \"This is a logical error because if an even integer is greater than 10 such as 14, this function will return None\" \\\n \"What I add to fix this problem is to add two lines of codes after the conditional if statement if x % 2 == 1: return True\" \\\n \"add two lines of codes if x % 2 == 1: return True \\ else: return False \"\n\n\ndef rocAnswer():\n return 126\n\n\n#### the following three functions go together ####\n# Note: You'll need to use distance(x1,y1,x2,y2) as a helper function!\n# Wait to do this problem until after you write distance in Friday's lab\ndef lineIntersection(m1, b1, m2, b2):\n if m1 == m2:\n return None\n else:\n return (b1 - b2)/(m2 - m1)\n\n\ndef triangleArea(s1, s2, s3):\n s = 0.5 * (s1 + s2 + s3)\n return math.sqrt(s * (s - s1) * (s - s2) * (s - s3))\n\n\ndef threeLinesArea(m1, b1, m2, b2, m3, b3):\n if m1 == m2 or m1 == m3 or m2 == m3 or m1 == m2 == m3:\n return 0\n else:\n x1 = lineIntersection(m1, b1, m2, b2)\n y1 = m1 * x1 + b1\n x2 = lineIntersection(m1, b1, m3, b3)\n y2 = m3 * x2 + b3\n x3 = lineIntersection(m2, b2, m3, b3)\n y3 = m2 * x3 + b2\n d1 = distance(x1, y1, x2, y2)\n d2 = distance(x1, y1, x3, y3)\n d3 = distance(x2, y2, x3, y3)\n return triangleArea(d1, d2, d3)\n\n\n#### the following two functions go together ####\n\ndef getKthDigit(n, k):\n if n < 0:\n n = abs(n)\n if n == 0:\n return 0\n if k == 0:\n return n % 10\n elif n // (10 ** k) == 0:\n 
return n // 10 ** k\n else:\n return n // (10 ** k) % 10\n\n\ndef setKthDigit(n, k, d):\n if n < 0:\n n = abs(n)\n if k == 0:\n return ((n // 10) * 10 + d) * -1\n if n // 10 ** k == 0:\n return (10 ** k * d + n) * -1\n else:\n currentDigit = getKthDigit(n, k)\n return (n - currentDigit * (10 ** k) + d * (10 ** k)) * -1\n if k == 0:\n return (n // 10) * 10 + d\n if n // 10 ** k == 0:\n return 10 ** k * d + n\n else:\n currentDigit = getKthDigit(n, k)\n return n - currentDigit * (10 ** k) + d * (10 ** k)\n\n\n#### bonusFindIntRootsOfCubic is a bonus problem, and therefore optional ####\n# Note: Bonus problems are solo. Do not collaborate on bonus problems. \n \ndef bonusFindIntRootsOfCubic(a, b, c, d):\n return\n\n\n\n\n\n\n#################################################\n# Hw1 Test Functions\n# ignore_rest\n#################################################\n\ndef testDistance():\n import math\n print(\"Testing distance()...\", end=\"\")\n assert(math.isclose(distance(0, 0, 1, 1), 2**0.5))\n assert(math.isclose(distance(3, 3, -3, -3), 6*2**0.5))\n assert(math.isclose(distance(20, 20, 23, 24), 5))\n print(\"Passed.\")\n\ndef testIsRightTriangle():\n print('Testing isRightTriangle()... ', end='')\n assert(isRightTriangle(0, 0, 0, 3, 4, 0) == True)\n assert(isRightTriangle(1, 1.3, 1.4, 1, 1, 1) == True)\n assert(isRightTriangle(9, 9.12, 8.95, 9, 9, 9) == True)\n assert(isRightTriangle(0, 0, 0, math.pi, math.e, 0) == True)\n assert(isRightTriangle(0, 0, 1, 1, 2, 0) == True)\n assert(isRightTriangle(0, 0, 1, 2, 2, 0) == False)\n assert(isRightTriangle(1, 0, 0, 3, 4, 0) == False)\n print('Passed.')\n\n \ndef testRoundPegRectangularHole():\n print(\"Testing roundPegRectangularHole()...\", end=\"\")\n assert(roundPegRectangularHole(1,2,3)==True)\n assert(roundPegRectangularHole(4,5,6)==False)\n assert(roundPegRectangularHole(1,20,10)==True)\n assert(roundPegRectangularHole(10,2,30)==False)\n print(\"Passed.\")\n \ndef testRectangularPegRoundHole():\n print(\"Testing rectangularPegRoundHole()...\", end=\"\")\n assert(rectangularPegRoundHole(1,2,3)==False)\n assert(rectangularPegRoundHole(5,4,6)==True)\n assert(rectangularPegRoundHole(2,4,4)==False)\n assert(rectangularPegRoundHole(5,8,6)==True)\n assert(rectangularPegRoundHole(6,10,8)==False)\n print(\"Passed.\")\n \ndef testNearestOdd():\n print('Testing nearestOdd()... 
', end='')\n assert(nearestOdd(13) == 13)\n assert(nearestOdd(12.001) == 13)\n assert(nearestOdd(12) == 11)\n assert(nearestOdd(11.999) == 11)\n assert(nearestOdd(-13) == -13)\n assert(nearestOdd(-12.001) == -13)\n assert(nearestOdd(-12) == -13)\n assert(nearestOdd(-11.999) == -11)\n print('Passed.')\n\ndef testColorBlender():\n print(\"Testing colorBlender()...\", end=\"\")\n # http://meyerweb.com/eric/tools/color-blend/#DC143C:BDFCC9:3:rgbd\n assert(colorBlender(220020060, 189252201, 3, -1) == None)\n assert(colorBlender(220020060, 189252201, 3, 0) == 220020060)\n assert(colorBlender(220020060, 189252201, 3, 1) == 212078095)\n assert(colorBlender(220020060, 189252201, 3, 2) == 205136131)\n assert(colorBlender(220020060, 189252201, 3, 3) == 197194166)\n assert(colorBlender(220020060, 189252201, 3, 4) == 189252201)\n assert(colorBlender(220020060, 189252201, 3, 5) == None)\n # http://meyerweb.com/eric/tools/color-blend/#0100FF:FF0280:2:rgbd\n assert(colorBlender(1000255, 255002128, 2, -1) == None)\n assert(colorBlender(1000255, 255002128, 2, 0) == 1000255)\n assert(colorBlender(1000255, 255002128, 2, 1) == 86001213)\n assert(colorBlender(1000255, 255002128, 2, 2) == 170001170)\n assert(colorBlender(1000255, 255002128, 2, 3) == 255002128)\n print(\"Passed.\")\n\ndef testSyllabusAnswer():\n print(\"Your answer to the syllabus question is:\")\n print(syllabusAnswer())\n print(\"The TAs will grade this later.\")\n print()\n\ndef testDebuggingAnswer():\n print(\"Your answer to the debugging question is:\")\n print(debuggingAnswer())\n print(\"The TAs will grade this later.\")\n print()\n\ndef roc(x):\n if type(x) != int:\n return False\n elif x <= 120:\n return False\n elif x % 100 == x - 100:\n a = x // 10\n b = x % 10\n if a != 2 * b:\n return False\n return True\n else:\n return x == 42\n\ndef testRocAnswer():\n print(\"Testing rocAnswer()...\", end=\"\")\n answer = rocAnswer()\n assert(roc(answer) == True)\n print(\"Passed.\")\n\ndef testLineIntersection():\n import math\n print(\"Testing lineIntersection()...\", end=\"\")\n assert(lineIntersection(2.5, 3, 2.5, 11) == None)\n assert(lineIntersection(25, 3, 25, 11) == None)\n # y=3x-5 and y=x+5 intersect at (5,10)\n assert(math.isclose(lineIntersection(3,-5,1,5), 5))\n # y=10x and y=-4x+35 intersect at (2.5,25)\n assert(math.isclose(lineIntersection(10,0,-4,35), 2.5))\n print(\"Passed.\")\n\ndef testTriangleArea():\n import math\n print(\"Testing triangleArea()...\", end=\"\")\n assert(math.isclose(triangleArea(3,4,5), 6))\n assert(math.isclose(triangleArea(2**0.5, 1, 1), 0.5))\n assert(math.isclose(triangleArea(2**0.5, 2**0.5, 2), 1))\n print(\"Passed.\")\n\ndef testThreeLinesArea():\n import math\n print(\"Testing threeLinesArea()...\", end=\"\")\n assert(math.isclose(threeLinesArea(1, 2, 3, 4, 5, 6), 0))\n assert(math.isclose(threeLinesArea(0, 7, 1, 0, -1, 2), 36))\n assert(math.isclose(threeLinesArea(0, 3, -.5, -5, 1, 3), 42.66666666666))\n assert(math.isclose(threeLinesArea(1, -5, 0, -2, 2, 2), 25))\n assert(math.isclose(threeLinesArea(0, -9.75, -6, 2.25, 1, -4.75), 21))\n print(\"Passed.\")\n\ndef testGetKthDigit():\n print(\"Testing getKthDigit()...\", end=\"\")\n assert(getKthDigit(809, 0) == 9)\n assert(getKthDigit(809, 1) == 0)\n assert(getKthDigit(809, 2) == 8)\n assert(getKthDigit(809, 3) == 0)\n assert(getKthDigit(0, 100) == 0)\n assert(getKthDigit(-809, 0) == 9)\n print(\"Passed.\")\n\ndef testSetKthDigit():\n print(\"Testing setKthDigit()...\", end=\"\")\n assert(setKthDigit(809, 0, 7) == 807)\n assert(setKthDigit(809, 1, 7) 
== 879)\n assert(setKthDigit(809, 2, 7) == 709)\n assert(setKthDigit(809, 3, 7) == 7809)\n assert(setKthDigit(0, 4, 7) == 70000)\n assert(setKthDigit(-809, 0, 7) == -807)\n print(\"Passed.\")\n\ndef getCubicCoeffs(k, root1, root2, root3):\n # Given roots e,f,g and vertical scale k, we can find\n # the coefficients a,b,c,d as such:\n # k(x-e)(x-f)(x-g) =\n # k(x-e)(x^2 - (f+g)x + fg)\n # kx^3 - k(e+f+g)x^2 + k(ef+fg+eg)x - kefg\n e,f,g = root1, root2, root3\n return k, -k*(e+f+g), k*(e*f+f*g+e*g), -k*e*f*g\n\ndef testFindIntRootsOfCubicCase(k, z1, z2, z3):\n import math\n a,b,c,d = getCubicCoeffs(k, z1, z2, z3)\n result1, result2, result3 = bonusFindIntRootsOfCubic(a,b,c,d)\n m1 = min(z1, z2, z3)\n m3 = max(z1, z2, z3)\n m2 = (z1+z2+z3)-(m1+m3)\n actual = (m1, m2, m3)\n assert(math.isclose(m1, result1))\n assert(math.isclose(m2, result2))\n assert(math.isclose(m3, result3))\n\ndef testBonusFindIntRootsOfCubic():\n print(\"Testing bonusFindIntRootsOfCubic()...\", end=\"\")\n testFindIntRootsOfCubicCase(5, 1, 3, 2)\n testFindIntRootsOfCubicCase(2, 5, 33, 7)\n testFindIntRootsOfCubicCase(-18, 24, 3, -8)\n testFindIntRootsOfCubicCase(1, 2, 3, 4)\n print(\"Passed.\")\n\n#################################################\n# Hw1 Main\n#################################################\n\ndef testAll():\n testDistance()\n testIsRightTriangle()\n testRoundPegRectangularHole()\n testRectangularPegRoundHole()\n testNearestOdd()\n testColorBlender()\n testSyllabusAnswer()\n testDebuggingAnswer()\n testRocAnswer()\n testLineIntersection()\n testTriangleArea()\n testThreeLinesArea()\n testGetKthDigit()\n testSetKthDigit()\n\n\n \n #Uncomment the next line if you want to try the bonus!\n #testBonusFindIntRootsOfCubic() \n\ndef main():\n cs112_s19_week1_linter.lint() # check for banned tokens\n testAll()\n\nif __name__ == '__main__':\n main()\n\n\n\n " }, { "alpha_fraction": 0.5777431130409241, "alphanum_fraction": 0.5949037671089172, "avg_line_length": 33.9636344909668, "blob_id": "4a1c39481cdb7bd00beba240f1475bcd99966b00", "content_id": "cd5f52d77f6823a453421ed694971008c8825e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1923, "license_type": "no_license", "max_line_length": 76, "num_lines": 55, "path": "/15112-CMU/week3/test1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def applyCaesarCipher(message, shiftNum):\n cipherText = \"\"\n for ch in message:\n if ch.isspace():\n finalLetter = ch\n cipherText += finalLetter\n\n elif ch.isalpha() and ch.islower() and shiftNum > 0:\n stayInAlphabet = ord(ch) + shiftNum\n if stayInAlphabet > ord('z'):\n stayInAlphabet -= 26\n finalLetter = chr(stayInAlphabet)\n cipherText += finalLetter\n\n elif ch.isalpha() and ch.islower() and shiftNum < 0:\n stayInAlphabet = ord(ch) + shiftNum\n if stayInAlphabet < ord('a'):\n stayInAlphabet += 26\n finalLetter = chr(stayInAlphabet)\n cipherText += finalLetter\n\n elif ch.isalpha() and ch.isupper() and shiftNum > 0:\n stayInAlphabet = ord(ch) + shiftNum\n if stayInAlphabet > ord('Z'):\n stayInAlphabet -= 26\n finalLetter = chr(stayInAlphabet)\n cipherText += finalLetter\n\n elif ch.isalpha() and ch.isupper() and shiftNum < 0:\n stayInAlphabet = ord(ch) + shiftNum\n if stayInAlphabet < ord('A'):\n stayInAlphabet += 26\n finalLetter = chr(stayInAlphabet)\n cipherText += finalLetter\n else:\n cipherText += ch\n return cipherText\n\n\n\n\ndef testApplyCaesarCipher():\n print(\"Testing applyCaesarCipher()...\", end=\"\")\n 
assert(applyCaesarCipher(\"abcdefghijklmnopqrstuvwxyz\", 3) == \\\n \"defghijklmnopqrstuvwxyzabc\")\n assert(applyCaesarCipher(\"We Attack At Dawn\", 1) == \"Xf Buubdl Bu Ebxo\")\n assert(applyCaesarCipher(\"1234\", 6) == \"1234\")\n print(\"Passed.\")\n\ntestApplyCaesarCipher()\n# applyCaesarCipher(\"We Attack At Dawn\", 1)\n# applyCaesarCipher(\"abcdefghijklmnopqrstuvwxyz\", 3)\n# applyCaesarCipher(\"zodiac\", -2)\n# applyCaesarCipher(\"1234\", 6)\nprint(applyCaesarCipher('What. An. Evil! Test? Case@[`{', 25))\n" }, { "alpha_fraction": 0.4841812252998352, "alphanum_fraction": 0.5393571257591248, "avg_line_length": 28.70676612854004, "blob_id": "9b8748e5e22628c7894e498e66f0b46979a387b3", "content_id": "7bb67621c9b5086756253bcdb0d6727ddb2cf3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3951, "license_type": "no_license", "max_line_length": 62, "num_lines": 133, "path": "/15112-CMU/week2/test3.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "\ndef isKaprekarNumber(n):\n if n < 1:\n return False\n lenOfNum = len(str(n))\n A = n**2 // 10**lenOfNum\n B = n**2 % 10**lenOfNum\n if A + B == n:\n return True\n else:\n return False\n\n\ndef nthKaprekarNumber(n):\n found = -1 # !!!!!\n guess = 0\n while (found < n):\n guess += 1\n if (isKaprekarNumber(guess)):\n found += 1\n return guess\n\n\ndef nearestKaprekarNumber(n):\n if n <= 1:\n return 1\n count = 1\n if type(n) == int:\n if isKaprekarNumber(n):\n return n\n while not isKaprekarNumber(n):\n a = n - count\n if isKaprekarNumber(a):\n return a\n b = n + count\n if isKaprekarNumber(b):\n return b\n count += 1\n\n if type(n) == float:\n import math\n rawnum = n\n count = 1\n if (n - int(n)) <= 0.5:\n if isKaprekarNumber(int(n)):\n return int(n)\n while not isKaprekarNumber(int(n)):\n a = int(n) - count\n if isKaprekarNumber(a):\n targeta = a\n break\n count += 1\n count = 1\n while not isKaprekarNumber(int(n)):\n b = int(n) + count\n if isKaprekarNumber(b):\n targetb = b\n break\n count += 1\n if abs(rawnum - targeta) <= abs(targetb - rawnum):\n return targeta\n if abs(rawnum - targeta) > abs(targetb - rawnum):\n return targetb\n\n if (n - int(n)) > 0.5:\n if isKaprekarNumber(math.ceil(n)):\n return math.ceil(n)\n while not isKaprekarNumber(math.ceil(n)):\n a = math.ceil(n) - count\n if isKaprekarNumber(a):\n targeta = a\n # print(targeta)\n break\n count += 1\n count = 1\n while not isKaprekarNumber(math.ceil(n)):\n b = math.ceil(n) + count\n if isKaprekarNumber(b):\n targetb = b\n # print(targetb)\n break\n count += 1\n if abs(rawnum - targeta) <= abs(targetb - rawnum):\n return targeta\n if abs(rawnum - targeta) > abs(targetb - rawnum):\n return targetb\n\n\n\n\n\n\n\n\n\n\nprint(nearestKaprekarNumber(9376543))\n# print(nearestKaprekarNumber(2475.51))\n# print(nearestKaprekarNumber(4.99999999))\nprint(\"\")\nprint(nearestKaprekarNumber(5.51))\n\n\ndef testNearestKaprekarNumber():\n print(\"Testing nearestKaprekarNumber()...\", end=\"\")\n print(\"Testing nearestKaprekarNumber()...\", end=\"\")\n assert(nearestKaprekarNumber(1) == 1)\n assert(nearestKaprekarNumber(0) == 1)\n assert(nearestKaprekarNumber(-1) == 1)\n assert(nearestKaprekarNumber(-2) == 1)\n assert(nearestKaprekarNumber(-12345) == 1)\n assert(nearestKaprekarNumber(1.234) == 1)\n assert(nearestKaprekarNumber(4.99999999) == 1)\n assert(nearestKaprekarNumber(100.99999999) == 99)\n assert(nearestKaprekarNumber(5) == 1)\n assert(nearestKaprekarNumber(5.00000001) == 9)\n 
assert(nearestKaprekarNumber(27) == 9)\n assert(nearestKaprekarNumber(28) == 45)\n assert(nearestKaprekarNumber(45) == 45)\n assert(nearestKaprekarNumber(50) == 45)\n assert(nearestKaprekarNumber(51) == 55)\n assert(nearestKaprekarNumber(1611) == 999)\n assert(nearestKaprekarNumber(1612) == 2223)\n assert(nearestKaprekarNumber(2475.4) == 2223)\n assert(nearestKaprekarNumber(2475.5) == 2223)\n assert(nearestKaprekarNumber(2475.51) == 2728)\n assert(nearestKaprekarNumber(2475.6) == 2728)\n assert(nearestKaprekarNumber(995123) == 994708)\n assert(nearestKaprekarNumber(9376543) == 9372385)\n assert(nearestKaprekarNumber(13641234) == 13641364)\n print(\"Passed.\")\n\n\ntestNearestKaprekarNumber()" }, { "alpha_fraction": 0.6536712050437927, "alphanum_fraction": 0.6770630478858948, "avg_line_length": 31.02083396911621, "blob_id": "fa4324934ae2473312cc9bd46744ec587b1310cc", "content_id": "6f57204fe3d757682f1e8e5fb350e4d9250c828b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 105, "num_lines": 48, "path": "/15112-CMU/FIFAworldcup copy2/SettingBoard.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport colors\nfrom const import *\nfrom Text import *\n\npygame.init()\n\nWIDTH = WITH_SETTING_BOARD\nHEIGHT = HEIGHT_SETTING_BOARD\n\nMARGIN = WIDTH / 6\nBUTTON_WIDTH = 100\nBUTTON_HEIGHT = 50\nclass SettingBoard(pygame.Surface):\n def __init__(self): \n super(SettingBoard, self).__init__((WIDTH, HEIGHT))\n self.fill(colors.Blue)\n Text.showSettingText('SETTING', colors.White,self, TABLE_SCORE_HEIGHT)\n self.newGameRect = self.makeButton(\"NEW GAME\", TABLE_SCORE_HEIGHT*1, 20 )\n self.pVp = self.makeButton(\"PvP\", TABLE_SCORE_HEIGHT*2, 70 )\n self.computer = self.makeButton(\"COMPUTER\", TABLE_SCORE_HEIGHT*3, 30 )\n\n def renderSettingButton(self):\n self.settingButton(\"SETTING\", 0, 700)\n\n def makeButton(self ,s, hight , margin=0):\n rect = pygame.Rect(MARGIN , hight*3 , WIDTH_OF_PAUSE_GAME - MARGIN*2, BUTTON_HEIGHT)\n titleSurf, titleRect = Text.makeTextObject(s, colors.Green)\n pygame.draw.rect(self, colors.Black, rect)\n self.blit(titleSurf, (MARGIN + margin, hight*3 + 5 , WIDTH_OF_PAUSE_GAME - MARGIN*2, BUTTON_HEIGHT))\n return rect\n\n def click(self, x, y ):\n x = x - (WINDOW_WIDTH - WIDTH) / 2\n y = y - (WINDOW_HEIGHT - HEIGHT) / 2\n if( self.newGameRect.collidepoint(x,y)):\n return self.handleNewGame()\n if( self.pVp.collidepoint(x,y)):\n return self.handlepVp()\n if( self.computer.collidepoint(x,y)):\n return self.handleComputer()\n \n def handleNewGame(self):\n return 1\n def handlepVp(self):\n print(2222) \n def handleComputer(self):\n print(3333) \n" }, { "alpha_fraction": 0.5087146162986755, "alphanum_fraction": 0.516339898109436, "avg_line_length": 21.096385955810547, "blob_id": "916df918ea3476a59b9c0438e2efeb1bdc8cafd6", "content_id": "8ec7c9454c938e0aabbfc85be6f2b3550645d5a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1836, "license_type": "no_license", "max_line_length": 43, "num_lines": 83, "path": "/15112-CMU/week10/palindrome.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def getSubStrings(s):\n res = []\n for i in range(len(s) + 1):\n for j in range(i, len(s)+1):\n currStr = s[i:j]\n print(currStr)\n if currStr == currStr[::-1]\\\n and len(currStr) != 1 \\\n and currStr != \"\":\n res.append(currStr)\n for c in s:\n 
res.append(c)\n return res\n\n# print(getSubStrings(\"geeks\"))\n# print(getSubStrings(\"aaaa\"))\n\n\n\ndef getSubStr(s):\n if len(s) == 1:\n return s\n else:\n partialStr = getSubStr(s[1:])\n res = []\n for subStr in partialStr:\n res.append(subStr)\n res.append(s[0] + subStr)\n return res\n\n# print(getSubStr(\"aaaa\"))\n# print(getSubStr(\"geeks\"))\n\ndef getSubStr(s):\n if len(s) == 1:\n return s\n else:\n partialStr = getSubStr(s[1:])\n res = []\n for subStr in partialStr:\n res.append(subStr)\n res.append(s[0] + subStr)\n return res\n\ndef isValid(char):\n if char == char[::-1]:\n return True\n\n# Complete the substrCount function below.\ndef substrCount(n, s):\n res = []\n allSubStr = getSubStr(s)\n print(allSubStr)\n for char in allSubStr:\n if isValid(char):\n res.append(char)\n print(res)\n return len(res)\n\n# print(substrCount(5, 'asasd'))\n# print(getSubStr(\"asasd\"))\n\ndef is_palindrome(s):\n if s == s[:: -1]:\n return True\n\ndef partition(s):\n res = []\n # Generate all the combination\n for i in range(1, len(s)):\n head = s[:i]\n if is_palindrome(head):\n rest = partition(s[i:])\n # print(rest)\n for elem in rest:\n res.append([head] + elem)\n if is_palindrome(s):\n res.append([s])\n return res\n\n\nprint(partition(\"geeks\"))\nprint(partition(\"geeksabccbaabc\"))\n\n\n" }, { "alpha_fraction": 0.600655734539032, "alphanum_fraction": 0.6131147742271423, "avg_line_length": 29.5, "blob_id": "55fb1c77b28c60413eddcf45ebe498b84fef4a4e", "content_id": "63487e9a515875cec6c732c899f43bc4ed93d112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1525, "license_type": "no_license", "max_line_length": 103, "num_lines": 50, "path": "/15112-CMU/week9/test2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# Problem: given a list, a, produce a list containing all the possible subsets of a.\ndef powerset(a):\n # Base case: the only possible subset of an empty list is the empty list.\n if (len(a) == 0):\n return [ [] ]\n else:\n # Recursive Case: remove the first element, then find all subsets of the remaining list.\n # Then duplicate each subset into two versions: one without the first element, and one with it.\n partialSubsets = powerset(a[1:])\n allSubsets = [ ]\n for subset in partialSubsets:\n allSubsets.append(subset)\n allSubsets.append([a[0]] + subset)\n return allSubsets\n\n\ndef getOtherPart(lst, left):\n import copy\n lfCopy = copy.deepcopy(left)\n res = []\n for c in lst:\n if c not in lfCopy:\n res += [c]\n else:\n lfCopy.remove(c)\n return res\n\n\ndef divideAlistIntoTwoParts(lst):\n res = []\n allSubsets = powerset(lst)\n # print(allSubsets)\n for left in allSubsets:\n otherPart = getOtherPart(lst, left)\n res.append((left, otherPart))\n return res\n\ndef loadBalance(lst):\n res = None\n minivalue = sum(lst)\n allPossibleOutcomes = divideAlistIntoTwoParts(lst)\n for outcome in allPossibleOutcomes:\n if abs(sum(outcome[0]) - sum(outcome[1])) <= minivalue:\n minivalue = abs(sum(outcome[0]) - sum(outcome[1]))\n res = outcome\n return res\n\nprint(loadBalance([0, 1, 2]))\nprint(loadBalance([3, 6, 1, 7, 9, 8, 22, 3]))\nprint(loadBalance([]))\n" }, { "alpha_fraction": 0.6145833134651184, "alphanum_fraction": 0.6261574029922485, "avg_line_length": 77.45454406738281, "blob_id": "72e8679c297aa3a257d4a6d30eef46550feb1fc4", "content_id": "67616358362b96e8b2df6f2577aeedfb601c31e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 864, 
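Editorial aside: loadBalance in the test2.py record above enumerates every split through a recursive powerset, an O(2**n) approach its own tests keep small. A sketch of the same idea with the standard library, assuming lst is a short list of numbers (powerset_ and load_balance are hypothetical names, not from the record):

from itertools import chain, combinations

def powerset_(lst):
    # all subsets, smallest first
    return [list(c) for c in chain.from_iterable(
        combinations(lst, r) for r in range(len(lst) + 1))]

def load_balance(lst):
    total = sum(lst)
    # the best left half minimizes |sum(left) - sum(rest)| = |2*sum(left) - total|
    left = min(powerset_(lst), key=lambda s: abs(2 * sum(s) - total))
    rest = list(lst)
    for x in left:
        rest.remove(x)  # multiset difference, as getOtherPart does above
    return left, rest

left, rest = load_balance([0, 1, 2])
assert abs(sum(left) - sum(rest)) == 1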
"license_type": "no_license", "max_line_length": 108, "num_lines": 11, "path": "/15112-CMU/Design Proposal and TP/TP/readme.txt", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "1.Hello everyone, my final project for the course 15112 is called miniFIFAonline3\nThe main purpose of my project is to make people have fun in their free time\n2.In order to run this game your just need to run the __init__ file, and the game will run automatically run\n3. Besides, you need to install pygame module in order to run it\n4 shortcut commands : press \"s\" on keyboard to change players or passball\n press \"d\" on keyboard to steal ball or shoot\n press \"a\" on keyboard to cross ball\n press \"e\" on keyboard to speed up the player you currently control\n press 'up', 'down', 'left', 'right', 'up' + 'left',\n 'up' + 'right', 'down' + 'left', 'down' + 'right' on keyboard to control players\n press \"r\" on keyboard to restart the game\n\n" }, { "alpha_fraction": 0.33981525897979736, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 31.920000076293945, "blob_id": "64504acdf4728eae74258cf44ae0eb551bbd5b66", "content_id": "3b2fb48d2f90d5b28a29af864b6145ec4de5cf9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4114, "license_type": "no_license", "max_line_length": 82, "num_lines": 125, "path": "/15112-CMU/week3/test6.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\ndef makeUpString(text):\n lines = text.split(\"\\n\")\n maxLenOfLine = len(max(lines, key=len))\n result = \"\"\n for line in lines:\n blanks = maxLenOfLine - len(line)\n result += (line + blanks * \" \" + \"\\n\")\n result = result[:-1]\n result = \"\".join(result)\n return result\n\ndef findRowOfCanvas(text):\n text = makeUpString(text)\n height = 0\n for c in text:\n if c == \"\\n\":\n height += 1\n height += 1\n return height\n\ndef findColOfcanvas(text):\n text = makeUpString(text)\n column = 0\n for line in text.split(\"\\n\"):\n column = len(line)\n break\n return column\n\n\ndef asciiDraw(canvas, artStr, width, height):\n newText = makeUpString(artStr)\n rowOfCanvas = findRowOfCanvas(newText)\n colOfCanvas = findColOfcanvas(newText)\n heightOfRectangle = height / rowOfCanvas\n widthOfRectangle = width / colOfCanvas\n j = 0\n for line in newText.split(\"\\n\"):\n for i in range(len(line)):\n left = 0 + i * widthOfRectangle\n top = 0 + j * heightOfRectangle\n right = left + widthOfRectangle\n bottom = top + heightOfRectangle\n color = \"\"\n if line[i] == \"0\":color = \"#000\"\n elif line[i] == \"1\": color = \"#00F\"\n elif line[i] == \"2\": color = \"#0F0\"\n elif line[i] == \"3\": color = \"#0FF\"\n elif line[i] == \"4\": color = \"#F00\"\n elif line[i] == \"5\": color = \"#F0F\"\n elif line[i] == \"6\": color = \"#FF0\"\n elif line[i] == \"7\": color = \"#FFF\"\n canvas.create_rectangle(left, top, right, bottom, fill=color, width=0)\n j += 1\n\n\ndef runAsciiDraw(artStr, width, height):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n asciiDraw(canvas, artStr, width, height)\n root.mainloop()\n\n\nartStr = ''' \n 1 2 4\n 111 222 444\n11111 22222 44444\n 111 222 444\n 1 2 4\n '''\n\n\nfacePattern = ''' \n 0022222222222222222\n 02222222222222222222222220\n 02222222222222222222222222222220 02 02 02\n 0 0 0 
02222222222222222222222222222222222220 02 22 2202\n0 2 2 02 0222222222 2222222222222 2222222220 02202202\n022222202 0222222222 22222222222 22222222220 02222222\n 0222222 02222222222 22222222222 22222222222222222222222\n 02222222222222222222222 2222222222222 22222222222222 0222\n 022202222222222222222222222222222222222222222222222222222 0222\n 022 022222222222222222222222222222222222222222222222222 02220\n 0220 222222222222222222222222222222222222222222222222222 2220\n 022 222222222222222222222222222222222222222222222000222222022220\n 0222022222 2222222222222222222222222222222222222 022222222222222222\n 0222 202222 2222222222222222222222222222222222 02220\n 0222 0222 022222222222222222222222222220 0222\n 02220 02222222222222222220220 022\n 0220 02202222220 0222\n 02220 02220\n 022220 02222220022220 02222\n 0222220 022222222220 022220\n 0222220 022222222222220\n 02222222022222222222\n 022222222222\n 022222222222\n 02222222220\n 02220\n\n '''\n\n\n\nprint(repr(artStr))\nprint(artStr)\nprint(artStr.split(\"\\n\"))\nprint(\"hhh\")\nprint(makeUpString(artStr))\n# print(makeUpString(artStr).split(\"\\n\"))\nprint(\"hhhhhhhh\")\nprint(makeUpString(facePattern))\n\n# print(findRowOfCanvas(artStr))\n# print(findRowOfCanvas(\"0123\\n4567\"))\n# print(repr(makeUpString(\"0123\\n4567\")))\nprint(\"hhhhhh\")\n# print(findColOfcanvas(artStr))\n# asciiDraw(artStr, 600, 300)\n\nrunAsciiDraw(facePattern, 800, 600)" }, { "alpha_fraction": 0.5666463375091553, "alphanum_fraction": 0.5946341753005981, "avg_line_length": 27.82425308227539, "blob_id": "583d229d5890a88afb641575643c5bd97770c679", "content_id": "80f41e3f8604808fb850d6bb96de78b863cc5576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16400, "license_type": "no_license", "max_line_length": 95, "num_lines": 569, "path": "/15112-CMU/week4 cold cold/hw4.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw4\n# Your andrewID:mxu2\n# Your section: 2N\n#################################################\n\nimport math\nimport copy\n \n#################################################\n# Hw4 COLLABORATIVE problems\n#################################################\n# The problem in this section is COLLABORATIVE, which means you may\n# work on it with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. 
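Editorial aside: asciiDraw in the test6.py record above maps the digit characters 0 through 7 to hex colors with a chain of elifs; a dict collapses that to one lookup. A sketch using the file's own palette (COLORS and color_for are hypothetical names, not from the record):

COLORS = {"0": "#000", "1": "#00F", "2": "#0F0", "3": "#0FF",
          "4": "#F00", "5": "#F0F", "6": "#FF0", "7": "#FFF"}

def color_for(ch):
    # unknown characters (the spaces in the art) keep the empty default color
    return COLORS.get(ch, "")

assert color_for("4") == "#F00"
assert color_for(" ") == ""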
Always list your collaborators!\n\n\ndef lookAndSayCollaborators():\n return \"nobody\"\n\n\n# get the result in function lookAndSay\ndef getResult(i, listNum, count, result):\n if listNum[i] == listNum[i - 1]:\n result.append((count, listNum[i - 1]))\n else:\n result.append((1, listNum[i]))\n\n\n# Return \"reading off\" of the list using the look-and-say method\ndef lookAndSay(lst):\n listNum = list(lst)\n result = []\n count = 1\n if listNum == []:\n return []\n elif listNum != []:\n for i in range(len(listNum)):\n if (i + 1) < len(listNum) and listNum[i + 1] == listNum[i]:\n count += 1\n else:\n if i == 0 and listNum[i + 1] != listNum[i]:\n result.append((1, listNum[0]))\n elif i == 0 and listNum[i + 1] == listNum[i]:\n continue\n else:\n getResult(i, listNum, count, result)\n count = 1\n return result\n\n\ndef inverseLookAndSayCollaborators():\n return \"nobody\"\n\n\n# Return the inverse list of the function lookAndSay\ndef inverseLookAndSay(lst):\n result = []\n for element in lst:\n n = element[0]\n num = element[1]\n while n > 0:\n result.append(num)\n n -= 1\n return result\n\n#################################################\n# Hw4 SOLO problems\n#################################################\n\n\n# Remove all the repeats in a list without destruct original list\ndef nondestructiveRemoveRepeats(lst):\n result = []\n for num in lst:\n if num not in result:\n result.append(num)\n return result\n\n\n# Remove all the repeats in a list by destructing the original list\ndef destructiveRemoveRepeats(lst):\n for i in range(len(lst) - 1, -1, -1):\n if lst.count(lst[i]) > 1:\n lst.pop(i)\n\n# get value of each letter\ndef valueOfLetter(letterScores, letter):\n indexOfLetter = ord(letter) - 97\n value = letterScores[indexOfLetter]\n return value\n\n# get scores of target word list\ndef getScoresOfWords(letterScores, targetWordLst):\n scores = []\n for i in range(len(targetWordLst)):\n score = 0\n for j in range(len(targetWordLst[i])):\n score += valueOfLetter(letterScores, targetWordLst[i][j])\n scores.append(score)\n return scores\n\n# get index of target\ndef getIndexOfTarget(idx, scoreList, maxScore):\n i = 0\n while i < len(scoreList):\n if scoreList[i] == maxScore:\n idx.append(i)\n i += 1\n return idx\n\n# This is the final target word list\ndef finalTarget(lst, hand):\n handStr = \"\"\n for s in hand:\n handStr += s\n newList = []\n for c in lst:\n for char in c:\n if c.count(char) == handStr.count(char):\n if c not in newList:\n newList.append(c)\n return newList\n\n# This is the main function of best scrabble score,\n# which will return the highest score with word list\ndef bestScrabbleScore(dictionary, letterScores, hand):\n targetWordLst = []\n for word in dictionary:\n CharInHand = True\n for char in word:\n if char in hand:\n CharInHand = True\n else:\n CharInHand = False\n break\n if CharInHand == True:\n targetWordLst.append(word)\n targetWordLst = finalTarget(targetWordLst, hand)\n scoreList = getScoresOfWords(letterScores, targetWordLst)\n if not scoreList: # if scoreList == []:\n return None\n maxScore = max(scoreList)\n idx = []\n idx = getIndexOfTarget(idx, scoreList, maxScore)\n resultList = []\n for c in idx:\n resultList.append(targetWordLst[c])\n if len(resultList) == 1:\n return(resultList[0], maxScore)\n else:\n return(resultList, maxScore)\n\n\n#################################################\n# Hw4 Graphics & Animation Functions\n# All graphics must go under here to avoid angering the autograder!\n# 
ignore_rest\n#################################################\nfrom tkinter import *\n\n## Tortoise Animation functions ##\n## Note - the Tortoise animation is collaborative! ##\n\n## Tortoise Animation bonus features: none ##\n\ndef tortoiseAnimationCollaborators():\n return \"nobody\"\n\n\n# This is the init function of animation\ndef init(data):\n data.torX = data.width/2\n data.torY = data.height/2\n data.commands = listOfDataCode(data)\n data.commandsToDraw = []\n data.torToDraw = []\n data.counter = 0\n\n# Return list of data code\ndef listOfDataCode(data):\n lstOfDataCode = []\n for line in data.code.split(\"\\n\"):\n lstOfDataCode.append(line)\n lstOfDataCode.pop()\n lstOfDataCode = lstOfDataCode[1:]\n return lstOfDataCode\n\n# this is a mouse event handler\ndef mousePressed(event, data):\n pass\n\n# this is a keyboard event handler\ndef keyPressed(event, data):\n if (event.keysym == \"Return\"):\n if data.counter < len(data.commands):\n data.commandsToDraw.append((data.commands[data.counter], data.counter))\n data.counter += 1\n\n# Get information of each command\ndef getInformationOfCommands(text, color, x, y, angle):\n if text.startswith('color'):\n color = text[6:]\n elif text.startswith('left'):\n angle = angle - int(text[5:])\n elif text.startswith('right'):\n angle = angle + int(text[6:])\n return color, x, y, angle\n\n# This is the run current line function\ndef runProgram(canvas, data, currentLine):\n color = \"\"\n angle = 0\n for i in range(currentLine):\n canvas.create_text(data.width/50,\n data.height/20 + data.commandsToDraw[i][1]*data.height/30,\n text=str(data.commandsToDraw[i][0]), anchor=\"w\", fill='gray')\n getInformationOfCommands(data.commandsToDraw[i][0], color, data.torX, data.torY, angle)\n\n\n# this function get the coordinate of the black rectangle\ndef getCoordinateOfRectangle(width, height):\n recx0 = 0\n recy0 = (9 / 10) * height\n recx1 = width\n recy1 = height\n return recx0, recy0, recx1, recy1\n\n# this function get the color of the small rectangles\ndef getColorOfBox(i):\n if i == 0:\n color = 'red'\n elif i == 1:\n color = 'orange'\n elif i == 2:\n color = 'yellow'\n elif i == 3:\n color = 'green'\n elif i == 4:\n color = 'blue'\n elif i == 5:\n color = 'purple'\n else:\n color = 'white'\n return color\n\n# this function get the coordinates of the small rectangles\ndef coordinatesInLoop(margin, i, widthOfRectangles, recy0):\n x0 = margin + i * widthOfRectangles + i * margin\n y0 = recy0 + margin\n x1 = x0 + widthOfRectangles\n y1 = y0 + widthOfRectangles - margin\n return x0, y0, x1, y1\n\n# this is the main animation function\ndef redrawAll(canvas, data):\n currentLine = data.counter\n runProgram(canvas, data, currentLine)\n margin = (1/100)*data.height\n widthOfRectangles = (data.width - 11 * margin)/10\n recx0, recy0, recx1, recy1 = getCoordinateOfRectangle(data.width, data.height)\n canvas.create_rectangle(recx0, recy0, recx1, recy1, fill='black')\n for i in range(10):\n x0, y0, x1, y1 = coordinatesInLoop(margin, i, widthOfRectangles, recy0)\n color = getColorOfBox(i)\n canvas.create_rectangle(x0, y0, x1, y1, fill=color)\n\n\n\"\"\" This function is provided as part of the starter code.\nYou don't need to change it, but you should call it!\"\"\"\ndef drawArrow(canvas, x, y, angle):\n offset = 135\n r = 10\n x1 = x + r*math.cos(math.radians(angle))\n y1 = y - r*math.sin(math.radians(angle))\n x2 = x + r*math.cos(math.radians(angle + offset))\n y2 = y - r*math.sin(math.radians(angle + offset))\n x3 = x + r*math.cos(math.radians(angle - 
offset))\n y3 = y - r*math.sin(math.radians(angle - offset))\n canvas.create_polygon(x1, y1, x2, y2, x3, y3, fill=\"black\")\n\n\n### Timeline Game is a bonus problem, and therefore optional ###\n# Note: Bonus problems are solo. Do not collaborate on bonus problems.\n\n## Timeline Game functions ##\n\n\"\"\" This function is provided as part of the starter code.\nYou don't need to change it, but you should call it!\"\"\"\ndef starterCards():\n import random\n cards = [ (\"Domestication of the Cat\", -4500),\n (\"Creation of the Pythagorean Theorem\", -548),\n (\"Invention of Chess\", 570),\n (\"First Calculating Machine\", 1642), \n (\"Invention of the Telegraph\", 1837),\n (\"Invention of Morse Code\", 1838),\n (\"Invention of the Plastic Bottle\", 1963), \n (\"Invention of the Computer Mouse\", 1963), \n (\"Invention of the Laptop Computer\", 1981),\n (\"First Public Internet Access\", 1990)\n ]\n random.shuffle(cards)\n return cards\n\ndef initTimeline(data):\n pass\n\ndef mousePressedTimeline(event, data):\n pass\n\ndef keyPressedTimeline(event, data):\n pass\n\ndef redrawAllTimeline(canvas, data):\n pass\n\n#################################################\n# Hw4 Test Functions\n#################################################\n\ndef _verifyLookAndSayIsNondestructive():\n a = [1,2,3]\n b = copy.copy(a)\n lookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testLookAndSay():\n print(\"Testing lookAndSay()...\", end=\"\")\n assert(_verifyLookAndSayIsNondestructive() == True)\n assert(lookAndSay([]) == [])\n assert(lookAndSay([1,1,1]) == [(3,1)])\n assert(lookAndSay([-1,2,7]) == [(1,-1),(1,2),(1,7)])\n assert(lookAndSay([3,3,8,-10,-10,-10]) == [(2,3),(1,8),(3,-10)])\n print(\"Passed.\")\n\ndef _verifyInverseLookAndSayIsNondestructive():\n a = [(1,2), (2,3)]\n b = copy.copy(a)\n inverseLookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testInverseLookAndSay():\n print(\"Testing inverseLookAndSay()...\", end=\"\")\n assert(_verifyInverseLookAndSayIsNondestructive() == True)\n assert(inverseLookAndSay([]) == [])\n assert(inverseLookAndSay([(3,1)]) == [1,1,1])\n assert(inverseLookAndSay([(1,-1),(1,2),(1,7)]) == [-1,2,7])\n assert(inverseLookAndSay([(2,3),(1,8),(3,-10)]) == [3,3,8,-10,-10,-10])\n print(\"Passed.\")\n\ndef runTortoiseAnimation(code, width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.code = code\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n root.mainloop() # blocks until window is closed\n\ndef testTortoiseAnimation():\n print(\"Running Tortoise Animation...\", end=\"\")\n runTortoiseAnimation(\"\"\"\n# This is a simple tortoise program\ncolor blue\nmove 
50\n\nleft 90\n\ncolor red\nmove 100\n\ncolor none # turns off drawing\nmove 50\n\nright 45\n\ncolor green # drawing is on again\nmove 50\n\nright 45\n\ncolor orange\nmove 50\n\nright 90\n\ncolor purple\nmove 100\n\"\"\")\n runTortoiseAnimation(\"\"\"\n# Y\ncolor red\nright 45\nmove 50\nright 45\nmove 50\nright 180\nmove 50\nright 45\nmove 50\ncolor none # space\nright 45\nmove 25\n\n# E\ncolor green\nright 90\nmove 85\nleft 90\nmove 50\nright 180\nmove 50\nright 90\nmove 42\nright 90\nmove 50\nright 180\nmove 50\nright 90\nmove 43\nright 90\nmove 50 # space\ncolor none\nmove 25\n\n# S\ncolor blue\nmove 50\nleft 180\nmove 50\nleft 90\nmove 43\nleft 90\nmove 50\nright 90\nmove 42\nright 90\nmove 50\n\"\"\")\n print(\"Done.\")\n\ndef _verifyNondestructiveRemoveRepeatsIsNondestructive():\n a = [3, 5, 3, 3, 6]\n b = copy.copy(a)\n # ignore result, just checking for destructiveness here\n nondestructiveRemoveRepeats(a)\n return (a == b)\n\ndef testNondestructiveRemoveRepeats():\n print(\"Testing nondestructiveRemoveRepeats()\", end=\"\")\n assert(_verifyNondestructiveRemoveRepeatsIsNondestructive())\n assert(nondestructiveRemoveRepeats([1,3,5,3,3,2,1,7,5]) == [1,3,5,2,7])\n assert(nondestructiveRemoveRepeats([1,2,3,-2]) == [1,2,3,-2])\n print(\"Passed.\")\n\ndef testDestructiveRemoveRepeats():\n print(\"Testing destructiveRemoveRepeats()\", end=\"\")\n a = [1,3,5,3,3,2,1,7,5]\n assert(destructiveRemoveRepeats(a) == None)\n assert(a == [1,3,5,2,7])\n b = [1,2,3,-2]\n assert(destructiveRemoveRepeats(b) == None)\n assert(b == [1,2,3,-2])\n print(\"Passed.\")\n\ndef testBestScrabbleScore():\n print(\"Testing bestScrabbleScore()...\", end=\"\")\n def d1(): return [\"a\", \"b\", \"c\"]\n def ls1(): return [1] * 26\n def d2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"] \n def ls2(): return [1 + (i % 5) for i in range(26)]\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"a\", \"c\", \"e\"]) == ([\"a\", \"c\"], 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"z\"]) == None)\n # x = 4, y = 5, z = 1\n # [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\n # 10 10 7 10 9 -\n assert(bestScrabbleScore(d2(), ls2(), [\"x\",\"y\",\"z\"]) == ([\"xyz\",\"zxy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(), \n [\"x\", \"y\", \"z\", \"y\"]) == ([\"xyz\", \"zxy\", \"yy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(), [\"x\", \"y\", \"q\"]) == (\"yx\", 9))\n assert(bestScrabbleScore(d2(), ls2(), [\"y\", \"z\", \"z\"]) == (\"zzy\", 7))\n assert(bestScrabbleScore(d2(), ls2(), [\"w\", \"x\", \"z\"]) == None)\n print(\"Passed.\")\n\ndef runTimelineGame(width=1200, height=400):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAllTimeline(canvas, data)\n canvas.update() \n\n def mousePressedWrapper(event, canvas, data):\n mousePressedTimeline(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressedTimeline(event, data)\n redrawAllWrapper(canvas, data)\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n initTimeline(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n root.bind(\"<Button-1>\", lambda event:\n\n\n 
mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n root.mainloop() # blocks until window is closed\n\n#################################################\n# Hw4 Main\n#################################################\n\ndef testAll():\n ## Collaborative Functions ##\n testLookAndSay()\n testInverseLookAndSay()\n testTortoiseAnimation()\n ## Solo Functions ##\n testNondestructiveRemoveRepeats()\n testDestructiveRemoveRepeats()\n testBestScrabbleScore()\n \n # Uncomment the next line if you want to try the bonus!\n #runTimelineGame()\n\ndef main():\n testAll()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.4306122362613678, "alphanum_fraction": 0.4571428596973419, "avg_line_length": 34, "blob_id": "90b48df972784567ec10a1075924c8086ee60327", "content_id": "ae415de8d5525bb373bad43cd7114877b66686db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/15112-CMU/week10/myjoin.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from functools import reduce\n# this is the lambda function for myJoin\nmyJoin = lambda L, sep: str(L[0])\\\n if len(L) == 1 else reduce(lambda x, y: (x + y),\n list(map(lambda x: str(x) + sep,\n L[:-1]))) + str(L[-1])\n\nprint(myJoin([1, 2, 'c', 'd'], ''))\nprint(myJoin(['a','b','c'], '-'))\nprint(type(myJoin([1, 2, 'c', 'd'],\"\")))\nprint(myJoin([42], ''))\n# l = [42]\n# a = list(map(lambda x: str(x) + \"\", l[:-1]))\n# print(a)\n" }, { "alpha_fraction": 0.4771634638309479, "alphanum_fraction": 0.5093482732772827, "avg_line_length": 43.838321685791016, "blob_id": "c6a360c84c84f9edabc2ae6c11594cc1a2eac824", "content_id": "e7d6a96a99344ad04067c01617c3ffd5e284909b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7488, "license_type": "no_license", "max_line_length": 103, "num_lines": 167, "path": "/15112-CMU/week10/animation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\ndef init(data):\n data.level = 1\n data.depth = -1\n data.color = ['yellow', 'red', 'orange', '#FFE4C4', 'blue']\n data.lineColor = ['#F5FFFA', 'yellow', 'orange', 'purple', 'pink']\n data.smallCircleColor = ['#F5FFFA', 'red', 'orange', 'purple', 'pink']\n\ndef drawCircle(canvas, data, xc, yc, r, depth):\n canvas.create_oval(xc - r / 4, (yc - 2 * r) - r / 4,\n xc + r / 4, (yc - 2 * r) + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - r / 4, (yc + 2 * r) - r / 4,\n xc + r / 4, (yc + 2 * r) + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval((xc - 2 * r) - r / 4, yc - r / 4,\n (xc - 2 * r) + r / 4, yc + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval((xc + 2 * r) - r / 4, yc - r / 4,\n (xc + 2 * r) + r / 4, yc + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc + 2 ** 0.5 * r - r / 4, yc - 2 ** 0.5 * r - r / 4,\n xc + 2 ** 0.5 * r + r / 4, yc - 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - 2 ** 0.5 * r - r / 4, yc - 2 ** 0.5 * r - r / 4,\n xc - 2 ** 0.5 * r + r / 4, yc - 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n canvas.create_oval(xc - 2 ** 0.5 * r - r / 4, yc + 2 ** 0.5 * r - r / 4,\n xc - 2 ** 0.5 * r + r / 4, yc + 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n 
canvas.create_oval(xc + 2 ** 0.5 * r - r / 4, yc + 2 ** 0.5 * r - r / 4,\n xc + 2 ** 0.5 * r + r / 4, yc + 2 ** 0.5 * r + r / 4,\n fill=data.smallCircleColor[depth])\n\n\ndef drawLinesAndCircles(canvas, data, xc, yc, r, depth):\n canvas.create_line(xc, yc, xc, yc - 2 * r, fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc, yc + 2 * r, fill=data.lineColor[depth])\n canvas.create_line(xc + 2 * r, yc, xc, yc, fill=data.lineColor[depth])\n canvas.create_line(xc - 2 * r, yc, xc, yc, fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc + 2 ** 0.5 * r, yc - 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc - 2 ** 0.5 * r, yc - 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc - 2 ** 0.5 * r, yc + 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n canvas.create_line(xc, yc, xc + 2 ** 0.5 * r, yc + 2 ** 0.5 * r,\n fill=data.lineColor[depth])\n drawCircle(canvas, data, xc, yc, r, depth)\n\n\ndef drawFractalSun(data, canvas, xc, yc, r, level, depth):\n if level == 0:\n # this following code is only for state level == 0\n margin = min(data.width, data.height) // 10\n canvas.create_oval(data.width // 2 - 0.6 * data.width // 5,\n data.height // 2 - 0.6 * data.width // 5,\n data.width // 2 + 0.6 * data.width // 5,\n data.height // 2 + 0.6 * data.width // 5, fill= '#FFDAB9')\n canvas.create_text(data.width / 2, 0,\n text=\"Level %d Fractal\" % (data.level),\n font=\"Arial \" + str(int(margin / 3)) + \" bold\",\n anchor=\"n\", fill='white')\n canvas.create_text(data.width / 2, margin,\n text=\"Use arrows to change level\",\n font=\"Arial \" + str(int(margin / 4)),\n anchor=\"s\", fill='white')\n elif level == 1:\n drawLinesAndCircles(canvas, data, xc, yc, r, depth)\n canvas.create_oval(xc - r, yc - r, xc + r, yc + r, fill=data.color[depth])\n else:\n drawFractalSun(data, canvas, xc, yc, r, level - 1, depth + 1)\n drawFractalSun(data, canvas, xc, yc - 2 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc, yc + 2 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 * r, yc, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 * r, yc, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 ** 0.5 * r, yc - 2 ** 0.5 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 ** 0.5 * r, yc - 2 ** 0.5 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc - 2 ** 0.5 * r, yc + 2 ** 0.5 * r, r / 4, level - 1, depth - 1)\n drawFractalSun(data, canvas, xc + 2 ** 0.5 * r, yc + 2 ** 0.5 * r, r / 4, level - 1, depth - 1)\n\n\n\ndef keyPressed(event, data):\n if event.keysym in [\"Up\", \"Right\"]:\n data.level += 1\n elif (event.keysym in [\"Down\", \"Left\"]) and (data.level > 0):\n data.level -= 1\n\n\ndef drawCanvas(canvas, data):\n topX = 0\n topY = 0\n canvas.create_rectangle(topX, topY, data.width, data.height, fill= 'black')\n\n\ndef redrawAll(canvas, data):\n drawCanvas(canvas, data)\n margin = min(data.width, data.height)//10\n xc, yc = data.width // 2, data.height // 2\n r = 0.6*data.width // 5\n drawFractalSun(data, canvas, xc, yc, r, data.level, data.depth)\n canvas.create_text(data.width / 2, 0,\n text=\"Level %d Fractal\" % (data.level),\n font=\"Arial \" + str(int(margin / 3)) + \" bold\",\n anchor=\"n\", fill = 'white')\n canvas.create_text(data.width / 2, margin,\n text=\"Use arrows to change level\",\n font=\"Arial \" + str(int(margin / 4)),\n anchor=\"s\", fill = 'white')\n\ndef mousePressed(event, data): pass\n\ndef timerFired(data): pass\n\n# Updated 
Animation Starter Code\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(500, 500)\n" }, { "alpha_fraction": 0.5918867588043213, "alphanum_fraction": 0.6093111634254456, "avg_line_length": 35.009803771972656, "blob_id": "f619d714b39523f766d3296040c89f4f79603034", "content_id": "9f4c4fed2efa4e9bed67a29ad84199c424b2e69b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3673, "license_type": "no_license", "max_line_length": 74, "num_lines": 102, "path": "/15112-CMU/Design Proposal and TP/TP/Ball.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file is mainly for the ball class\nfrom Background import *\n\n\n# this function calculate the dimension of boundary\ndef boundaryElem():\n marginWidth = 35\n marginHeight = 17\n scoreTableHeight = 60\n topLeftX = marginWidth\n topLeftY = scoreTableHeight + marginHeight\n displayWidth = 1100\n displayHeight = 620\n fieldWidth = displayWidth - 2 * marginWidth\n fieldHeight = displayHeight - 2 * marginHeight\n return topLeftX, topLeftY, fieldWidth, fieldHeight,\\\n marginWidth, marginHeight, scoreTableHeight,\\\n displayWidth, displayHeight\n\n\n# the move method of the ball is search\n# on google in pygame documentation\n# https://stackoverflow.com/questions/43171369/\n# using-self-rect-centerx-in-pygame-to-move-character-at-random\n\n\n# this is the class for soccer ball\nclass Ball(pygame.sprite.Sprite):\n def __init__(self):\n super(Ball, self).__init__()\n self.owner = None\n self.isMoving = False\n ballSize = 15\n ballImg = pygame.image.load('assets/images/ball/ball1.png')\n self.image = pygame.transform.scale(ballImg, (ballSize, ballSize))\n self.rect = self.image.get_rect()\n self.speed = (0, 0)\n self.friction = 0.95\n displayWidth = getGameDimension()[0]\n displayHeight = getGameDimension()[1]\n scoreTableHeight = getGameDimension()[3]\n self.rect.centerx = displayWidth // 2\n self.rect.centery = scoreTableHeight + displayHeight // 2.2\n self.boundary = pygame.Rect(boundaryElem()[0], boundaryElem()[1],\n boundaryElem()[2], boundaryElem()[3])\n\n # update 
the ball's position\n def update(self):\n offset = 20\n offset2 = 15\n if self.owner is not None:\n self.speed = (0, 0)\n elif self.owner is None and self.speed == (0, 0):\n self.isMoving = False\n elif self.owner is None and self.speed != (0, 0):\n self.rect.x += self.speed.x\n self.rect.y += self.speed.y\n # the soccer ball will stop moving due to the friction\n self.speed *= self.friction\n # check whether or not the ball is collide with\n # boundary of the soccer field\n if self.rect.x < self.boundary.left:\n self.rect.x = self.boundary.left\n self.speed.x = - self.speed.x\n elif self.rect.x > self.boundary.right - offset2:\n self.rect.x = self.boundary.right - offset2\n self.speed.x = - self.speed.x\n elif self.rect.y < self.boundary.top:\n self.rect.y = self.boundary.top\n self.speed.y = - self.speed.y\n elif self.rect.y > self.boundary.bottom - offset:\n self.rect.y = self.boundary.bottom - offset\n self.speed.y = - self.speed.y\n\n # this is the passBall method\n def passBall(self, vel):\n self.isMoving = True\n self.owner = None\n self.speed = vel\n\n # this is the shoot ball method\n def shoot(self, vel):\n self.isMoving = True\n self.owner = None\n self.speed = vel * 30\n\n # this method move's the ball to it's initial position\n def generateNewBall(self):\n displayWidth = getGameDimension()[0]\n displayHeight = getGameDimension()[1]\n scoreTableHeight = getGameDimension()[3]\n self.rect.centerx = displayWidth // 2\n self.rect.centery = scoreTableHeight + displayHeight // 2.2\n self.owner = None\n self.speed = (0, 0)\n\n# this is the generate ball function\ndef generateBallObject():\n ball = Ball()\n return ball\n\nball = generateBallObject()\n" }, { "alpha_fraction": 0.591289758682251, "alphanum_fraction": 0.6099944114685059, "avg_line_length": 32.77358627319336, "blob_id": "ebe9e6b55a17b72dccc2ed52d24df8b2c4067220", "content_id": "363b00e4d7b9f1f843244bf30d00e5454869fadd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3582, "license_type": "no_license", "max_line_length": 80, "num_lines": 106, "path": "/15112-CMU/week4 cold cold/case3.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n# Grid Demo\n\ndef init(data):\n data.rows = 4\n data.cols = 8\n data.margin = 5 # margin around grid\n data.selection = (-1, -1) # (row, col) of selection, (-1,-1) for none\n\ndef pointInGrid(x, y, data):\n # return True if (x, y) is inside the grid defined by data.\n return ((data.margin <= x <= data.width-data.margin) and\n (data.margin <= y <= data.height-data.margin))\n\ndef getCell(x, y, data):\n # aka \"viewToModel\"\n # return (row, col) in which (x, y) occurred or (-1, -1) if outside grid.\n if (not pointInGrid(x, y, data)):\n return (-1, -1)\n gridWidth = data.width - 2*data.margin\n gridHeight = data.height - 2*data.margin\n cellWidth = gridWidth / data.cols\n cellHeight = gridHeight / data.rows\n row = (y - data.margin) // cellHeight\n col = (x - data.margin) // cellWidth\n # triple-check that we are in bounds\n row = min(data.rows-1, max(0, row))\n col = min(data.cols-1, max(0, col))\n print(row,col)\n return (row, col)\n\ndef getCellBounds(row, col, data):\n # aka \"modelToView\"\n # returns (x0, y0, x1, y1) corners/bounding box of given cell in grid\n gridWidth = data.width - 2*data.margin\n gridHeight = data.height - 2*data.margin\n columnWidth = gridWidth / data.cols\n rowHeight = gridHeight / data.rows\n x0 = data.margin + col * columnWidth\n x1 = data.margin + (col+1) 
* columnWidth\n y0 = data.margin + row * rowHeight\n y1 = data.margin + (row+1) * rowHeight\n return (x0, y0, x1, y1)\n\ndef mousePressed(event, data):\n (row, col) = getCell(event.x, event.y, data)\n # select this (row, col) unless it is selected\n if (data.selection == (row, col)):\n data.selection = (-1, -1)\n else:\n data.selection = (row, col)\n\ndef keyPressed(event, data):\n pass\n\ndef redrawAll(canvas, data):\n # draw grid of cells\n for row in range(data.rows):\n for col in range(data.cols):\n (x0, y0, x1, y1) = getCellBounds(row, col, data)\n fill = \"orange\" if (data.selection == (row, col)) else \"cyan\"\n canvas.create_rectangle(x0, y0, x1, y1, fill=fill)\n canvas.create_text(data.width/2, data.height/2 - 15, text=\"Click in cells!\",\n font=\"Arial 26 bold\", fill=\"darkBlue\")\n\n\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 200)\n\n\n" }, { "alpha_fraction": 0.697621762752533, "alphanum_fraction": 0.7083805203437805, "avg_line_length": 41, "blob_id": "0f5192cfddb36f4e353958af699fed156dbbab6e", "content_id": "bd320731073f4e335385363504ff58261f136398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 157, "num_lines": 42, "path": "/15112-CMU/FIFAworldcup copy2/Computer.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame\nimport os\nimport time\n\nfrom const import *\nfrom functions import *\nfrom FBPlayer import *\n\nclass Computer(FBPlayer):\n def __init__(self, centerx, centery):\n super(Computer, self).__init__(RED_TEAM, centerx, centery)\n\n def handle(self, ball):\n pass\n\n def leadToGoal(self):\n goalRect = pygame.Rect(0, TABLE_SCORE_HEIGHT + GAME_HEIGHT / 2 - 100, 50, 200)\n\n distances = {\n UP: caculateDistance((self.controlRect.centerx, self.controlRect.centery - SPEED_DEFAULT), goalRect.center),\n DOWN: caculateDistance((self.controlRect.centerx, self.controlRect.centery + SPEED_DEFAULT), goalRect.center),\n RIGHT: caculateDistance((self.controlRect.centerx + SPEED_DEFAULT, self.controlRect.centery), goalRect.center),\n LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT, self.controlRect.centery), goalRect.center),\n UP_LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT / SQRT_2, self.controlRect.centery - SPEED_DEFAULT / SQRT_2), goalRect.center),\n UP_RIGHT: caculateDistance((self.controlRect.centerx + 
SPEED_DEFAULT / SQRT_2, self.controlRect.centery - SPEED_DEFAULT / SQRT_2), goalRect.center),\n DOWN_LEFT: caculateDistance((self.controlRect.centerx - SPEED_DEFAULT / SQRT_2, self.controlRect.centery + SPEED_DEFAULT / SQRT_2), goalRect.center),\n DOWN_RIGHT: caculateDistance((self.controlRect.centerx + SPEED_DEFAULT / SQRT_2, self.controlRect.centery + SPEED_DEFAULT / SQRT_2), goalRect.center),\n }\n\n direction = UP\n distance = distances[UP]\n\n for key, value in distances.items():\n if value < distance:\n distance = value\n direction = key\n \n if distance < 1:\n return\n\n self.updateDirection(direction)\n self.move(direction)\n\n\n" }, { "alpha_fraction": 0.4887189269065857, "alphanum_fraction": 0.6401529908180237, "avg_line_length": 37.970149993896484, "blob_id": "3965a637819d00c15e2e7496d84666b008713896", "content_id": "e405d77ff0736ffd63a33b651bc2fab5a9547b59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": "no_license", "max_line_length": 71, "num_lines": 67, "path": "/15112-CMU/week1/week1/test.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# The following function is from this week's lecture notes\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n # You do not need to understand how this function works.\n import decimal\n rounding = decimal.ROUND_HALF_UP\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n\ndef transformRGB(rgb1, rgb2, midpoints, n):\n blue1 = rgb1 % 10 ** 3\n green1 = (rgb1 // 10 ** 3) % (10 ** 3)\n red1 = (rgb1 // 10 ** 3) // 10 ** 3\n blue2 = rgb2 % 10 ** 3\n green2 = (rgb2 // 10 ** 3) % (10 ** 3)\n red2 = (rgb2 // 10 ** 3) // 10 ** 3\n equalblue = (blue1 - blue2) / (midpoints + 1)\n equalgreen = (green1 - green2) / (midpoints + 1)\n equalred = (red1 - red2) / (midpoints + 1)\n targetblue = roundHalfUp(blue1 - equalblue * n)\n targetgreen = roundHalfUp(green1 - equalgreen * n)\n targetred = roundHalfUp(red1 - equalred * n)\n\n if len(str(targetgreen)) == 1:\n targetgreen = \"00\" + str(targetgreen)\n elif len(str(targetgreen)) == 2:\n targetgreen = \"0\" + str(targetgreen)\n else:\n targetgreen = str(targetgreen)\n\n if len(str(targetblue)) == 1:\n targetblue = \"00\" + str(targetblue)\n elif len(str(targetblue)) == 2:\n targetblue = \"0\" + str(targetblue)\n else:\n targetblue = str(targetblue)\n\n targetred = str(targetred)\n return targetred + targetgreen + targetblue\n\n\ndef colorBlender(rgb1, rgb2, midpoints, n):\n if n < 0 or n > (midpoints+1):\n return None\n elif 0 <= n <= (midpoints + 1):\n return int(transformRGB(rgb1, rgb2, midpoints, n))\n\n\ndef testColorBlender():\n print(\"Testing colorBlender()...\", end=\"\")\n # http://meyerweb.com/eric/tools/color-blend/#DC143C:BDFCC9:3:rgbd\n assert(colorBlender(220020060, 189252201, 3, -1) == None)\n assert(colorBlender(220020060, 189252201, 3, 0) == 220020060)\n assert(colorBlender(220020060, 189252201, 3, 1) == 212078095)\n assert(colorBlender(220020060, 189252201, 3, 2) == 205136131)\n assert(colorBlender(220020060, 189252201, 3, 3) == 197194166)\n assert(colorBlender(220020060, 189252201, 3, 4) == 189252201)\n assert(colorBlender(220020060, 189252201, 3, 5) == None)\n # http://meyerweb.com/eric/tools/color-blend/#0100FF:FF0280:2:rgbd\n assert(colorBlender(1000255, 255002128, 2, -1) == None)\n assert(colorBlender(1000255, 255002128, 2, 0) == 1000255)\n assert(colorBlender(1000255, 255002128, 2, 1) == 86001213)\n assert(colorBlender(1000255, 
255002128, 2, 2) == 170001170)\n assert(colorBlender(1000255, 255002128, 2, 3) == 255002128)\n print(\"Passed.\")\n\ntestColorBlender()\n\n\n\n\n" }, { "alpha_fraction": 0.5757631659507751, "alphanum_fraction": 0.5966697335243225, "avg_line_length": 30.799999237060547, "blob_id": "3ff60d7c1ad9b0b02bd9a3a45fdb3b2f214efdb6", "content_id": "b25ea99a160e35e9d6a92e7a7966353227d593ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5405, "license_type": "no_license", "max_line_length": 86, "num_lines": 170, "path": "/15112-CMU/week5/hw5-ball.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw5\n# Your andrewID:mxu2\n# Your section: 2N\n#################################################\n\nimport random\n\n#################################################\n# Hw5 Ball Problem\n#################################################\nfrom tkinter import *\n\n\n# This is the model of the animation\ndef init(data):\n data.score = 0\n data.rx0 = 0\n data.ry0 = 0\n data.rcSpeedX = 20\n data.rcSpeedY = 20\n data.ballSpeedX = 2\n data.ballSpeedY = 2\n data.timerDelay = 10\n data.isPaused = False\n data.ballPosition = [random.randrange(data.rx0, data.rx0 + 1.5 * data.width),\n random.randrange(data.ry0, data.ry0 + 1.5 * data.height)]\n data.ballSize = data.width / 10\n\n\n# This is the mouse press handler\ndef mousePressed(event, data):\n if data.ballPosition[0] < event.x < data.ballPosition[0] + data.ballSize \\\n and data.ballPosition[1] < event.y < data.ballPosition[1] + data.ballSize:\n data.isPaused = not data.isPaused\n\n\n# This function is the keyboard handler\ndef keyPressed(event, data):\n if event.keysym == \"Left\":\n data.rx0 += data.rcSpeedX\n data.ballPosition[0] += data.rcSpeedX\n elif event.keysym == \"Right\":\n data.rx0 -= data.rcSpeedX\n data.ballPosition[0] -= data.rcSpeedX\n elif event.keysym == \"Up\":\n data.ry0 += data.rcSpeedY\n data.ballPosition[1] += data.rcSpeedY\n elif event.keysym == \"Down\":\n data.ry0 -= data.rcSpeedY\n data.ballPosition[1] -= data.rcSpeedY\n\n\n# This is the timer function\ndef timerFired(data):\n if not data.isPaused:\n doStep(data)\n if data.ballPosition[0] + data.ballSize < 0 \\\n or data.ballPosition[0] > data.width \\\n or data.ballPosition[1] + data.ballSize < 0 \\\n or data.ballPosition[1] > data.height:\n data.score += 0\n else:\n data.score += 1\n\n\n# This is the do step function the will be called regularly according to the timer\ndef doStep(data):\n data.ballPosition[0] += data.ballSpeedX\n if data.ballPosition[0] < data.rx0 or\\\n data.ballPosition[0] + data.ballSize > data.rx0 + 1.5 * data.width:\n data.ballSpeedX = - data.ballSpeedX\n data.ballPosition[0] += data.ballSpeedX\n\n data.ballPosition[1] += data.ballSpeedY\n if data.ballPosition[1] < data.ry0 or\\\n data.ballPosition[1] + data.ballSize > data.ry0 + 1.5 * data.height:\n data.ballSpeedY = - data.ballSpeedY\n data.ballPosition[1] += data.ballSpeedY\n\n\n# This function draws the prompt's background\ndef drawBackground(canvas, data):\n left = 0\n top = 0\n right = data.width\n bottom = data.height\n canvas.create_rectangle(left, top, right, bottom, fill = \"violet\")\n\n\n# This function draws the score of the game\ndef drawScore(canvas, data):\n canvas.create_text(data.width/15, data.height/15,\n text = data.score, font = (\"Helvetica\", 20))\n\n\n# This function draw the large rectangle\ndef drawRectangle(canvas, data):\n x0 = data.rx0\n y0 = 
data.ry0\n x1 = data.rx0 + 1.5 * data.width\n y1 = data.ry0 + 1.5 * data.height\n canvas.create_rectangle(x0, y0, x1, y1, fill=\"violet\", width = 5)\n\n\n# This is the function that draws the ball\ndef drawBall(canvas, data):\n canvas.create_oval(data.ballPosition[0], data.ballPosition[1],\n data.ballPosition[0] + data.ballSize,\n data.ballPosition[1] + data.ballSize,\n fill = \"Blue\")\n\n\n# This is main drawing function\ndef redrawAll(canvas, data):\n drawBackground(canvas, data)\n drawRectangle(canvas, data)\n drawScore(canvas, data)\n drawBall(canvas, data)\n\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 400)" }, { "alpha_fraction": 0.5350778102874756, "alphanum_fraction": 0.5462626814842224, "avg_line_length": 34.43490982055664, "blob_id": "cd75e4097878ed3aa1232b854ff8c7070ee109d7", "content_id": "5aaa2ef2871c5ad55a92d6bf249c8512ebf582f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23961, "license_type": "no_license", "max_line_length": 141, "num_lines": 676, "path": "/15112-CMU/BeerGame/_FastRenderGroup.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n#\n#\n#\n#\n\n\"\"\"\nFastRenderGroup\n\nVERSION: you get version information here as a string\nVERSION_NUMBER: you get a tuple containing (release, subrelease, revision)\n so you can compare the numbers\n\nchangelog:\n11.07.2007:\n- pygame 1.8 feature blendmode activated, removed old code\n\n07.07.2007: \n- renamed both groups to LayeredUpdates and LayeredDirty as suggested by illume\n- made the _get_visible and _set_visible property such that they can be \n overwritten.\n\n05.07.2007: (v1.1.83)\n- change the timing code to switch between modes (prolbem of finding the \n treshold value is still there)\n- set_timing_treshold() added\n- LayeredRenderGroup.layers() now usese a set()\n\n29.06.07: (v1.1.78)\n- moved unittests to fastrendergroup_test.py\n- cleaned up the code using pylint (still some strange things in here)\n- docstrings should be complete,although perhaps not as clear as they should be\n- 
testsprite.py modified to be able to test FastRenderGroup in there too\n- written a multi_FRG_demo.py to demonstrate that all sprites MUST be in one\n single FRG to work correctly\n- DirtySprite._layer is now READ only (using an _ now)\n- DirtySprite.visible is a property now (so you do not habe to worry to set\n the sprite dirty when changing the visibility state\n\n11.06.07: (v1.1.72)\n- unittest for LayeredRenderGroup added\n\n09.06.07: (v1.1.72)\n- doc strings\n- removed old FRG\n- changed to v1.1\n\n07.06.07: (v1.0.70)\n- separated LayeredRenderGroup from the FRG\n- LayeredRenderGroup extended with new methods\n\n06.06.07: (v1.0.70)\n- changed layersystem, sprite does not know anything about layers\n- only through change_layer the layer of a sprite can be changed\n\n04.06.07:\n- fix change_layer to prevent infinit loop\n- fix need of a view attribute for the sprite when adding it to a group in\n the __init__ method directly\n\n03.06.07:\n- fix: now you can create a group befor the display is initialized again\n- fix: #223 changed . by _\n\n03.06.07:\n- proper version information and license information added\n\n02.06.07:\n- using background now\n- verion() method added so you can get the version\n- dirty flag defaults to 2 (old sprites as DirtySprite as well)\n- default layer is set to ??\n\n29.05.07:\n- fixed clipping, works correctly now\n\n\"\"\"\n\nRELEASE = 1\nSUBRELEASE = 1\nREV = \"$Rev: 93 $\"\nREV = REV[6:-1]\n\nVERSION = 'FastRenderGroup v'+str(RELEASE)+'.'+str(SUBRELEASE)+'.'+REV+\\\n \" DR0ID (c) 2007\"\nVERSION_NUMBER = RELEASE, SUBRELEASE, int(REV)\n\ndel RELEASE\ndel SUBRELEASE\ndel REV\n\n__author__ = \"$Author: DR0ID $\"\n__version__ = VERSION\n__date__ = \"$Date: 2007-08-04 11:45:06 +0200 (Sa, 04 Aug 2007) $\"\n__license__ = 'LGPL, you should have received a copy as LGPL.txt'\n__copyright__ = \"DR0ID (c) 2007\"\n__url__ = \"http://www.mypage.bluewin.ch/DR0ID/index.html\"\n__email__ = \"[email protected]\"\n\n\nimport pygame\nfrom pygame import Rect\nfrom pygame.sprite import Sprite\nfrom pygame.time import get_ticks\n\n\n\n\nclass DirtySprite(pygame.sprite.Sprite):\n \"\"\"\n DirtySprite has new attributes:\n \n dirty: if set to 1, it is repainted and then set to 0 again \n if set to 2 then it is always dirty ( repainted each frame)\n 0 means that it is not dirty and therefor not repainted again\n blendmode: (for pygame 1.8) not used at the moment (actually its the \n special_flags argument of blit)\n visible: normally 1, if set to 0 it will not be repainted \n (you must set it dirty too to be erased from screen)\n \"\"\"\n \n def __init__(self, *groups):\n \"\"\"\n Same as pygame.sprite.Sprite but initializes the new attributes to\n default values:\n dirty = 1 (to be always dirty you have to set it)\n blendmode = 0\n layer = 0 (READONLY value, it is read when adding it to the \n LayeredRenderGroup, for details see doc of \n LayeredRenderGroup)\n \"\"\"\n self.dirty = 1\n self.blendmode = 0 # pygame 1.8, reffered as special_flags in \n # the documentation of blit \n self._visible = 1\n self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty\n self.source_rect = None\n pygame.sprite.Sprite.__init__(self, *groups)\n \n def _set_visible(self, val):\n \"\"\"set the visible value (0 or 1) and makes the sprite dirty\"\"\"\n self._visible = val\n if self.dirty < 2:\n self.dirty = 1\n def _get_visible(self):\n \"\"\"returns the visible value of that sprite\"\"\"\n return self._visible\n visible = property(lambda self: self._get_visible(),\\\n lambda self, 
value:self._set_visible(value), \\\n doc=\"you can make this sprite disappear without removing it from the group, values 0 for invisible and 1 for visible\")\n \n \n\nclass LayeredUpdates(pygame.sprite.AbstractGroup):\n \"\"\"\n LayeredRenderGroup\n \n A group that handles layers. For drawing it uses the same metod as the \n pygame.sprite.OrderedUpdates.\n \n This group is fully compatible with pygame.sprite.Sprite.\n \"\"\"\n \n def __init__(self, *sprites, **kwargs):\n \"\"\"\n You can set the default layer through kwargs using 'default_layer'\n and an integer for the layer. The default layer is 0.\n \n If the sprite you add has an attribute layer then that layer will\n be used.\n If the kwarg contain 'layer' then the sprites passed will be \n added to that layer (overriding the sprite.layer attribute).\n If neither sprite has attribute layer nor kwarg then the default\n layer is used to add the sprites.\n \"\"\"\n self._spritelayers = {}\n self._spritelist = []\n pygame.sprite.AbstractGroup.__init__(self)\n if kwargs.get('default_layer'):\n self._default_layer = kwargs['default_layer']\n else:\n self._default_layer = 0\n \n self.add(*sprites, **kwargs)\n \n def add_internal(self, sprite, layer=None):\n \"\"\"\n Do not use this method directly. It is used by the group to add a\n sprite internally.\n \"\"\"\n self.spritedict[sprite] = Rect(0, 0, 0, 0) # add a old rect\n \n if layer is None:\n if hasattr(sprite, '_layer'):\n layer = sprite._layer\n else:\n layer = self._default_layer\n \n \n self._spritelayers[sprite] = layer\n if hasattr(sprite, '_layer'):\n sprite._layer = layer\n \n # add the sprite at the right position\n # bisect algorithmus\n sprites = self._spritelist # speedup\n sprites_layers = self._spritelayers\n leng = len(sprites)\n low = 0\n high = leng-1\n mid = low\n while(low<=high):\n mid = low + (high-low)/2\n if(sprites_layers[sprites[int(mid)]]<=layer):\n low = mid+1\n else:\n high = mid-1\n # linear search to find final position\n while(mid<leng and sprites_layers[sprites[int(mid)]]<=layer):\n mid += 1\n sprites.insert(int(mid), sprite)\n \n def add(self, *sprites, **kwargs):\n \"\"\"add(sprite, list, or group, ...)\n add sprite to group\n\n Add a sprite or sequence of sprites to a group.\n \n If the sprite(s) have an attribute layer then that is used \n for the layer. If kwargs contains 'layer' then the sprite(s) \n will be added to that argument (overriding the sprite layer \n attribute). If neither is passed then the sprite(s) will be\n added to the default layer.\n \"\"\"\n layer = None\n if kwargs.get('layer'):\n layer = kwargs['layer']\n if sprites is None or len(sprites)==0:\n return\n for sprite in sprites:\n # It's possible that some sprite is also an iterator.\n # If this is the case, we should add the sprite itself,\n # and not the objects it iterates over.\n if isinstance(sprite, Sprite):\n if not self.has_internal(sprite):\n self.add_internal(sprite, layer)\n sprite.add_internal(self)\n else:\n try:\n # See if sprite is an iterator, like a list or sprite\n # group.\n for spr in sprite:\n self.add(spr, **kwargs)\n except (TypeError, AttributeError):\n # Not iterable, this is probably a sprite that happens\n # to not subclass Sprite. 
Alternately, it could be an\n                    # old-style sprite group.\n                    if hasattr(sprite, '_spritegroup'):\n                        for spr in sprite.sprites():\n                            if not self.has_internal(spr):\n                                self.add_internal(spr, layer)\n                                spr.add_internal(self)\n                    elif not self.has_internal(sprite):\n                        self.add_internal(sprite, layer)\n                        sprite.add_internal(self)\n    \n    def remove_internal(self, sprite):\n        \"\"\"\n        Do not use this method directly. It is used by the group to \n        add a sprite.\n        \"\"\"\n        self._spritelist.remove(sprite)\n        # these dirty rects are suboptimal for one frame\n        self.lostsprites.append(self.spritedict[sprite]) # dirty rect\n        if hasattr(sprite, 'rect'):\n            self.lostsprites.append(sprite.rect) # dirty rect\n        \n        self.spritedict.pop(sprite, 0)\n        self._spritelayers.pop(sprite)\n    \n    def sprites(self):\n        \"\"\"\n        Returns an ordered list of sprites (first back, last top).\n        \"\"\"\n        return list(self._spritelist)\n    \n    def draw(self, surface):\n        \"\"\"\n        Draw all sprites in the right order onto the passed surface.\n        \"\"\"\n        spritedict = self.spritedict\n        surface_blit = surface.blit\n        dirty = self.lostsprites\n        self.lostsprites = []\n        dirty_append = dirty.append\n        for spr in self.sprites():\n            rec = spritedict[spr]\n            newrect = surface_blit(spr.image, spr.rect)\n            if rec is 0:\n                dirty_append(newrect)\n            else:\n                if newrect.colliderect(rec):\n                    dirty_append(newrect.union(rec))\n                else:\n                    dirty_append(newrect)\n                    dirty_append(rec)\n            spritedict[spr] = newrect\n        return dirty\n\n    def get_sprites_at(self, pos):\n        \"\"\"\n        Returns a list with all sprites at that position.\n        Bottom sprites first, top last.\n        \"\"\"\n        _sprites = self._spritelist\n        rect = Rect(pos, (0, 0))\n        colliding_idx = rect.collidelistall(_sprites)\n        colliding = []\n        colliding_append = colliding.append\n        for i in colliding_idx:\n            colliding_append(_sprites[i])\n        return colliding\n\n    def get_sprite(self, idx):\n        \"\"\"\n        Returns the sprite at the index idx from the sprites().\n        Raises IndexError.\n        \"\"\"\n        return self._spritelist[idx]\n    \n    def remove_sprites_of_layer(self, layer_nr):\n        \"\"\"\n        Removes all sprites from a layer and returns them as a list.\n        \"\"\"\n        sprites = self.get_sprites_from_layer(layer_nr)\n        self.remove(sprites)\n        return sprites\n    \n\n    #---# layer methods\n    def layers(self):\n        \"\"\"\n        Returns a list of layers defined (unique), sorted from bottom up.\n        \"\"\"\n        layers = set()\n        for layer in self._spritelayers.values():\n            layers.add(layer)\n        return list(layers)\n\n    def change_layer(self, sprite, new_layer):\n        \"\"\"\n        Changes the layer of the sprite.\n        sprite must have been added to the renderer. It is not checked.\n        \"\"\"\n        sprites = self._spritelist # speedup\n        sprites_layers = self._spritelayers # speedup\n        \n        sprites.remove(sprite) \n        sprites_layers.pop(sprite)\n        \n        # add the sprite at the right position\n        # bisect algorithm\n        leng = len(sprites)\n        low = 0\n        high = leng-1\n        mid = low\n        while(low<=high):\n            # integer division: mid is used directly as a list index below\n            mid = low + (high-low)//2\n            if(sprites_layers[sprites[mid]]<=new_layer):\n                low = mid+1\n            else:\n                high = mid-1\n        # linear search to find final position\n        while(mid<leng and sprites_layers[sprites[mid]]<=new_layer):\n            mid += 1\n        sprites.insert(mid, sprite)\n        # keep the sprite's (read-only) _layer attribute in sync, as in add_internal\n        if hasattr(sprite, '_layer'):\n            sprite._layer = new_layer\n        \n        # add layer info\n        sprites_layers[sprite] = new_layer\n        \n    def get_layer_of_sprite(self, sprite):\n        \"\"\"\n        Returns the layer that sprite is currently in. 
If the sprite is not \n        found then it will return the default layer.\n        \"\"\"\n        return self._spritelayers.get(sprite, self._default_layer)\n    \n    def get_top_layer(self):\n        \"\"\"\n        Returns the number of the top layer.\n        \"\"\"\n        return self._spritelayers[self._spritelist[-1]]\n####        return max(self._spritelayers.values())\n    \n    def get_bottom_layer(self):\n        \"\"\"\n        Returns the number of the bottom layer.\n        \"\"\"\n        return self._spritelayers[self._spritelist[0]]\n####        return min(self._spritelayers.values())\n    \n    def move_to_front(self, sprite):\n        \"\"\"\n        Brings the sprite to front, changing the layer of the sprite\n        to be in the topmost layer (added at the end of that layer).\n        \"\"\"\n        self.change_layer(sprite, self.get_top_layer())\n    \n    def move_to_back(self, sprite):\n        \"\"\"\n        Moves the sprite to the bottom layer, moving it behind\n        all other layers and adding one additional layer.\n        \"\"\"\n        self.change_layer(sprite, self.get_bottom_layer()-1)\n    \n    def get_top_sprite(self):\n        \"\"\"\n        Returns the topmost sprite.\n        \"\"\"\n        return self._spritelist[-1]\n    \n    def get_sprites_from_layer(self, layer):\n        \"\"\"\n        Returns all sprites from a layer, ordered by how they were added.\n        It uses linear search and the sprites are not removed from layer.\n        \"\"\"\n        sprites = []\n        sprites_append = sprites.append\n        sprite_layers = self._spritelayers\n        for spr in self._spritelist:\n            if sprite_layers[spr] == layer: \n                sprites_append(spr)\n            elif sprite_layers[spr]>layer:# break after because no other will \n                                          # follow with same layer\n                break\n        return sprites\n    \n    def switch_layer(self, layer1_nr, layer2_nr):\n        \"\"\"\n        Switches the sprites from layer1 to layer2.\n        The layer numbers must exist, it is not checked.\n        \"\"\"\n        sprites1 = self.remove_sprites_of_layer(layer1_nr)\n        for spr in self.get_sprites_from_layer(layer2_nr):\n            self.change_layer(spr, layer1_nr)\n        self.add(sprites1, layer=layer2_nr)\n\n\nclass LayeredDirty(LayeredUpdates):\n    \"\"\"\n    Yet another group. It uses the dirty flag technique and is therefore \n    faster than the pygame.sprite.RenderUpdates if you have many static \n    sprites. It also switches automatically between dirty rect update and \n    full screen drawing, so you do not have to worry which would be faster. It \n    only works with the DirtySprite or any sprite that has the following \n    attributes: image, rect, dirty, visible, blendmode (see doc of \n    DirtySprite).\n    \"\"\"\n    \n    def __init__(self, *sprites, **kwargs):\n        \"\"\"\n        Same as for the pygame.sprite.Group.\n        You can specify some additional attributes through kwargs:\n        _use_update: True/False   default is False\n        _default_layer: the default layer where the sprites without a layer are\n            added.\n        _time_threshold: threshold time for switching between dirty rect mode and\n            fullscreen mode, defaults to 1000./20. == 1000./fps\n        \"\"\"\n        LayeredUpdates.__init__(self, *sprites, **kwargs)\n        self._clip = None\n        \n        self._use_update = False\n        \n        self._time_threshold = 1000./20.  # 1000./ fps\n        \n        \n        self._bgd = None\n        for key, val in kwargs.items():\n            if key in ['_use_update', '_time_threshold', '_default_layer']:\n                if hasattr(self, key):\n                    setattr(self, key, val)\n\n    def add_internal(self, sprite, layer=None):\n        \"\"\"\n        Do not use this method directly. 
It is used by the group to add a\n sprite internally.\n \"\"\"\n # check if all attributes needed are set\n if not hasattr(sprite, 'dirty'):\n raise AttributeError()\n if not hasattr(sprite, \"visible\"):\n raise AttributeError()\n if not hasattr(sprite, \"blendmode\"):\n raise AttributeError()\n \n if not isinstance(sprite, DirtySprite):\n raise TypeError()\n \n if sprite.dirty == 0: # set it dirty if it is not\n sprite.dirty = 1\n \n LayeredUpdates.add_internal(self, sprite, layer)\n \n def draw(self, surface, bgd=None):\n \"\"\"\n Draws all sprites on the surface you pass in.\n You can pass the background too. If a background is already set, \n then the bgd argument has no effect.\n \"\"\"\n # speedups\n _orig_clip = surface.get_clip()\n _clip = self._clip\n if _clip is None:\n _clip = _orig_clip\n \n \n _surf = surface\n _sprites = self._spritelist\n _old_rect = self.spritedict\n _update = self.lostsprites\n _update_append = _update.append\n _ret = None\n _surf_blit = _surf.blit\n _rect = pygame.Rect\n if bgd is not None:\n self._bgd = bgd\n _bgd = self._bgd\n \n _surf.set_clip(_clip)\n # -------\n # 0. deside if normal render of flip\n start_time = get_ticks()\n if self._use_update: # dirty rects mode\n # 1. find dirty area on screen and put the rects into _update\n # still not happy with that part\n for spr in _sprites:\n if 0 < spr.dirty:\n if spr.source_rect is not None:\n _union_rect = Rect(spr.rect.topleft, spr.source_rect.size)\n else:\n _union_rect = _rect(spr.rect)\n _union_rect_collidelist = _union_rect.collidelist\n _union_rect_union_ip = _union_rect.union_ip\n i = _union_rect_collidelist(_update)\n while -1 < i:\n _union_rect_union_ip(_update[i])\n del _update[i]\n i = _union_rect_collidelist(_update)\n _update_append(_union_rect.clip(_clip))\n \n _union_rect = _rect(_old_rect[spr])\n _union_rect_collidelist = _union_rect.collidelist\n _union_rect_union_ip = _union_rect.union_ip\n i = _union_rect_collidelist(_update)\n while -1 < i:\n _union_rect_union_ip(_update[i])\n del _update[i]\n i = _union_rect_collidelist(_update)\n _update_append(_union_rect.clip(_clip))\n # can it be done better? because that is an O(n**2) algorithm in\n # worst case\n \n # clear using background\n if _bgd is not None:\n for rec in _update:\n _surf_blit(_bgd, rec, rec)\n \n # 2. 
draw\n            for spr in _sprites:\n                if 1 > spr.dirty:\n                    if spr._visible:\n                        # sprite not dirty, blit only the intersecting part\n                        if spr.source_rect is not None:\n                            _spr_rect = Rect(spr.rect.topleft, spr.source_rect.size)\n                        else:\n                            _spr_rect = spr.rect\n                        _spr_rect_clip = _spr_rect.clip\n                        for idx in _spr_rect.collidelistall(_update):\n                            # clip\n                            clip = _spr_rect_clip(_update[idx])\n                            _surf_blit(spr.image, clip, \\\n                                       (clip[0]-_spr_rect[0], \\\n                                            clip[1]-_spr_rect[1], \\\n                                            clip[2], \\\n                                            clip[3]))#, spr.blendmode)\n                else: # dirty sprite\n                    if spr._visible:\n                        if spr.source_rect is not None:\n                            _old_rect[spr] = _surf_blit(spr.image, spr.rect, \\\n                                                   spr.source_rect)#, spr.blendmode)\n                        else:\n                            _old_rect[spr] = _surf_blit(spr.image, spr.rect)\n\n                    if spr.dirty == 1:\n                        spr.dirty = 0\n            _ret = list(_update)\n        else: # flip, full screen mode\n            if _bgd is not None:\n                _surf_blit(_bgd, (0, 0))\n            for spr in _sprites:\n                if spr.visible:\n                    if spr.source_rect is not None:\n                        _old_rect[spr] = _surf_blit(spr.image, spr.rect, spr.source_rect)#,spr.blendmode)\n                    else:\n                        _old_rect[spr] = _surf_blit(spr.image, spr.rect)#, spr.source_rect)#,spr.blendmode)\n            _ret = [_rect(_clip)] # return only the part of the screen changed\n        \n        \n        # timing for switching modes\n        # how to find a good threshold? it depends on the hardware it runs on\n        end_time = get_ticks()\n        if end_time-start_time > self._time_threshold:\n            self._use_update = False\n        else:\n            self._use_update = True\n        \n##        # debug\n##        print \" check: using dirty rects:\", self._use_update\n        \n        # empty the dirty rects list\n        _update[:] = []\n        \n        # -------\n        # restore original clip\n        _surf.set_clip(_orig_clip)\n        return _ret\n\n    def clear(self, surface, bgd):\n        \"\"\"\n        Only used to set background.\n        \"\"\"\n        self._bgd = bgd\n\n    def repaint_rect(self, screen_rect): \n        \"\"\"\n        Repaints the given area.\n        screen_rect is in screen coordinates.\n        \"\"\"\n        self.lostsprites.append(screen_rect.clip(self._clip))\n        \n    def set_clip(self, screen_rect=None):\n        \"\"\"\n        clip the area where to draw. Just pass None (default) to \n        reset the clip.\n        \"\"\"\n        if screen_rect is None:\n            self._clip = pygame.display.get_surface().get_rect()\n        else:\n            self._clip = screen_rect\n        self._use_update = False\n        \n    def get_clip(self):\n        \"\"\"\n        Returns the current clip.\n        \"\"\"\n        return self._clip\n        \n    def change_layer(self, sprite, new_layer):\n        \"\"\"\n        Changes the layer of the sprite.\n        sprite must have been added to the renderer. It is not checked.\n        \"\"\"\n        # the base class is named LayeredUpdates (renamed from the old\n        # LayeredRenderGroup); the old name here would raise a NameError\n        LayeredUpdates.change_layer(self, sprite, new_layer)\n        if sprite.dirty == 0:\n            sprite.dirty = 1\n        \n    def set_timing_treshold(self, time_ms):\n        \"\"\"\n        Sets the threshold in milliseconds. 
Default is 1000./80 where 80 is the\n fps I want to switch to full screen mode.\n \"\"\"\n self._time_threshold = time_ms\n \n\n\n" }, { "alpha_fraction": 0.5630311369895935, "alphanum_fraction": 0.5856940746307373, "avg_line_length": 28.41666603088379, "blob_id": "a17098858300c8179da0b7a3683885d568571014", "content_id": "d04f3cfe96b6f85639b1c54f031fec6a2b33c856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 53, "num_lines": 48, "path": "/15112-CMU/week3/test3.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def getAllSubstrings(s):\n substrings = \"\"\n lenOfS = len(s)\n for i in range(0, lenOfS):\n for j in range(i, lenOfS):\n string = s[i: j+1]\n substrings += string + \",\"\n substrings = substrings[:-1]\n return substrings\n\n\ndef findCommonSubstrings(s1, s2):\n commonStrings = \"\"\n substringOfS1 = getAllSubstrings(s1)\n substringOfS2 = getAllSubstrings(s2)\n strings1 = substringOfS1.split(\",\")\n strings2 = substringOfS2.split(\",\")\n for string_1 in strings1:\n for string_2 in strings2:\n if string_1 == string_2:\n commonStrings += string_1 + \",\"\n commonStrings = commonStrings[:-1]\n return commonStrings\n\n\ndef longestCommonSubstring(s1, s2):\n if s1 == \"\" or s2 == \"\":\n return \"\"\n elif s1 == s2:\n return s1\n else:\n commonStrings = findCommonSubstrings(s1, s2)\n lenofmax = 0\n for commonstring in commonStrings.split(\",\"):\n if len(commonstring) > lenofmax:\n lenofmax = len(commonstring)\n result = \"\"\n for commonstring in commonStrings.split(\",\"):\n if len(commonstring) == lenofmax:\n result += commonstring + \",\"\n result = result[:-1]\n resultList = result.split(\",\")\n return min(resultList)\n\n\n\nprint(longestCommonSubstring(\"abcdef\", \"abqrcdest\"))\nprint(longestCommonSubstring(\"abcABC\", \"zzabZZAB\"))\n" }, { "alpha_fraction": 0.5983810424804688, "alphanum_fraction": 0.5983810424804688, "avg_line_length": 38.09756088256836, "blob_id": "76997033b05aaf64c11d267b3f012f913d95aac8", "content_id": "1d0caa83a97998409c75a76f0ae6524b1296fe99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 70, "num_lines": 41, "path": "/15112-CMU/TP/playerAndComputer.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file is mainly for class Player and Computer\n# which means that team players is controlled by player or computer\nfrom FIFAplayer import *\n\n\nclass Player(FIFAPlayer):\n def __init__(self, centerx, centery):\n blueTeam = 'blue-team'\n super(Player, self).__init__(centerx, centery, blueTeam)\n\n # this is the control method\n def control(self):\n # cite pygame.key.get_pressed() this code structure is from\n # https://github.com/search?q=pygame.key.get_pressed&type=Code\n if pygame.key.get_pressed()[pygame.K_UP] \\\n and pygame.key.get_pressed()[pygame.K_LEFT]:\n self.move(\"upLeft\")\n elif pygame.key.get_pressed()[pygame.K_UP] \\\n and pygame.key.get_pressed()[pygame.K_RIGHT]:\n self.move(\"upRight\")\n elif pygame.key.get_pressed()[pygame.K_DOWN] \\\n and pygame.key.get_pressed()[pygame.K_LEFT]:\n self.move(\"downLeft\")\n elif pygame.key.get_pressed()[pygame.K_DOWN] \\\n and pygame.key.get_pressed()[pygame.K_RIGHT]:\n self.move(\"downRight\")\n elif pygame.key.get_pressed()[pygame.K_UP]:\n self.move(\"up\")\n elif pygame.key.get_pressed()[pygame.K_DOWN]:\n 
self.move(\"down\")\n elif pygame.key.get_pressed()[pygame.K_LEFT]:\n self.move(\"left\")\n elif pygame.key.get_pressed()[pygame.K_RIGHT]:\n self.move(\"right\")\n\n\n\nclass Computer(FIFAPlayer):\n def __init__(self, centerx, centery):\n redTeam = 'red-team'\n super(Computer, self).__init__(centerx, centery, redTeam)\n\n\n\n" }, { "alpha_fraction": 0.5972313284873962, "alphanum_fraction": 0.6078800559043884, "avg_line_length": 32.44657516479492, "blob_id": "241e6a2d4faec5925579493d2759903d55509a5c", "content_id": "ebfba3ef07a415a71586b8e4e2220a5cf3ec6e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12208, "license_type": "no_license", "max_line_length": 75, "num_lines": 365, "path": "/15112-CMU/week5/removefullrow.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw5\n# Your andrewID:mxu2\n# Your section: 2N\n#################################################\n\n\n#################################################\n# Hw5 tetris Problem\n#################################################\nfrom tkinter import *\nimport random\n\n####################################\n# customize these functions\n####################################\n\n# this is the function generating falling piece\ndef generateFallingPieces():\n # Seven \"standard\" pieces (tetrominoes)\n iPiece = [[True, True, True, True]]\n jPiece = [[True, False, False], [True, True, True]]\n lPiece = [[False, False, True], [True, True, True]]\n oPiece = [[True, True], [True, True]]\n sPiece = [[False, True, True], [True, True, False]]\n tPiece = [[False, True, False], [True, True, True]]\n zPiece = [[True, True, False], [False, True, True]]\n tetrisPieces = [iPiece, jPiece, lPiece, oPiece, sPiece, tPiece, zPiece]\n tetrisPieceColors = [\"red\", \"yellow\", \"magenta\",\n \"pink\", \"cyan\", \"green\", \"orange\"]\n return tetrisPieces, tetrisPieceColors\n\n\n# this is the model of the animation\ndef init(data):\n data.rows = gameDimensions()[0]\n data.cols = gameDimensions()[1]\n data.cellSize = gameDimensions()[2]\n data.margin = gameDimensions()[3]\n data.emptyColor = 'blue'\n data.board = [[data.emptyColor] * data.cols for i in range(data.rows)]\n data.tetrisPieces, data.tetrisPieceColors = generateFallingPieces()\n data.FallingPiece = []\n data.FallingColor = ''\n data.fallingPieceRow = 0\n data.fallingPieceCol = 0\n data.score = 0\n data.isGameOver = False\n newFallingPiece(data)\n\n\n# This is the move falling piece function\ndef moveFallingPiece(data, drow, dcol):\n data.fallingPieceRow += drow\n data.fallingPieceCol += dcol\n if not fallingPieceIsLegal(data):\n data.fallingPieceRow -= drow\n data.fallingPieceCol -= dcol\n return False\n else:\n return True\n\n\n# this is the remove full row function\ndef removeFullRows(data):\n fullRows = 0\n newBoard = []\n for row in range(len(data.board)):\n isFilled = True\n for col in range(len(data.board[0])):\n if data.board[row][col] == data.emptyColor:\n isFilled = False\n if isFilled:\n fullRows += 1\n continue\n else:\n newBoard += [data.board[row]]\n linesNeedToAdd = len(data.board) - len(newBoard)\n for i in range(linesNeedToAdd):\n topPosition = 0\n newBoard.insert(topPosition, [data.emptyColor] * data.cols)\n data.board = newBoard\n data.score += fullRows**2\n\n\n# this is the remove full row function\ndef removeFullRows(data):\n fullRows = 0\n newRow = data.rows - 1\n # loop through board backwards\n for oldRow in range(data.rows - 1, -1, 
-1):\n isFilledRow = True\n # check whether it is an empty color position\n for i in range(len(data.board[oldRow])):\n # add unfilled row to board at newRow position\n if data.board[oldRow][i] == data.emptyColor:\n for j in range(len(data.board[oldRow])):\n data.board[newRow][j] = data.board[oldRow][j]\n newRow -= 1\n isFilledRow = not isFilledRow\n break\n # there is no empty colors, it is a filled row\n if isFilledRow:\n fullRows += 1\n # fill rest of board with data.emptyColor\n for row in range(newRow, -1, -1):\n for col in range(len(data.board[0])):\n data.board[row][col] = data.emptyColor\n data.score += fullRows ** 2\n\n# this is the key press handler\ndef keyPressed(event, data):\n if event.char == 'r':\n init(data)\n if event.keysym == 'space':\n newFallingPiece(data)\n if event.keysym == \"Up\":\n rotateFallingPiece(data)\n elif event.keysym == \"Down\":\n moveFallingPiece(data, 1, 0)\n if not fallingPieceIsLegal(data):\n moveFallingPiece(data, -1, 0)\n elif event.keysym == \"Left\":\n moveFallingPiece(data, 0, -1)\n if not fallingPieceIsLegal(data):\n moveFallingPiece(data, 0, 1)\n elif event.keysym == \"Right\":\n moveFallingPiece(data, 0, 1)\n if not fallingPieceIsLegal(data):\n moveFallingPiece(data, 0, -1)\n\n\n# this is the mouse press event handler\ndef mousePressed(event, data):\n pass\n\n\n# this is the draw Game over function\ndef draw(canvas, data):\n if data.isGameOver:\n text = 'Game Over!'\n x0 = data.margin\n y0 = data.margin + data.cellSize\n x1 = x0 + data.cellSize * data.cols\n y1 = y0 + data.cellSize * 2\n canvas.create_rectangle(x0, y0, x1, y1, fill = \"black\")\n xt = x0 + (x1 - x0) / 2\n yt = y0 + (y1 - x0) / 3\n canvas.create_text(xt, yt, text = text,\n fill = \"yellow\", font = (\"Helvetica\", 20))\n\n\n# this is the timer of the animation\ndef timerFired(data):\n if not moveFallingPiece(data, +1, 0):\n placeFallingPiece(data)\n if not data.isGameOver:\n newFallingPiece(data)\n if not fallingPieceIsLegal(data):\n data.isGameOver = True\n\n\n# this is the function place the falling piece on the bottom of board\ndef placeFallingPiece(data):\n rowOfPiece = len(data.FallingPiece)\n lenOfPiece = len(data.FallingPiece[0])\n for row in range(rowOfPiece):\n for col in range(lenOfPiece):\n if data.FallingPiece[row][col]:\n color = data.FallingColor\n data.board[row + data.fallingPieceRow]\\\n [col + data.fallingPieceCol] = color\n removeFullRows(data)\n\n\n# this is the new falling piece function\ndef newFallingPiece(data):\n randomIndex = random.randint(0, len(data.tetrisPieces) - 1)\n data.FallingPiece = data.tetrisPieces[randomIndex]\n data.FallingColor = data.tetrisPieceColors[randomIndex]\n # this is the initial position of falling piece\n data.fallingPieceCol = data.cols // 2 - len(data.FallingPiece[0]) // 2\n data.fallingPieceRow = 0\n\n\n# this function draws the falling piece\ndef drawFallingPiece(canvas, data):\n rowOfPiece = len(data.FallingPiece)\n lenOfPiece = len(data.FallingPiece[0])\n for row in range(rowOfPiece):\n for col in range(lenOfPiece):\n if data.FallingPiece[row][col]:\n color = data.FallingColor\n drawCell(canvas, data, row + data.fallingPieceRow,\n col + data.fallingPieceCol, color)\n\n\n# this function will test whether or not falling piece is legal\ndef fallingPieceIsLegal(data):\n # loop through row of falling piece\n for i in range(len(data.FallingPiece)):\n # loop through col of falling piece\n for j in range(len(data.FallingPiece[0])):\n if data.FallingPiece[i][j]:\n if data.fallingPieceCol < 0 or\\\n data.fallingPieceCol + 
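# Hedged sketch of the row-clearing idea behind the two removeFullRows
# variants above: keep only rows that still contain an empty cell, then pad
# the top with fresh empty rows. Illustrative only; emptyColor and cols stand
# in for the fields on the tetris data object.
def clearFullRows(board, emptyColor, cols):
    kept = [row for row in board if emptyColor in row]  # non-full rows survive
    cleared = len(board) - len(kept)
    padding = [[emptyColor] * cols for _ in range(cleared)]
    return padding + kept, cleared

# e.g. data.board, n = clearFullRows(data.board, data.emptyColor, data.cols)
#      data.score += n ** 2   # same scoring rule as above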
len(data.FallingPiece[0])\\\n > data.cols:\n return False\n elif data.fallingPieceRow < 0 or\\\n data.fallingPieceRow + len(data.FallingPiece)\\\n > data.rows:\n return False\n elif data.board[data.fallingPieceRow + i]\\\n [data.fallingPieceCol + j] != data.emptyColor:\n return False\n return True\n\n\n# this function return the row, cols of the data.\ndef getRowNumRowsAndCenter(data):\n row = data.fallingPieceRow\n numRows = len(data.FallingPiece)\n centerRow = row + numRows / 2\n return row, numRows, centerRow\n\n\n# this function return the col, cols of the data.\ndef getColNumColsAndCenter(data):\n col = data.fallingPieceCol\n numCols = len(data.FallingPiece[0])\n return col, numCols\n\n\n# this is the rotate function\ndef rotateFallingPiece(data):\n oldPiece = data.FallingPiece\n lenOldRow = len(data.FallingPiece)\n lenOldCol = len(data.FallingPiece[0])\n oldColPosition = data.fallingPieceCol\n oldRowPosition = data.fallingPieceRow\n oldRow, oldNumRows, oldCenterRow = getRowNumRowsAndCenter(data)\n oldCol,oldNumCols = getColNumColsAndCenter(data)\n lenNewRow = lenOldCol\n lenNewCol = lenOldRow\n newPiece = []\n for i in range(lenNewRow):\n newPiece += [[None]*lenNewCol]\n for x in range(lenOldRow):\n for y in range(lenOldCol):\n if oldPiece[x][y]:\n newPiece[lenOldCol - 1 - y][x] = oldPiece[x][y]\n data.FallingPiece = newPiece\n newRow, newNumRows, newCenterRow = getRowNumRowsAndCenter(data)\n newCol, newNumCols = getColNumColsAndCenter(data)\n newCol = oldCol + oldNumCols / 2 - newNumCols / 2\n newRow = oldRow + oldNumRows / 2 - newNumRows / 2\n data.fallingPieceCol = round(newCol)\n data.fallingPieceRow = round(newRow)\n if not fallingPieceIsLegal(data):\n data.FallingPiece = oldPiece\n data.fallingPieceCol = oldColPosition\n data.fallingPieceRow = oldRowPosition\n\n\n# this is function is used to draw score\ndef drawScore(canvas,data):\n canvas.create_text(data.width / 2, data.height / 25,\n text = \"Score: \" + str(data.score))\n\n\n# This is the function that draws the cell\ndef drawCell(canvas, data, row, col, color):\n x0 = col * data.cellSize + data.margin\n y0 = row * data.cellSize + data.margin\n x1 = x0 + data.cellSize\n y1 = y0 + data.cellSize\n color = color\n canvas.create_rectangle(x0, y0, x1, y1, fill = color,\n outline = 'black', width = 4)\n\n\n# This is the function that draws the board\ndef drawBoard(canvas, data):\n for row in range(data.rows):\n for col in range(data.cols):\n color = data.board[row][col]\n drawCell(canvas, data, row, col, color)\n\n\n# This function return the default features of Tetris\ndef gameDimensions():\n rows = 15\n cols = 10\n cellSize = 20\n margin = 25\n return(rows, cols, cellSize, margin)\n\n\n# This function set up the window for this game\ndef playTetris():\n (rows, cols, cellSize, margin) = gameDimensions()\n width = cols * cellSize + 2 * margin\n height = rows * cellSize + 2 * margin\n run(width, height)\n\n\n# this is the main draw function of the animation\ndef redrawAll(canvas, data):\n # draw orange background\n canvas.create_rectangle(0, 0, data.width, data.height, fill = 'orange')\n drawBoard(canvas, data)\n drawScore(canvas, data)\n drawFallingPiece(canvas, data)\n if data.isGameOver:\n draw(canvas, data)\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill = 'white', width=0)\n redrawAll(canvas, data)\n 
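# Hedged aside on rotateFallingPiece above: the index shuffle
# newPiece[lenOldCol - 1 - y][x] = oldPiece[x][y] is a 90-degree
# counterclockwise rotation, which zip can express directly. Sketch only.
def rotateCCW(piece):
    # reverse each row, then transpose: new[C-1-y][x] == old[x][y]
    return [list(col) for col in zip(*[row[::-1] for row in piece])]

# e.g. rotateCCW([[False, True, True],
#                 [True, True, False]])
# -> [[True, False], [True, True], [False, True]]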
canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 1000 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(250, 350)\n" }, { "alpha_fraction": 0.5881825089454651, "alphanum_fraction": 0.6213911771774292, "avg_line_length": 24.71538543701172, "blob_id": "16e2070bcbc39259ec5b156ebd0fe42abf50e26a", "content_id": "c3160e057059a3ae2940e74c38cb60573240b969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6685, "license_type": "no_license", "max_line_length": 99, "num_lines": 260, "path": "/15112-CMU/week4 cold cold/animation.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport math\n# This is the init function of animation\ndef init(data):\n data.torX = data.width/2\n data.torY = data.height/2\n data.commands = listOfDataCode(data)\n data.commandsToDraw = []\n data.torToDraw = []\n data.counter = 0\n\n# Return list of data code\ndef listOfDataCode(data):\n lstOfDataCode = []\n for line in data.code.split(\"\\n\"):\n lstOfDataCode.append(line)\n lstOfDataCode.pop()\n lstOfDataCode = lstOfDataCode[1:]\n return lstOfDataCode\n\n\n# this is a mouse event handler\ndef mousePressed(event, data):\n pass\n\n\n# this is a keyboard event handler\ndef keyPressed(event, data):\n if (event.keysym == \"Return\"):\n if data.counter < len(data.commands):\n data.commandsToDraw.append((data.commands[data.counter], data.counter))\n data.counter += 1\n\n\n# Get information of each command\ndef getInformationOfCommands(text, color, x, y, angle):\n color = color\n move = 0\n angle = angle\n if text.startswith('color') and \"#\" not in text:\n color = text[6:]\n if color == None:\n color = \"white\"\n elif text.startswith('color') and \"#\" in text:\n color = text.split(\" \")[1]\n if color == None:\n color = \"white\"\n elif text.startswith('left'):\n angle = + int(text[5:])\n elif text.startswith('right'):\n angle = - int(text[6:])\n elif text.startswith('move'):\n move = int(text[5:])\n x = x + move * math.cos((angle/360)*2*math.pi)\n if angle < 0:\n y = y - move * math.sin((angle/360)*2*math.pi)\n else:\n y = y + move * math.sin((angle/360)*2*math.pi)\n if text == []:\n color, x, y, angle = color, x, y, angle\n return color, x, y, angle\n\n\n# This is the run current line function\ndef runProgram(canvas, data, currentLine):\n color = \"\"\n angle = 0\n for i in range(currentLine):\n # print(data.commandsToDraw[i])\n canvas.create_text(data.width/50,\n 
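# Hedged note on getInformationOfCommands above: "left 90" runs
# angle = + int(text[5:]), overwriting the heading rather than turning from it
# (angle += / angle -= looks intended), and the y update branches on the sign
# of angle instead of always subtracting the sine term. A minimal
# turtle-style state update, as a sketch with hypothetical names:
import math

def step(x, y, angle, command):
    # returns the new (x, y, angle) after one "left N" / "right N" / "move N"
    verb, _, arg = command.partition(" ")
    if verb == "left":
        angle += int(arg)
    elif verb == "right":
        angle -= int(arg)
    elif verb == "move":
        rad = math.radians(angle)
        x += int(arg) * math.cos(rad)
        y -= int(arg) * math.sin(rad)  # canvas y grows downward
    return x, y, angle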
data.height/20 + data.commandsToDraw[i][1]*data.height/30,\n text=str(data.commandsToDraw[i][0]), anchor=\"w\", fill='gray')\n color,data.torX,data.torY,angle = \\\n getInformationOfCommands(data.commandsToDraw[i][0], color, data.torX, data.torY, angle)\n\n # print(color, data.torX, data.torY, angle)\n drawArrow(canvas, data.torX, data.torY, angle)\n # color, data.torX, data.torY, angle = drawArrow(canvas, data.torX, data.torY, angle)\n # print(color, data.torX, data.torY, angle,\"hhh\")\n\n# this function get the coordinate of the black rectangle\ndef getCoordinateOfRectangle(width, height):\n recx0 = 0\n recy0 = (9 / 10) * height\n recx1 = width\n recy1 = height\n return recx0, recy0, recx1, recy1\n\n# this function get the color of the small rectangles\ndef getColorOfBox(i):\n if i == 0:\n color = 'red'\n elif i == 1:\n color = 'orange'\n elif i == 2:\n color = 'yellow'\n elif i == 3:\n color = 'green'\n elif i == 4:\n color = 'blue'\n elif i == 5:\n color = 'purple'\n else:\n color = 'white'\n return color\n\n\n# this function get the coordinates of the small rectangles\ndef coordinatesInLoop(margin, i, widthOfRectangles, recy0):\n x0 = margin + i * widthOfRectangles + i * margin\n y0 = recy0 + margin\n x1 = x0 + widthOfRectangles\n y1 = y0 + widthOfRectangles - margin\n return x0, y0, x1, y1\n\n\n# this is the main animation function\ndef redrawAll(canvas, data):\n runProgram(canvas, data, data.counter)\n margin = (1/100)*data.height\n widthOfRectangles = (data.width - 11 * margin)/10\n recx0, recy0, recx1, recy1 = getCoordinateOfRectangle(data.width, data.height)\n canvas.create_rectangle(recx0, recy0, recx1, recy1, fill='black')\n for i in range(10):\n x0, y0, x1, y1 = coordinatesInLoop(margin, i, widthOfRectangles, recy0)\n color = getColorOfBox(i)\n canvas.create_rectangle(x0, y0, x1, y1, fill=color)\n\n\n\"\"\" This function is provided as part of the starter code.\nYou don't need to change it, but you should call it!\"\"\"\ndef drawArrow(canvas, x, y, angle):\n offset = 135\n r = 10\n x1 = x + r*math.cos(math.radians(angle))\n y1 = y - r*math.sin(math.radians(angle))\n x2 = x + r*math.cos(math.radians(angle + offset))\n y2 = y - r*math.sin(math.radians(angle + offset))\n x3 = x + r*math.cos(math.radians(angle - offset))\n y3 = y - r*math.sin(math.radians(angle - offset))\n canvas.create_polygon(x1, y1, x2, y2, x3, y3, fill=\"black\")\n\n\ndef runTortoiseAnimation(code, width=500, height=500):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.code = code\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n root.mainloop() # blocks until window is closed\n\ndef testTortoiseAnimation():\n print(\"Running Tortoise Animation...\", end=\"\")\n runTortoiseAnimation(\"\"\"\n# 
This is a simple tortoise program\ncolor blue\nmove 50\n\nleft 90\n\ncolor red\nmove 100\n\ncolor none # turns off drawing\nmove 50\n\nright 45\n\ncolor green # drawing is on again\nmove 50\n\nright 45\n\ncolor orange\nmove 50\n\nright 90\n\ncolor purple\nmove 100\n\"\"\")\n# runTortoiseAnimation(\"\"\"\n# # Y\n# color red\n# right 45\n# move 50\n# right 45\n# move 50\n# right 180\n# move 50\n# right 45\n# move 50\n# color none # space\n# right 45\n# move 25\n#\n# # E\n# color green\n# right 90\n# move 85\n# left 90\n# move 50\n# right 180\n# move 50\n# right 90\n# move 42\n# right 90\n# move 50\n# right 180\n# move 50\n# right 90\n# move 43\n# right 90\n# move 50 # space\n# color none\n# move 25\n#\n# # S\n# color blue\n# move 50\n# left 180\n# move 50\n# left 90\n# move 43\n# left 90\n# move 50\n# right 90\n# move 42\n# right 90\n# move 50\n# \"\"\")\n print(\"Done.\")\n\ntestTortoiseAnimation()" }, { "alpha_fraction": 0.6678414344787598, "alphanum_fraction": 0.6986784338951111, "avg_line_length": 29.70270347595215, "blob_id": "6110c61374ef089c2bf9c0237479034abb1081c2", "content_id": "9f22eddba20f9249988fc53498832f0ea9586f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1135, "license_type": "no_license", "max_line_length": 48, "num_lines": 37, "path": "/15112-CMU/FIFAworldcup copy2/functions.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\nfrom const import *\nimport numpy\nimport random\n\ndef rename():\n folder = 'assets/sounds/hits'\n for filename in os.listdir(folder):\n infilename = os.path.join(folder,filename)\n if not os.path.isfile(infilename): continue\n oldbase = os.path.splitext(filename)\n newname = infilename.replace('.mp3', '.wav')\n output = os.rename(infilename, newname)\n\ndef caculateDistance(position1, position2):\n x = abs(position2[0] - position1[0])\n y = abs(position2[1] - position1[1])\n\n return x * x + y * y\n\ndef convertDirectVector(direction):\n if direction is LEFT:\n return pygame.Vector2(-1, 0).normalize()\n elif direction is RIGHT:\n return pygame.Vector2(1, 0).normalize()\n elif direction is UP:\n return pygame.Vector2(0, -1).normalize()\n elif direction is DOWN:\n return pygame.Vector2(0, 1).normalize()\n elif direction is UP_LEFT:\n return pygame.Vector2(-1, -1).normalize()\n elif direction is UP_RIGHT:\n return pygame.Vector2(1, -1).normalize()\n elif direction is DOWN_LEFT:\n return pygame.Vector2(-1, 1).normalize()\n elif direction is DOWN_RIGHT:\n return pygame.Vector2(1, 1).normalize()" }, { "alpha_fraction": 0.49643975496292114, "alphanum_fraction": 0.5081173181533813, "avg_line_length": 30.070796966552734, "blob_id": "e6037f2276fdf43d8ed411f25cbafed06a638578", "content_id": "3a04eb567bc6de8e9859eddecb74450278ab9490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3511, "license_type": "no_license", "max_line_length": 75, "num_lines": 113, "path": "/15112-CMU/week10/demo2.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\n\ndef isValid(board, queenRow, queenCol):\n # A board is legal if no two queens can attack each other\n # We only need to check the most recently placed queen\n for row in range(len(board)):\n for col in range(len(board[0])):\n if queenRow == row and queenCol == col:\n continue\n elif board[row][col] == \"Q\":\n if ((queenRow == row) or\n (queenCol == col) or\n (queenRow + queenCol == row + col) or\n (queenRow - 
queenCol == row - col)):\n return False\n return True\n\ndef solve(board, queen):\n if queen == len(board):\n return board\n else:\n row = queen\n for col in range(len(board[row])):\n if isValid(board, row, col):\n board[row][col] = \"Q\"\n tmp = solve(board, queen+1)\n if tmp is not None:\n return board\n board[row][col] = \" \"\n return None\n\n\ndef nQueens(n):\n board = [[\" \"] * n for row in range(n)]\n return solve(board, 0)\n\n# this is the decorator for makeExample2DList\ndef print2DListResult(makeExample2DList):\n def printLst(n):\n lst = makeExample2DList(n)\n rows = len(lst)\n cols = len(lst[0])\n res = ''\n for row in range(rows):\n res += (\"[\" + \" \"*n)\n for col in range(cols):\n numOfspace = n\n lenOfnum = len(str(lst[row][col]))\n if lenOfnum > 1:\n numOfspace -= (lenOfnum - 1)\n res += str(lst[row][col])\n res += \" \" * numOfspace\n res += \"]\\n\"\n return res\n return printLst\n\n\n# this is the main function for make example 2D list\n@print2DListResult\ndef makeExample2DList(n):\n myList= nQueens(6)\n return myList\n\nprint(makeExample2DList(5))\n\ndef drawVicsek(canvas, x, y, size, level):\n if level == 0:\n canvas.create_rectangle(x, y, x + size, y+size,fill= 'black')\n else:\n newSize =size/3\n for row in range(3):\n for col in range(3):\n if (row + col)%2==0:\n drawVicsek(canvas,\n x+col*newSize,y+col*newSize,newSize,level-1)\n\n\ndef init(data):\n data.level = 1\n\ndef drawVicsek(canvas, x, y, size, level):\n if level == 0:\n canvas.create_rectangle(x, y, x + size, y+size,fill= 'black')\n else:\n newSize =size/3\n for row in range(3):\n for col in range(3):\n if (row + col)%2==0:\n drawVicsek(canvas,\n x+col*newSize,y+col*newSize,newSize,level-1)\n\ndef keyPressed(event, data):\n if event.keysym in [\"Up\", \"Right\"]:\n data.level += 1\n elif (event.keysym in [\"Down\", \"Left\"]) and (data.level > 0):\n data.level -= 1\n\ndef redrawAll(canvas, data):\n margin = min(data.width, data.height)//10\n otherParams = None\n drawVicsek(canvas, data.level, otherParams)\n canvas.create_text(data.width/2, 0,\n text = \"Level %d Fractal\" % (data.level),\n font = \"Arial \" + str(int(margin/3)) + \" bold\",\n anchor=\"n\")\n canvas.create_text(data.width/2, margin,\n text = \"Use arrows to change level\",\n font = \"Arial \" + str(int(margin/4)),\n anchor=\"s\")\n\ndef mousePressed(event, data): pass\n\ndef timerFired(data): pass\n" }, { "alpha_fraction": 0.5456658005714417, "alphanum_fraction": 0.5657730102539062, "avg_line_length": 28.83733367919922, "blob_id": "5c1a165e0238d86202a54bebcae82a77099901d8", "content_id": "4665063059c9ab4a684bb41cee983f946df35ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11190, "license_type": "no_license", "max_line_length": 78, "num_lines": 375, "path": "/15112-CMU/week7/hw7.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw7\n# Your andrewID:mxu2\n# Your section: 2N\n#################################################\nimport time\n\n\"\"\"\n# 1A: This function just exchange the first and the last\n# element of a list\ndef slow1(lst): # N is the length of the list lst\n assert(len(lst) >= 2)\n a = lst.pop() # O(1)\n b = lst.pop(0) # O(N)\n lst.insert(0, a) # O(N)\n lst.append(b) # O(1)\n# 1B: O(N)\n\ndef fast1(lst):\n assert(len(lst) >= 2)\n lst[0],lst[-1] = lst[-1],lst[0] #O(1)\n\n# 1D: O(1)\n\n\n# 2A: This function returns how many different numbers in a list\ndef slow2(lst): # N is the 
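# Hedged aside on the nQueens backtracker above: isValid rescans the whole
# board after every placement; tracking attacked columns and diagonals in
# sets makes each check O(1). Sketch only, not the assignment's required API.
def nQueensPositions(n):
    cols, diag1, diag2 = set(), set(), set()
    placed = []  # placed[row] is the column of the queen on that row
    def place(row):
        if row == n:
            return True
        for col in range(n):
            if col in cols or (row + col) in diag1 or (row - col) in diag2:
                continue  # square is attacked by an earlier queen
            cols.add(col); diag1.add(row + col); diag2.add(row - col)
            placed.append(col)
            if place(row + 1):
                return True
            placed.pop()
            cols.discard(col); diag1.discard(row + col); diag2.discard(row - col)
        return False
    return placed if place(0) else None

# e.g. nQueensPositions(6) -> [1, 3, 5, 0, 2, 4]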
length of the list lst\n counter = 0 # O(1)\n for i in range(len(lst)): # Loops N times\n if lst[i] not in lst[:i]: # O(N**2)\n counter += 1 # O(1)\n return counter # O(1)\n \n# 2B: O(N**3)\n\n\ndef fast2(lst):\n a = set(lst) #O(N)\n return len(a) #O(1)\n \n# 2D: O(N)\n\n\n# 3A: This function returns a letter (lowercase) in a string,\n# which appears the most number of times.\n# If two letter's appear frequency is equal, return\n# the smaller letter\n\nimport string\ndef slow3(s): # N is the length of the string s\n maxLetter = \"\" # O(1)\n maxCount = 0 # O(1)\n for c in s: # Loops N times\n for letter in string.ascii_lowercase: # Loops O(1) == 26 times\n if c == letter: # O(1)\n if s.count(c) > maxCount or \\\n s.count(c) == maxCount and \\\n c < maxLetter: # O(N)\n maxCount = s.count(c) # O(N)\n maxLetter = c # O(1)\n return maxLetter # O(1)\n# 3B: O(N**2)\n\n\ndef fast3(s):\n maxLetter = \"\" #O(1)\n maxCount = 0 #O(1)\n d = dict() #O(1)\n for c in s: # Loop O(N)\n if c.islower(): # O(1)\n if c not in d: \n d[c] = 1\n else:\n d[c] += 1\n for char in d: # Loop O(N)\n if d[char] > maxCount or d[char] == maxCount and \\\n char < maxLetter: #O(1)\n maxCount = d[char] #O(1)\n maxLetter = char #O(1)\n return maxLetter #O(1)\n \n# 3D: O(N)\n\n\n# 4A: This function return the maximum value\n# of difference of values in two lists\ndef slow4(a, b): # a and b are lists with the same length N\n assert(len(a) == len(b))\n result = abs(a[0] - b[0]) # O(1)\n for c in a: # Loops N times\n for d in b: # Loops N times\n delta = abs(c - d) # O(1)\n if (delta > result): # O(1)\n result = delta # O(1)\n return result # O(1)\n \n# 4B: O(N**2)\n\ndef fast4(a, b):\n minOfa = min(a) #O(N)\n maxOfa = max(a) #O(N)\n minOfb = min(b) #O(N)\n maxOfb = max(b) #O(N)\n delta1 = abs(maxOfa - minOfb) #O(1)\n delta2 = abs(maxOfb - minOfa) #O(1)\n if delta1 > delta2: #O(1)\n return delta1 #O(1)\n else:\n return delta2 #O(1)\n \n# 4D: O(N)\n\"\"\"\n\n\n# This is the function of checking whether it\n# # contains Pythagorean Triple\ndef containsPythagoreanTriple(lst):\n lst.sort()\n for i in range(len(lst) - 2):\n a = lst[i]**2\n b = lst[i + 1]**2\n c = a + b\n if c**0.5 in lst[i + 2: len(lst)]:\n return True\n else:\n continue\n return False\n\n\n\n\ndef getPairSum(lst, target):\n if len(lst) <= 1:\n return None\n s = set()\n for i in range(len(lst)):\n b = target - lst[i]\n if b in s: # b has been seen before\n return(b, lst[i])\n else:\n s.add(lst[i])\n return None\n\n\n# this is a helper function to swap number in list\ndef swap(a, i, j):\n (a[i], a[j]) = (a[j], a[i])\n\n\n# this function is the algorithm of selection sort list\ndef instrumentedSelectionSort(lst):\n start = time.time()\n n = len(lst)\n numOfComparisons = 0\n numOfSwaps = 0\n for startIndex in range(n):\n minIndex = startIndex\n for i in range(startIndex + 1, n):\n numOfComparisons += 1\n if (lst[i] < lst[minIndex]):\n minIndex = i\n swap(lst, startIndex, minIndex)\n numOfSwaps += 1\n end = time.time()\n timeToRun = (end - start) / 1 # millisecond\n return (numOfComparisons, numOfSwaps, timeToRun)\n\n\n# this function is the algorithm of bubble sort list\ndef instrumentedBubbleSort(lst):\n start = time.time()\n n = len(lst)\n end = n\n numOfComparisons = 0\n numOfSwaps = 0\n swapped = True\n while (swapped):\n swapped = False\n for i in range(1, end):\n numOfComparisons += 1\n if (lst[i - 1] > lst[i]):\n swap(lst, i - 1, i)\n numOfSwaps += 1\n swapped = True\n end -= 1\n end = time.time()\n timeToRun = (end - start) / 1 # millisecond\n return 
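# Hedged note on containsPythagoreanTriple above: after sorting it only pairs
# *adjacent* values (lst[i], lst[i+1]), so [6, 7, 8, 10] misses the 6-8-10
# triple. Checking every pair against a set of squares fixes that; sketch
# only, assuming positive integers as in the test cases below.
def containsPythagoreanTripleFixed(lst):
    squares = {v * v for v in lst}
    for i in range(len(lst)):
        for j in range(i + 1, len(lst)):
            if lst[i] ** 2 + lst[j] ** 2 in squares:
                return True
    return False

# containsPythagoreanTripleFixed([6, 7, 8, 10]) -> True; the version above
# returns False on the same input.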
(numOfComparisons, numOfSwaps, timeToRun)\n\n\n# This function return a random generating list\ndef getARandomList():\n import random\n result = []\n lenOflist = 1000\n for i in range(lenOflist):\n result.append(random.randint(0, 50)) # generate random number\n return result\n\n\n# This function generate a random list\ndef generateARandomList():\n import random\n result = []\n lenOflist = 500\n for i in range(lenOflist):\n result.append(random.randint(0, 100)) # generate random number\n return result\n\n\n# This is a helper function for selectionSortVersusBubbleSort\ndef verifyBigO(l1, l2):\n import math\n timeToRun3 = instrumentedSelectionSort(l1)[2]\n timeToRun4 = instrumentedSelectionSort(l2)[2]\n print(\"For function instrumentedSelectionSort, \"\n \"when the len of list is N the runtime is \" + str(timeToRun3))\n print(\"For function instrumentedSelectionSort, \"\n \"when the len of list is 2N the runtime is \" + str(timeToRun4))\n print(\"The ratio of runtime of len2N / that of lenN is \" +\n str(math.ceil(timeToRun4 / timeToRun3)) +\n \". So verified it is a O(N**2)\")\n timeToRun5 = instrumentedBubbleSort(l1)[2]\n timeToRun6 = instrumentedBubbleSort(l2)[2]\n print(\"For function instrumentedBubbleSort, \"\n \"when the len of list is N the runtime is \" + str(timeToRun5))\n print(\"For function instrumentedBubbleSort, \"\n \"when the len of list is 2N the runtime is \" + str(timeToRun6))\n print(\"The ratio of runtime of len2N / that of lenN is \" +\n str(math.ceil(timeToRun6 / timeToRun5)) +\n \". So verified it is a O(N**2)\")\n\n\n# this function print the report of comparing the above two sorting algorithms\ndef selectionSortVersusBubbleSort():\n lst = getARandomList()\n l1 = generateARandomList() # list for checking O(N**2)\n l2 = generateARandomList()*2\n verifyBigO(l1, l2)\n (numOfComparisons1, numOfSwaps1, timeToRun1) = \\\n instrumentedSelectionSort(lst)\n (numOfComparisons2, numOfSwaps2, timeToRun2) = \\\n instrumentedBubbleSort(lst)\n if numOfComparisons1 < numOfComparisons2:\n print(\"The function instrumentedSelectionSort() uses fewer\"\n \" comparisons\")\n else:\n print(\"The function instrumentedBubbleSort() uses fewer comparisons\")\n if numOfSwaps1 < numOfSwaps2:\n print(\"The function instrumentedSelectionSort() makes fewer swaps\")\n else:\n print(\"The function instrumentedBubbleSort() makes fewer swaps\")\n if timeToRun1 < timeToRun2:\n print(\"The function instrumentedSelectionSort() runs in less time\")\n else:\n print(\"The function instrumentedBubbleSort() runs in less time\")\n\n\n# this is the function that report the information of the movie awards\ndef movieAwards(oscarResults):\n d = dict()\n movieNames = []\n for awards in oscarResults:\n movieNames.append(awards[1])\n for movieName in movieNames:\n if movieName not in d:\n d[movieName] = 1\n else:\n d[movieName] += 1\n return d # this is the dictionary of the award\n\n\n# this function return a dict of friends of friends\ndef getFriendsOfFriends(friend, d):\n return d[friend]\n\n\n# this function return friends of my friends\ndef friendsOfMyFriend(myself, myfriends, d):\n lstOfFriendsOfMyFriend = []\n for friend in myfriends:\n lstOfFriendsOfMyFriend += list(getFriendsOfFriends(friend, d))\n setOfFriendsOfMyFriend = set(lstOfFriendsOfMyFriend)\n if myself in setOfFriendsOfMyFriend:\n setOfFriendsOfMyFriend.remove(myself)\n for myFriend in myfriends:\n if myFriend in setOfFriendsOfMyFriend:\n setOfFriendsOfMyFriend.remove(myFriend)\n return setOfFriendsOfMyFriend\n\n\n# This is the main function of 
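# Hedged note on the timing code above: time.time() is coarse, and
# timeToRun = (end - start) / 1 is labeled "millisecond" but is really
# seconds. time.perf_counter() is the usual high-resolution choice. Minimal
# doubling-experiment sketch:
import time

def timeIt(sortFn, lst):
    start = time.perf_counter()
    sortFn(lst)
    return time.perf_counter() - start  # seconds

# a runtime ratio near 4 when the input length doubles suggests O(N**2),
# which is what verifyBigO below is checking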
friends of friends\ndef friendsOfFriends(d):\n result = dict()\n for c in d:\n myself = c\n myFriends = d[c]\n setFriendsOfFriends = friendsOfMyFriend(myself, myFriends, d)\n result[myself] = setFriendsOfFriends\n return result\n\n\n#################################################\n# The following are test cases for this homework\n#################################################\n\ndef testContainsPythagoreanTriple():\n print(\"Testing testContainsPythagoreanTriple()...\", end=\"\")\n assert(containsPythagoreanTriple([1, 3, 6, 2, 5, 1, 4]) == True)\n assert(containsPythagoreanTriple([1, 3, 6, 2, 1, 4]) == False)\n print(\"passed!\")\n\n\ndef testGetPairSum():\n print(\"Testing testGetPairSum()...\", end=\"\")\n assert(getPairSum([1], 1) == None)\n assert(getPairSum([5, 2], 7) in [(5, 2), (2, 5)])\n assert(getPairSum([10, -1, 1, -8, 3, 1], 2) in\\\n [(10, -8), (-8, 10), (-1, 3), (3, -1), (1, 1)])\n assert(getPairSum([10, -1, 1, -8, 3, 1], 10) == None)\n print(\"passed!\")\n\n\ndef testMovieAwards():\n print(\"Testing testMovieAwards()...\", end=\"\")\n assert(movieAwards({\n (\"Best Picture\", \"Green Book\"),\n (\"Best Actor\", \"Bohemian Rhapsody\"),\n (\"Best Actress\", \"The Favourite\"),\n (\"Film Editing\", \"Bohemian Rhapsody\"),\n (\"Best Original Score\", \"Black Panther\"),\n (\"Costume Design\", \"Black Panther\"),\n (\"Sound Editing\", \"Bohemian Rhapsody\"),\n (\"Best Director\", \"Roma\")\n }) == {\n \"Black Panther\" : 2,\n \"Bohemian Rhapsody\" : 3,\n \"The Favourite\" : 1,\n \"Green Book\" : 1,\n \"Roma\" : 1\n })\n print(\"passed!\")\n\n\ndef testFriendsOfFriends():\n print(\"Testing testFriendsOfFriends()...\", end=\"\")\n d = {}\n d[\"jon\"] = set([\"arya\", \"tyrion\"])\n d[\"tyrion\"] = set([\"jon\", \"jaime\", \"pod\"])\n d[\"arya\"] = set([\"jon\"])\n d[\"jaime\"] = set([\"tyrion\", \"brienne\"])\n d[\"brienne\"] = set([\"jaime\", \"pod\"])\n d[\"pod\"] = set([\"tyrion\", \"brienne\", \"jaime\"])\n d[\"ramsay\"] = set()\n assert(friendsOfFriends(d) == {\n 'tyrion': {'arya', 'brienne'},\n 'pod': {'jon'},\n 'brienne': {'tyrion'},\n 'arya': {'tyrion'},\n 'jon': {'pod', 'jaime'},\n 'jaime': {'pod', 'jon'},\n 'ramsay': set()\n })\n print(\"Passed!\")\n\n\n# this is the test all function\ndef testAll():\n testContainsPythagoreanTriple()\n testGetPairSum()\n testMovieAwards()\n testFriendsOfFriends()\n selectionSortVersusBubbleSort()\n\ntestAll()\n\n" }, { "alpha_fraction": 0.5244755148887634, "alphanum_fraction": 0.5262237787246704, "avg_line_length": 30.44444465637207, "blob_id": "bfa19bad9f83cc6d18cd835ec1e15aeabb8c9f0b", "content_id": "fb7a2c97543bc7214609081d5cea5e6d83086e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/15112-CMU/week10/test222.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import os\n\ndef findLargestFile(path):\n if os.path.isfile(path):\n return path\n else:\n largestFile = 0\n largestFilePath = \"\"\n for filename in os.listdir(path):\n if filename.startswith('.'):\n continue\n tempPath = findLargestFile(path + \"/\" + filename)\n if os.path.isfile(tempPath):\n temp = os.path.getsize(tempPath)\n if largestFile < temp:\n largestFile = temp\n largestFilePath = tempPath\n return largestFilePath\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.584967315196991, "alphanum_fraction": 0.5915032625198364, "avg_line_length": 28.095237731933594, "blob_id": 
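# Hedged aside on movieAwards above: tallying the second element of each
# (award, movie) pair is exactly what collections.Counter does. Sketch only.
from collections import Counter

def movieAwardCounts(oscarResults):
    return dict(Counter(movie for _award, movie in oscarResults))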
"c4a857d34b72a17be9fbfa13046075d0339f76be", "content_id": "5540a9672cc1fe1e9667122a6b95903a1a50be3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/15112-CMU/Design Proposal and TP/TP/music.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# this file only contains class backgroundmusic\nimport os\nimport pygame\n\n\nclass BackGroundMusic:\n def __init__(self):\n self.musicPath = None\n\n # method to load music\n def loadMusic(self):\n path = \"assets/sounds/music_background/\"\n for filename in os.listdir(path):\n if filename.endswith(\".DS_Store\"):\n continue\n else:\n if filename.endswith(\".mp3\"):\n self.musicPath = path + filename\n pygame.mixer.music.load(self.musicPath)\n pygame.mixer.music.play(-1)\n pygame.mixer.music.set_volume(0.3)\n\n" }, { "alpha_fraction": 0.48417720198631287, "alphanum_fraction": 0.4918248951435089, "avg_line_length": 27.734848022460938, "blob_id": "6c1929b190660d3b2f6fd4883eb72ee5e82aa384", "content_id": "b7c50b66fcfdc6fb234e371447a56c29da313d04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3792, "license_type": "no_license", "max_line_length": 80, "num_lines": 132, "path": "/15112-CMU/paractice/liekail.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from learnhmm import *\nfrom math import log\nimport sys\n\n\ndef new_dict(infile_path):\n res = dict()\n index = 0\n with open(infile_path, 'r') as f:\n for item in f:\n char = item.strip()\n res[index] = char\n index += 1\n return res\n\n\ndef viterbi(prior, trans, emit, example, len_tag_dict):\n predict = []\n W = np.zeros((len(example), len_tag_dict))\n B = np.zeros((len(example), len_tag_dict))\n # print(example)\n # print(\">>>>>\")\n # print(trans)\n for t in range(len(example)):\n if t == 0:\n # print(np.array(prior).T)\n # print(emit[:, example[t]])\n W[t] = prior + emit[:, example[t]]\n for i in range(len_tag_dict):\n B[t][i] = i\n # print(W)\n # print(\" \")\n # print(B)\n else:\n # print(W)\n # print(B)\n for k in range(len_tag_dict):\n # print((t, k))\n W_t_minus_1 = W[t - 1]\n # print(W_t_minus_1)\n # print(\"asdsad\")\n tmp = []\n for j in range(len_tag_dict):\n # print(W_t_minus_1[j])\n tmp_val = W_t_minus_1[j] + trans[j][k] + emit[k][example[t]]\n tmp.append(tmp_val)\n # print(\"tmp = [] is\" ,tmp)\n # print(\"argmax is\", np.argmax(tmp))\n index = np.argmax(tmp)\n B[t][k] = index\n W[t][k] = tmp[index]\n # print(\"final W is\\n\", W)\n # print(\"final B is\\n\", B)\n # print(W[len(example) - 1])\n y_hat_T = np.argmax(W[len(example) - 1])\n # print(y_hat_T)\n predict.append(y_hat_T)\n # print(\">>>>>>>>>>>>>>>>>>>.\")\n for t in range(len(example) - 1, 0, -1):\n # print(t)\n y_hat_T_minus_1 = B[t][int(y_hat_T)]\n # print(\"y_hat_T_minus_1 is\", y_hat_T_minus_1)\n predict.insert(0, y_hat_T_minus_1)\n y_hat_T = y_hat_T_minus_1\n # print(\"predict is \", predict)\n res = []\n for i in range(len(predict)):\n res.append(int(predict[i]))\n # print(\"final res is\", res)\n return res\n\n\ndef main():\n test_input = sys.argv[1]\n index_to_word = sys.argv[2]\n index_to_tag = sys.argv[3]\n hmmprior = sys.argv[4]\n hmmemit = sys.argv[5]\n hmmtrans = sys.argv[6]\n predicted_file = sys.argv[7]\n metrics_file = sys.argv[8]\n\n words, tags = parse_data(test_input)\n # print(words)\n # print(tags)\n tag_dict = load_index_dict(index_to_tag)\n 
word_dict = load_index_dict(index_to_word)\n # print(tag_dict)\n # print(word_dict)\n\n tag_dict_new = new_dict(index_to_tag)\n word_dict_new = new_dict(index_to_word)\n # print(tag_dict_new)\n # print(word_dict_new)\n\n words_idx_lst = convert_to_index(words, word_dict)\n tags_idx_lst = convert_to_index(tags, tag_dict)\n # print(words_idx_lst)\n # print(tags_idx_lst)\n prior = np.loadtxt(hmmprior)\n trans = np.loadtxt(hmmtrans)\n emit = np.loadtxt(hmmemit)\n\n prior = np.log(prior)\n trans = np.log(trans)\n emit = np.log(emit)\n # print(prior)\n # print(\" \")\n # print(trans)\n # print(\" \")\n # print(emit)\n # print(\" \")\n\n # print(\".................................\")\n out_str = \"\"\n for example in words_idx_lst:\n predict = viterbi(prior, trans, emit, example, len(tag_dict))\n sub_str = \"\"\n for word, tag in zip(example, predict):\n # print(word, tag)\n word_str = word_dict_new[word]\n pred_str = tag_dict_new[tag]\n sub_str += (word_str + \"_\" + pred_str + \" \")\n sub_str = sub_str[:-1]\n out_str += sub_str + \"\\n\"\n out_str = out_str[:-1]\n print(\"out_str is\", out_str)\n write(predicted_file, out_str)\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.48619958758354187, "alphanum_fraction": 0.5095541477203369, "avg_line_length": 22.575000762939453, "blob_id": "8d79d1025cd1d5ff985cf2bdd07862d2e9de39e9", "content_id": "bdc1f9838a805fd839f39f73f7d1f9fe366fe926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/15112-CMU/week9/problem5.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def loadBalance(lst):\n lstA = []\n lstB = []\n items = sorted(lst)\n for item in reversed(items):\n if sum(lstA) < sum(lstB):\n lstA += [item]\n else:\n lstB += [item]\n return (lstA, lstB)\n\nprint(loadBalance([3, 6, 1, 7, 9, 8, 22, 3]))\n\n\n# subsets\ndef powerset(a):\n if len(a) == 0:\n return [[]]\n else:\n partial = powerset(a[1:])\n allSubsets = []\n for subset in partial:\n allSubsets.append(subset)\n allSubsets.append([a[0]] + subset)\n # print(allSubsets)\n return allSubsets\nprint(powerset([1,2,3]))\n\n\ndef permutations(a):\n if len(a) == 1:\n return [a]\n else:\n partial = permutations(a[1:])\n allPerms = []\n for subPerm in partial:\n for i in range(len(subPerm) + 1):\n allPerms.append(subPerm[:i]+[a[0]]+subPerm[i:])\n return allPerms\nprint(permutations([1,2,3]))" }, { "alpha_fraction": 0.5905882120132446, "alphanum_fraction": 0.5996638536453247, "avg_line_length": 26.546297073364258, "blob_id": "ea7e062077d01ee2d703ea0a7bf94ed5e47e3efe", "content_id": "c30ab85dbe0b1f70c5c06c734fb8616beca24b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2975, "license_type": "no_license", "max_line_length": 72, "num_lines": 108, "path": "/15112-CMU/week8/demo.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport random\nclass Dot(object):\n def __init__(self, cx, cy):\n self.cx = cx\n self.cy = cy\n self.r = random.randint(5,20)\n self.color = random.choice([\"red\", \"green\", \"blue\"])\n\n def draw(self, canvas):\n canvas.create_oval(self.cx-self.r, self.cy-self.r,\n self.cx+self.r, self.cy+self.r, fill=self.color)\n\n\nclass MovingDot(Dot):\n def __init__(self, cx, cy):\n super().__init__(cx, cy)\n self.speed = random.randint(3, 15)\n\n def move(self):\n self.cx += self.speed\n\n\ndef 
init(data):\n # load data.xyz as appropriate\n data.dots = []\n data.isMoving = False\n\n\ndef mousePressed(event, data):\n # use event.x and event.y\n if data.isMoving:\n data.dots.append(Dot(event.x, event.y))\n data.isMoving = False\n else:\n data.dots.append(MovingDot(event.x, event.y))\n data.isMoving = True\n\n print(data.dots)\n\n\ndef keyPressed(event, data):\n # use event.char and event.keysym\n pass\n\ndef keyPressed(event, data):\n # use event.char and event.keysym\n pass\n\ndef timerFired(data):\n for dot in data.dots:\n if isinstance(dot, MovingDot):\n dot.move()\n\ndef redrawAll(canvas, data):\n # draw in canvas\n for dot in data.dots:\n dot.draw(canvas)\n\n####################################\n# use the run function as-is\n####################################\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 200)\n" }, { "alpha_fraction": 0.7393617033958435, "alphanum_fraction": 0.75, "avg_line_length": 26, "blob_id": "0c10c5aca5412c244749d76b402d85e5fdb0309d", "content_id": "69793f24560306314ac0fa024972d33e4083c464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 188, "license_type": "no_license", "max_line_length": 58, "num_lines": 7, "path": "/15112-CMU/FIFAworldcup copy/README.md", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# Installation:\n\n1. Install pygame package: `python -m pip install pygame`\n2. Install numpy package: `python -m pip install numpy`\n\n# Play\nread playinstruction file to have more detail." 
}, { "alpha_fraction": 0.6239781975746155, "alphanum_fraction": 0.638563871383667, "avg_line_length": 28.99519157409668, "blob_id": "e8fb2e578ec0d9d0a82edca52c062e063106ee94", "content_id": "4b63f94ea66b7f9660d71056e8014852ab617b86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6251, "license_type": "no_license", "max_line_length": 108, "num_lines": 208, "path": "/15112-CMU/FIFAworldcup copy/Team.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "import pygame, sys\nimport os\nimport time\nimport random\n\nfrom const import *\nfrom Player import *\nfrom Computer import *\nfrom Ball import BALL\nfrom Enum import *\n\n# for blue team\nDEFENDER_X = GAP_SIZE_WIDTH + (GAME_WIDTH / 7)\nMIDFIELDER_X = GAP_SIZE_WIDTH + (2 * GAME_WIDTH / 7)\nSTRIKER_X = GAP_SIZE_WIDTH + (3 * GAME_WIDTH / 7)\nMIDDLE_Y = TABLE_SCORE_HEIGHT + GAP_SIZE_HEIGHT + GAME_HEIGHT / 2\n\nSIZE_CONTROL_ICON = 18\n\nANGLE_MISS = 50\n\nclass Team(pygame.sprite.Group):\n def __init__(self):\n super(Team, self).__init__()\n self.players = []\n # self.index = -1\n self.player = None\n\n def changePlayer(self):\n if BALL.owner != self:\n self.player = self.getClosestPlayer(self.player)\n self.player.state = State.FREE\n\n def passBall(self):\n \"\"\"pass ball to team member\"\"\"\n curPlayer = self.player\n desPlayer = None\n angle = 50\n\n directVector = convertDirectVector(self.player.direction)\n #caculate vector and calculate angle\n for player in self.players:\n if player != curPlayer:\n tempVector = pygame.Vector2(player.rect.x - curPlayer.rect.x, player.rect.y - curPlayer.rect.y)\n tempAngle = abs( directVector.angle_to( tempVector ) )\n if tempAngle > 180:\n tempAngle = 360 - tempAngle\n\n if tempAngle < angle :\n angle = tempAngle\n desPlayer= player\n\n if desPlayer != None:\n passVector = pygame.Vector2( desPlayer.rect.x - curPlayer.rect.x, desPlayer.rect.y - curPlayer.rect.y)\n self.player = desPlayer\n\n length = passVector.length()\n if length > 500 :\n BALL.passBall( passVector.normalize() * 17 )\n elif length > 400 :\n BALL.passBall( passVector.normalize() * 15 )\n elif length > 300 :\n BALL.passBall( passVector.normalize() * 12 )\n elif length > 200 :\n BALL.passBall( passVector.normalize() * 10 )\n elif length > 100 :\n BALL.passBall( passVector.normalize() * 8)\n else:\n BALL.passBall( passVector.normalize() * 5 )\n else:\n BALL.passBall(convertDirectVector(self.player.direction).normalize() * 8)\n\n # self.state = State.ATTACK\n self.player.state = State.FREE\n\n def getClosestPlayer(self, exceptPlayer = None):\n # players = []\n import copy\n # players = copy.copy(self.players)\n # for player in self.players:\n # if player != exceptPlayer:\n # players.append(player)\n\n # minDistance = caculateDistance(self.players[0].controlRect.center, BALL.rect.center)\n # minPlayer = players[0]\n\n # minDistance = caculateDistance(self.players[0].controlRect.center, BALL.rect.center)\n # minPlayer = self.players[0]\n\n distanceDict = dict()\n for player in self.players:\n distance = caculateDistance(player.rect, BALL.rect)\n distanceDict[player] = distance\n\n minDistance = caculateDistance(self.players[0].controlRect.center, BALL.rect.center)\n minPlayer = self.players[0]\n\n for player, distance in distanceDict.items():\n if distance < minDistance:\n minDistance = distance\n minPlayer = player\n \n return minPlayer\n\n def attack(self):\n for player in self.players:\n if player != self.player:\n player.state = State.ATTACK\n 
self.player.state = State.FREE\n\nclass BlueTeam(Team):\n def __init__(self):\n super(BlueTeam, self).__init__()\n\n self.players = [\n # 2 Defender\n Player(DEFENDER_X, MIDDLE_Y - GAME_HEIGHT * 1 / 6),\n Player(DEFENDER_X, MIDDLE_Y + GAME_HEIGHT * 1 / 6),\n\n # 3 Midfielder\n Player(MIDFIELDER_X, MIDDLE_Y - GAME_HEIGHT * 2 / 6),\n Player(MIDFIELDER_X, MIDDLE_Y),\n Player(MIDFIELDER_X, MIDDLE_Y + GAME_HEIGHT * 2 / 6),\n\n # 2 Striker\n Player(STRIKER_X, MIDDLE_Y - GAME_HEIGHT * 1 / 6),\n Player(STRIKER_X, MIDDLE_Y + GAME_HEIGHT * 1 / 6),\n ]\n\n for player in self.players:\n self.add(player)\n\n self.index = len(self.players) - 1\n # self.index = 0\n self.player = self.players[self.index]\n\n imageControl = pygame.image.load(\"assets/images/others/sort-down-fill.png\")\n self.imageControl = pygame.transform.scale(imageControl, (SIZE_CONTROL_ICON, SIZE_CONTROL_ICON))\n\n # def handle(self):\n def control(self):\n self.attack()\n # self 就是blueTeam\n self.player.handle()\n for player in self.players:\n if player != self.player:\n player.performAction(self)\n\n# 画的箭头\n def draw(self, screen):\n super().draw(screen)\n screen.blit(self.imageControl, (self.player.rect.centerx - SIZE_CONTROL_ICON / 2,\n self.player.rect.centery - 2 / 3 * PLAYER_SIZE))\n\nclass RedTeam(Team):\n def __init__(self):\n super(RedTeam, self).__init__()\n self.players = [\n # 2 Defender\n Computer(BACKGROUND_WIDTH - DEFENDER_X, MIDDLE_Y - GAME_HEIGHT * 1 / 6),\n Computer(BACKGROUND_WIDTH - DEFENDER_X, MIDDLE_Y + GAME_HEIGHT * 1 / 6),\n\n # 3 Midfielder\n Computer(BACKGROUND_WIDTH - MIDFIELDER_X, MIDDLE_Y - GAME_HEIGHT * 2 / 6),\n Computer(BACKGROUND_WIDTH - MIDFIELDER_X, MIDDLE_Y),\n Computer(BACKGROUND_WIDTH - MIDFIELDER_X, MIDDLE_Y + GAME_HEIGHT * 2 / 6),\n\n # 2 Striker\n Computer(BACKGROUND_WIDTH - STRIKER_X, MIDDLE_Y - GAME_HEIGHT * 1 / 6),\n Computer(BACKGROUND_WIDTH - STRIKER_X, MIDDLE_Y + GAME_HEIGHT * 1 / 6),\n ]\n \n for player in self.players:\n self.add(player)\n\n self.index = 0\n self.player = self.players[self.index]\n \n def handle(self):\n if BALL.owner == self.player:\n self.player.state = State.COMPUTER\n elif BALL.owner != self.player:\n self.attack()\n # self.player = self.getClosestComputer()\n self.player = self.getClosestPlayer()\n self.player.state = State.FIND_BALL\n\n for player in self.players:\n player.performAction(self)\n\n # def getClosestComputer(self):\n # minDistance = caculateDistance(self.players[0].controlRect.center, BALL.rect.center)\n #\n # minComputer = self.players[0]\n #\n # for player in self.players:\n # distance = caculateDistance(player.rect, BALL.rect)\n #\n # if distance < minDistance:\n # minDistance = distance\n # minComputer = player\n #\n # return minComputer\n\n\n def attackComputer(self):\n self.attack()\n self.player.state = State.COMPUTER\n" }, { "alpha_fraction": 0.3571428656578064, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 14, "blob_id": "a64eddc9f223545c43bbf92de07d71bde7d2f746", "content_id": "6e5d271f769530d8da3dfe6998b0fa4018589ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 14, "num_lines": 1, "path": "/README.md", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# CMU-15112-HW" }, { "alpha_fraction": 0.3786407709121704, "alphanum_fraction": 0.40507012605667114, "avg_line_length": 29.360654830932617, "blob_id": "8c34f647bc3769de1af6215bba9e6f55f867fa61", "content_id": 
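# Hedged aside on Team.getClosestPlayer above (and the commented-out
# getClosestComputer duplicate): building a distance dict and then scanning
# it is min() with a key function. Note also that its initial minDistance
# uses controlRect.center while the loop distances use rect, mixing two
# measures. Squared distance keeps the argmin without sqrt, as
# caculateDistance already does. Sketch only.
def closestToBall(players, ball):
    def d2(p):
        px, py = p.rect.center
        bx, by = ball.rect.center
        return (px - bx) ** 2 + (py - by) ** 2
    return min(players, key=d2)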
"9a25ab52a3c205218c85ab1a9fc3ee68e5c71652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 79, "num_lines": 61, "path": "/15112-CMU/week1/rec1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "###############################################################################\n# ---------------- 15-112 Recitation Week 1: Getting Started ---------------- #\n\n# This is a starter file of the problems we did in recitation. A good way to\n# use this file is to try to re-write problems you saw in recitation from\n# scratch. This way, you can test your understanding and ask on Piazza or\n# office hours if you have questions :)\n\n# --------------------------------------------------------------------------- #\n###############################################################################\n# Functions\n###############################################################################\n\n# write a function that returns n times 5\ndef timesFive(n):\n return 5 * n\n\n# write a function that returns n multiplied by m\ndef mult(n, m):\n return n * m\n\n# write a function that returns n raised to the p power\ndef pow(n,p):\n return n ** p\n\n# write a function that returns the 100s value of n\ndef returnHundredsValue(n):\n return (n // 100) % 10\n\n\n# write a function that calculates the slope of a line given two points\ndef slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)\n\n###############################################################################\n# Code Tracing\n###############################################################################\n\ndef ct(x):\n x -= 1\n print(x**2)\n x %= 4\n return ((x * 2) % 4) // 2\n\nprint(ct(6))\nprint(\"Hello World\")\n\n###############################################################################\n# Reasoning Over Code\n###############################################################################\n\ndef rc(n):\n assert(type(n) == int)\n if ((n < 0) or (n > 99)): return False\n d1 = n % 10 #onesdigit\n d2 = n // 10 #tensdigit\n m = 10 * d1 + d2\n return ((m < n) and (n < 12))\n\n\nprint(rc(234))\n\n\n" }, { "alpha_fraction": 0.7057926654815674, "alphanum_fraction": 0.7606707215309143, "avg_line_length": 28.81818199157715, "blob_id": "9a90533937f3822653293e7cde24b439202cd398", "content_id": "c2e0097e1bccf72ceaa7bd8c87699be552dc3073", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 656, "license_type": "no_license", "max_line_length": 130, "num_lines": 22, "path": "/15112-CMU/112-opencv-tutorial-master/README.md", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "<h1>Small snippets of code for 15-112 OpenCV Tutorials</h1>\n\nCreated by Vasu Agrawal and Kim Kleiven\n\nPlease follow this order when going through the files:\n\n 1. `OpeningImages.py`\n 2. `OpeningImagesResize.py`\n 3. `OpeningVideo.py`\n 4. `ManualThreshold.py`\n 5. `BetterThresholds.py`\n 6. `ManualErosionAndDilation.py`\n 7. `ErosionAndDilation.py`\n 8. `PerformanceTips.py`\n 9. 
`TrackingFaces.py`\n\nImage citations:\n * https://i.imgur.com/qzCO1Wb.jpg\n * http://vignette1.wikia.nocookie.net/runescape2/images/7/7f/Mask_of_Sliske%2C_Light_detail.png/revision/latest?cb=20140220151331\n\nHaarcascade citations:\n * https://github.com/opencv/opencv/tree/master/data/haarcascades\n" }, { "alpha_fraction": 0.7333619594573975, "alphanum_fraction": 0.7376556396484375, "avg_line_length": 47.52083206176758, "blob_id": "6f386d9f717424a70c4b41981d4ba051446783f0", "content_id": "da77244195f3426cee6bb462df3e9c744c268bde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2329, "license_type": "no_license", "max_line_length": 78, "num_lines": 48, "path": "/15112-CMU/112-opencv-tutorial-master/performanceTips.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\ntips = \"\"\"\n General performance tips for your OpenCV Projects:\n\n1. Python is SLOW.\n Unfortunately, as awesome as Python is, those very features that make it\n awesome mean that it's going to be too slow for our purposes. To give you\n a concrete example, you're not going to be able to iterate over a single\n image using just Python for loops in 30 ms (i.e. 30 Hz) and get any\n meaningful work done.\n\n2. Learn Numpy, learn it well!\n Numpy is an amazing number manipulation library -- the standard, in fact,\n for any Python applications. It is all compiled C, and so, similar to how\n built in functions are orders of magnitude faster than their Python\n versions, Numpy operations will be orders of magnitude faster than the\n corresponding Python operations.\n\n3. Forget about iteration\n One of the keys to using Numpy well is vectorization -- that is, turning\n your iterative operations into something that can be applied to the whole\n array at once. For example, instead of iterating through an entire array\n in order to try to threshold it, we simply create a binary mask using >\n and then use an assignment to assign to all values at once.\n\n4. OpenCV is even faster than Numpy\n While Numpy is great for general purpose array manipulation, if you're\n trying to do specific manipulation tasks (i.e. thresholding, erode, etc),\n OpenCV has algorithms optimized specifically for that, and will thus be\n even faster than the Numpy implementation.\n\n5. Make it work, and then make it work fast.\n This is just a general optimization tip, but you should always make\n sure that your code works before attempting to make it fast. Who knows,\n maybe the naiive implementation is fast enough and you can get some sleep\n instead of optimizing your code?\n\n6. Profile your code in order to find performance bottlenecks.\n If you do need to optimize, make sure to profile your code using utilities\n like profile and time.time() to figure out which parts of your code are\n taking the longest to execute. For loops, start with the innermost and\n work your way out. 
If absolutely necessary, consider writing your loops\n in C, called via ctypes, Numpy's C API, Python's C API, or similar.\n\"\"\"\n\nif __name__ == \"__main__\":\n print tips\n" }, { "alpha_fraction": 0.5106559991836548, "alphanum_fraction": 0.5571015477180481, "avg_line_length": 34.31488037109375, "blob_id": "6a9e878cb9e47d84e3198431fa189a1b9c04d57a", "content_id": "58bfd97e4c8ae0fd8e295eb73015de32872c84e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20411, "license_type": "no_license", "max_line_length": 114, "num_lines": 578, "path": "/15112-CMU/week2/[email protected]_4_hw2_hw2_handin.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#################################################\n# Hw2\n# Your andrewID: mxu2\n# Your section: 2 N\n#################################################\n\nimport cs112_s19_week2_linter\n\n### You'll need isPrime for one of the problems, so it is provided here ###\ndef isPrime(n):\n if (n < 2):\n return False\n maxFactor = round(n**0.5)\n for factor in range(2, maxFactor+1):\n if (n % factor == 0):\n return False\n return True\n\n#################################################\n# Lab2 COLLABORATIVE LAB problems\n# (Their problem descriptions will be released Friday, Jan 25)\n#################################################\n# The problems in this section are LAB PROBLEMS, which means you MUST\n# work on these with at least one collaborator. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n# For lab problems, YOU MUST LIST AT LEAST ONE COLLABORATOR\n\ndef isSmithNumberCollaborators():\n return \"yufeiche\"\n\n\ndef sum_digits(n):\n sum = 0\n while n:\n sum += n % 10\n n //= 10\n return sum\n\n\ndef isSmithNumber(n):\n if isPrime(n):\n return False\n sumofdigit = sum_digits(n)\n sumOfPrimeFactor = 0\n for factor in range(2, n + 1):\n if isPrime(factor):\n while n % factor == 0:\n sumOfPrimeFactor += sum_digits(factor)\n n = n / factor\n if sumOfPrimeFactor == sumofdigit:\n return True\n else:\n return False\n\n\n### You can find drawFlagOfCuba in the Graphics section below ###\n\n#################################################\n# Hw2 COLLABORATIVE problem\n#################################################\n# The problems in this section are COLLABORATIVE, which means you may\n# work on them with your classmates if you wish. See the collaboration\n# policy in the syllabus for more details. Always list your collaborators!\n\n#### Debugging isMultiPowerfulNumber is a COLLABORATIVE problem ####\ndef isMultiPowerfulNumberCollaborators():\n return \"nobody\"\n\n# Bug 1: n % factor should be == 0, = means assign a value to some value, and in this same line\n# I put isPrime(factor) before n % factor for the simple reason that if I put n % factor before the key work and\n# when factor == 0, there will be a ZeroDivisionError. 
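# Hedged illustration of tips 2-4 above (vectorize instead of iterating);
# note the file's last line, "print tips", is Python 2 syntax and would be
# print(tips) in Python 3. Sketch only.
import numpy as np

def threshold(image, cutoff):
    # one boolean mask plus one assignment visits every pixel in compiled C,
    # versus a Python double loop over rows and columns
    out = image.copy()
    out[out < cutoff] = 0
    return out

# e.g. threshold(np.array([[10, 200], [90, 130]], dtype=np.uint8), 128)
# -> [[0, 200], [0, 130]]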
So if isPrime(0) is False, python will not check n % 0.\n# Bug 2: I replace return False after the second if statement with continue,\n# because if n % an square of factor is not equal to 0 the return False statement will ends the loop.\n# So, I replace it with continue.\n# Bug 3: The factorCount += 1 should move to right with on more indent.Because after the second if statement\n# if n % (factor**2) == 0, it should be prime factor for multi-powerful number and factorCount should plus 1.\n\n#### Insert the isMultiPowerfulNumber code here ####\ndef isMultiPowerfulNumber(n):\n factorCount = 0\n for factor in range(n):\n if isPrime(factor) and n % factor == 0:\n if n % (factor**2) != 0:\n continue\n factorCount += 1\n return factorCount > 1\n\n\n#################################################\n# Hw2 SOLO problems\n#################################################\n\ndef isKaprekarNumber(n):\n if n < 1:\n return False\n if n == 1:\n return True\n lenOfNumsquare = len(str(n**2))\n for i in range(1, lenOfNumsquare):\n A = n**2 // 10 ** i\n B = n**2 % 10 ** i\n if B != 0 and A + B == n:\n return True\n return False\n\n\ndef nthKaprekarNumber(n):\n found = -1\n guess = 0\n while (found < n):\n guess += 1\n if (isKaprekarNumber(guess)):\n found += 1\n return guess\n\n\ndef nearestKaprekarNumber(n):\n if n <= 1:\n return 1\n count = 1\n if type(n) == int:\n if isKaprekarNumber(n):\n return n\n while not isKaprekarNumber(n):\n a = n - count\n if isKaprekarNumber(a):\n return a\n b = n + count\n if isKaprekarNumber(b):\n return b\n count += 1\n if type(n) == float:\n import math\n rawnum = n\n count = 1\n if (n - int(n)) <= 0.5:\n if isKaprekarNumber(int(n)):\n return int(n)\n while not isKaprekarNumber(int(n)):\n a = int(n) - count\n if isKaprekarNumber(a):\n targeta = a\n break\n count += 1\n count = 1\n while not isKaprekarNumber(int(n)):\n b = int(n) + count\n if isKaprekarNumber(b):\n targetb = b\n break\n count += 1\n if abs(rawnum - targeta) <= abs(targetb - rawnum):\n return targeta\n if abs(rawnum - targeta) > abs(targetb - rawnum):\n return targetb\n if (n - int(n)) > 0.5:\n if isKaprekarNumber(math.ceil(n)):\n return math.ceil(n)\n while not isKaprekarNumber(math.ceil(n)):\n a = math.ceil(n) - count\n if isKaprekarNumber(a):\n targeta = a\n break\n count += 1\n count = 1\n while not isKaprekarNumber(math.ceil(n)):\n b = math.ceil(n) + count\n if isKaprekarNumber(b):\n targetb = b\n break\n count += 1\n if abs(rawnum - targeta) <= abs(targetb - rawnum):\n return targeta\n if abs(rawnum - targeta) > abs(targetb - rawnum):\n return targetb\n\n\n### The three following problems are bonus problems, and therefore optional ###\n# Note: Bonus problems are solo. 
Do not collaborate on bonus problems.\n\ndef squaresGenerator():\n return\n\ndef nswGenerator():\n return\n\ndef nswPrimesGenerator():\n return\n\n#################################################\n# Hw2 Graphics Functions\n# All graphics must go under here to avoid angering the autograder!\n# ignore_rest\n#################################################\n\nfrom tkinter import *\n\n### Note that drawFlagOfCuba is COLLABORATIVE and a LAB problem ###\ndef drawFlagOfCubaCollaborators():\n return \"afu1\"\n\ndef drawFlagOfCuba(canvas, width, height):\n heightofrec = height / 5\n for row in range(5):\n left = 0\n top = 0 + row * heightofrec\n right = width\n bottom = width * heightofrec\n if row == 0 or row == 2 or row == 4:\n color = \"darkblue\"\n else:\n color = \"white\"\n canvas.create_rectangle(left, top, right, bottom, fill=color)\n canvas.create_polygon(0, 0, 0, height, (3**0.5)/2 * height, height/2, fill=\"red3\")\n (cx, cy, r) = ((3**0.5)/6 * height, height/2, 0.8 * heightofrec)\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, fill=\"white\")\n\n\n### Note that drawThreadPattern is COLLABORATIVE ###\ndef drawThreadPatternCollaborators():\n return \"nobody\"\n\n\ndef drawThreadPattern(canvas, size, numSpokes, startSpoke, numSkips):\n import math\n (cx, cy, r) = (size/2, size/2, size/2 * 0.9)\n # draw a large circle\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, outline='black', width=r/30)\n # draw small circles\n for i in range(numSpokes):\n iAngle = math.pi / 2 - (2 * math.pi) * (i / numSpokes) - math.pi\n ix = cx + r * math.cos(iAngle)\n iy = cy - r * math.sin(iAngle)\n # ixt = cx + 0.85 * r * math.cos(iAngle)\n # iyt = cy - 0.85 * r * math.sin(iAngle)\n # canvas.create_text(ixt, iyt, text= str(i), font=\"Arial 16 bold\", width=10)\n if i == startSpoke:\n canvas.create_oval(ix - (r/20)*1.2, iy - (r/20)*1.1, ix + (r/20)*1.1, iy + (r/20) * 1.1,\n fill='green', outline='black', width=1)\n else:\n canvas.create_oval(ix - (r / 20) * 1.2, iy - (r / 20) * 1.1, ix + (r / 20) * 1.1, iy + (r / 20) * 1.1,\n fill='red', outline='black', width=1)\n # draw threads\n for n in range(numSpokes):\n if n == startSpoke:\n begin = True\n fixAngle = math.pi / 2 - (2 * math.pi) * (n / numSpokes) - math.pi\n xfix = cx + 0.95 * r * math.cos(fixAngle)\n yfix = cy - 0.95 * r * math.sin(fixAngle)\n nAngle = math.pi / 2 - (2 * math.pi) * (n / numSpokes) - math.pi\n nxold, nyold = xfix, yfix\n while begin:\n nAnglenew = nAngle - (2 * math.pi) * (numSkips/numSpokes)\n nxnew = cx + 0.95 * r * math.cos(nAnglenew)\n nynew = cy - 0.95 * r * math.sin(nAnglenew)\n canvas.create_line(nxold, nyold, nxnew, nynew, width=1)\n nAngle = nAnglenew\n if abs(nxnew - xfix) < 0.0001 and abs(nynew - yfix) < 0.0001:\n break\n nxold, nyold = nxnew, nynew\n\n\n### Note that drawSteelersLogo is SOLO ###\ndef drawSteelersLogo(canvas, x, y, r):\n sizeOfFont = int(r/4.3)\n (cx, cy, r) = (x, y, 0.95 * r)\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, fill=\"white\", outline=\"gray\", width=r/10)\n # define the centers of diamonds\n (goldx, goldy) = (cx, cy - r / 2)\n (bluex, bluey) = (cx, cy + r / 2)\n (redx, redy) = (cx + r / 2, cy)\n canvas.create_polygon(goldx, goldy + 0.8 * r/2, goldx + 0.8 * r/2, goldy,\n goldx, goldy - 0.8 * r/2, goldx - 0.8 * r/2, goldy, fill=\"gold\")\n canvas.create_polygon(bluex, bluey - 0.8 * r/2, bluex + 0.8 * r/2, bluey,\n bluex, bluey + 0.8 * r/2, bluex - 0.8 * r/2, bluey, fill=\"blue\")\n canvas.create_polygon(redx - 0.8 * r/2, redy, redx, redy - 0.8 * r/2,\n redx + 0.8 * r/2, redy, redx, 
redy + 0.8 * r/2, fill=\"red\")\n canvas.create_text(cx, cy, text=\"Steelers\",\n fill=\"black\", font=('Times', sizeOfFont, 'bold'), anchor=\"e\")\n\n\n### Note that drawButtonPattern is SOLO ###\ndef drawCircle(canvas, x0, y0, x1, y1, r, color):\n (cx, cy, r) = (x0 + (x1 - x0)/2, y0 + (y1 - y0)/2, r)\n while r >= 1:\n canvas.create_oval(cx - r, cy - r, cx + r, cy + r, fill=color, outline='black')\n r = (2/3) * r\n\n\ndef drawButtonPattern(canvas, size, n):\n canvas.create_rectangle(0, 0, size, size, fill=\"purple\")\n width = size / n\n r = width / 2\n for col in range(n):\n for row in range(n):\n # print('col',col,'row', row)\n left = 0 + width * col\n top = 0 + width * row\n right = width * (col + 1)\n bottom = width * (row + 1)\n if (col + row) % 4 == 0:\n color = 'red'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if row % 3 == 0:\n if (col + row) % 4 != 0:\n color = 'green'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if col % 2 == 1:\n if ((row - 1) % 12) == 0 or ((row - 5) % 12 == 0):\n if (col - 1) % 4 == 0:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n if ((row - 7) % 12) == 0 or ((row - 11) % 12 == 0):\n if (col - 3) % 4 == 0:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n if ((row - 2) % 6 == 0) or ((row - 4) % 6 == 0):\n if col % 2 == 1:\n color = 'yellow'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 1) % 6 == 0 or (row - 5) % 6 == 0:\n if col % 2 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 2) % 12 == 0 or (row - 10) % 12 == 0:\n if (col - 4) % 4 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n if (row - 4) % 12 == 0 or (row - 8) % 12 == 0:\n if (col - 2) % 4 == 0:\n color = 'blue'\n drawCircle(canvas, left, top, right, bottom, r, color)\n\n\n\n#### Note that drawNiceRobot is BONUS, and therefore optional ####\ndef drawNiceRobot(canvas, width, height):\n pass\n\n#################################################\n# Hw2 Test Functions\n# ignore_rest\n#################################################\n\ndef testIsSmithNumber():\n print(\"Testing isSmithNumber()...\", end=\"\")\n assert(isSmithNumber(22) == True)\n assert(isSmithNumber(21) == False)\n assert(isSmithNumber(4) == True)\n assert(isSmithNumber(378) == True)\n assert(isSmithNumber(1) == False)\n assert(isSmithNumber(27) == True)\n assert(isSmithNumber(9) == False)\n assert(isSmithNumber(7) == False)\n print(\"Passed.\")\n\ndef runDrawFlagOfCuba(width, height):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == 2*height)\n drawFlagOfCuba(canvas, width, height)\n root.mainloop()\n\ndef testDrawFlagOfCuba():\n print(\"Testing drawFlagOfCuba()...\", end=\"\")\n runDrawFlagOfCuba(580, 290)\n runDrawFlagOfCuba(100, 50)\n runDrawFlagOfCuba(300, 150)\n print(\"Done.\")\n\ndef testIsMultiPowerfulNumber():\n print(\"Testing isMultiPowerfulNumber()...\", end=\"\")\n isMultiPowerfulNumber(36)\n isMultiPowerfulNumber(72)\n isMultiPowerfulNumber(100)\n isMultiPowerfulNumber(108)\n #print(\"NO TEST CASES YET! 
Write them yourself!\")\n print(\"Passed.\")\n\ndef runDrawThreadPattern(width, height, numSpokes, startSpoke, numSkips):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == height)\n drawThreadPattern(canvas, width, numSpokes, startSpoke, numSkips)\n root.mainloop()\n\ndef testDrawThreadPattern():\n print(\"Testing drawThreadPattern...\", end=\"\")\n runDrawThreadPattern(400, 400, 12, 0, 5)\n runDrawThreadPattern(200, 200, 10, 3, 4)\n runDrawThreadPattern(500, 500, 19, 8, 15)\n print(\"Done.\")\n\ndef runDrawSteelersLogo(width, height, x, y, r):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n drawSteelersLogo(canvas, x, y, r)\n root.mainloop()\n\ndef testDrawSteelersLogo():\n print(\"Testing drawSteelersLogo...\", end=\"\")\n runDrawSteelersLogo(300, 300, 150, 150, 100)\n runDrawSteelersLogo(500, 600, 300, 200, 200)\n runDrawSteelersLogo(150, 100, 50, 60, 40)\n print(\"Done.\")\n\ndef testIsKaprekarNumber():\n print(\"Testing isKaprekarNumber()...\", end=\"\")\n assert(isKaprekarNumber(0) == False)\n assert(isKaprekarNumber(1) == True)\n assert(isKaprekarNumber(4) == False)\n assert(isKaprekarNumber(9) == True)\n assert(isKaprekarNumber(36) == False)\n assert(isKaprekarNumber(45) == True)\n assert(isKaprekarNumber(450) == False)\n print(\"Passed.\")\n\ndef testNthKaprekarNumber():\n print(\"Testing nthKaprekarNumber()...\", end=\"\")\n assert(nthKaprekarNumber(0) == 1)\n assert(nthKaprekarNumber(1) == 9)\n assert(nthKaprekarNumber(2) == 45)\n assert(nthKaprekarNumber(3) == 55)\n assert(nthKaprekarNumber(4) == 99)\n assert(nthKaprekarNumber(5) == 297)\n assert(nthKaprekarNumber(6) == 703)\n assert(nthKaprekarNumber(7) == 999)\n print('Passed.')\n\ndef testNearestKaprekarNumber():\n print(\"Testing nearestKaprekarNumber()...\", end=\"\")\n assert(nearestKaprekarNumber(1) == 1)\n assert(nearestKaprekarNumber(0) == 1)\n assert(nearestKaprekarNumber(-1) == 1)\n assert(nearestKaprekarNumber(-2) == 1)\n assert(nearestKaprekarNumber(-12345) == 1)\n assert(nearestKaprekarNumber(1.234) == 1)\n assert(nearestKaprekarNumber(4.99999999) == 1)\n assert(nearestKaprekarNumber(5) == 1)\n assert(nearestKaprekarNumber(5.00000001) == 9)\n assert(nearestKaprekarNumber(27) == 9)\n assert(nearestKaprekarNumber(28) == 45)\n assert(nearestKaprekarNumber(45) == 45)\n assert(nearestKaprekarNumber(50) == 45)\n assert(nearestKaprekarNumber(51) == 55)\n assert(nearestKaprekarNumber(1611) == 999)\n assert(nearestKaprekarNumber(1612) == 2223)\n assert(nearestKaprekarNumber(2475.4) == 2223)\n assert(nearestKaprekarNumber(2475.5) == 2223)\n assert(nearestKaprekarNumber(2475.51) == 2728)\n assert(nearestKaprekarNumber(2475.6) == 2728)\n assert(nearestKaprekarNumber(995123) == 994708)\n assert(nearestKaprekarNumber(9376543) == 9372385)\n assert(nearestKaprekarNumber(13641234) == 13641364)\n print(\"Passed.\")\n\ndef runDrawButtonPattern(width, height, n):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # width must equal height\n assert(width == height)\n drawButtonPattern(canvas, width, n)\n root.mainloop()\n\ndef 
testDrawButtonPattern():\n print(\"Testing drawButtonPattern()...\", end=\"\")\n runDrawButtonPattern(400, 400, 10)\n runDrawButtonPattern(300, 300, 5)\n runDrawButtonPattern(250, 250, 25)\n print(\"Done.\")\n\ndef runDrawNiceRobot(width, height):\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n canvas = Canvas(root, width=width, height=height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n drawNiceRobot(canvas, width, height)\n root.mainloop()\n\ndef testDrawNiceRobot():\n print(\"Testing drawNiceRobot()...\", end=\"\")\n runDrawNiceRobot(500, 500)\n runDrawNiceRobot(250, 250)\n print(\"Done.\")\n\ndef testSquaresGenerator():\n print(\"Testing squaresGenerator()...\", end=\"\")\n g = squaresGenerator()\n assert(next(g) == 1)\n assert(next(g) == 4)\n assert(next(g) == 9)\n assert(next(g) == 16)\n\n # ok, now with a for loop.\n squares = \"\"\n for square in squaresGenerator():\n if (squares != \"\"): squares += \", \"\n squares += str(square)\n if (square >= 100): break\n assert(squares == \"1, 4, 9, 16, 25, 36, 49, 64, 81, 100\")\n print(\"Passed.\")\n\ndef testNswGenerator():\n print(\"Testing nswGenerator()...\", end=\"\")\n nswNumbers = \"\"\n for nswNumber in nswGenerator():\n if (nswNumbers != \"\"): nswNumbers += \", \"\n nswNumbers += str(nswNumber)\n if (nswNumber >= 152139002499): break\n # from: http://oeis.org/A001333\n assert(nswNumbers == \"1, 1, 3, 7, 17, 41, 99, 239, 577, 1393, 3363, 8119, \"\n \"19601, 47321, 114243, 275807, 665857, 1607521, 3880899, \"\n \"9369319, 22619537, 54608393, 131836323, 318281039, \"\n \"768398401, 1855077841, 4478554083, 10812186007, \"\n \"26102926097, 63018038201, 152139002499\"\n )\n print(\"Passed.\")\n\ndef testNswPrimesGenerator():\n print(\"Testing nswPrimesGenerator()...\", end=\"\")\n nswPrimes = \"\"\n for nswPrime in nswPrimesGenerator():\n if (nswPrimes != \"\"): nswPrimes += \", \"\n nswPrimes += str(nswPrime)\n if (nswPrime >= 63018038201): break\n # from: http://oeis.org/A088165\n assert(nswPrimes == \"7, 41, 239, 9369319, 63018038201\")\n print(\"Passed.\")\n\n#################################################\n# Hw2 Main\n#################################################\n\ndef testAll():\n ### Lab problems ###\n testIsSmithNumber()\n testDrawFlagOfCuba()\n ### Collaborative problems ###\n\n testIsMultiPowerfulNumber()\n testDrawThreadPattern()\n\n ### Solo problems ###\n testDrawSteelersLogo()\n testIsKaprekarNumber()\n testNthKaprekarNumber()\n testNearestKaprekarNumber()\n testDrawButtonPattern()\n\n # Uncomment the next lines if you want to try the bonus!\n #testDrawNiceRobot()\n #testSquaresGenerator()\n #testNswGenerator()\n #testNswPrimesGenerator()\n\ndef main():\n cs112_s19_week2_linter.lint() # check for banned tokens\n testAll()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5848203897476196, "alphanum_fraction": 0.5960028767585754, "avg_line_length": 33.74380111694336, "blob_id": "fb7a1aa1cffe2dbb6373093a995a77fd2491d1b9", "content_id": "98fc9d75ef9d00b833fad9b74c88aeca5b79d144", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4203, "license_type": "no_license", "max_line_length": 80, "num_lines": 121, "path": "/15112-CMU/week4 cold cold/case4.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "# Side Scrolling Demo\nfrom tkinter import *\ndef init(data):\n data.mapWidth = data.width * 3\n data.bushes = []\n data.bushSize = 50\n for bushX in range(0, 
data.mapWidth, data.bushSize):\n data.bushes.append([bushX, \"green\"])\n data.playerX = 40\n data.playerSize = 30\n\n data.scrollX = 0\n data.groundY = data.height*2/3\n\ndef mousePressed(event, data):\n # In mousePressed, we need to move the mouse from the VIEW to the MAP\n # We do this by adding the scroll position to the position clicked\n\n viewX = event.x\n x = data.scrollX + viewX\n y = event.y\n # Now check if there's a bush in there\n for bush in data.bushes:\n if bush[0] <= x <= (bush[0] + data.bushSize) and \\\n (data.groundY - data.bushSize/2) <= y <= data.groundY:\n bush[1] = \"purple\"\n\ndef keyPressed(event, data):\n # We often choose to move the screen based on player input\n # In this example, we'll move when the player reaches a buffer\n # on either side of the screen.\n # We calculate this buffer based on the MAP and the SCROLL POSITION\n\n playerSpeed = 10\n if event.keysym == \"Left\":\n data.playerX -= playerSpeed\n elif event.keysym == \"Right\":\n data.playerX += playerSpeed\n\n # Move the window if the player is about to move off the screen\n buffer = 10\n # Need to compare player's map position to the\n # scroll position plus the screen size\n if (data.playerX + data.playerSize + buffer) >= (data.scrollX + data.width):\n data.scrollX += playerSpeed\n elif (data.playerX - buffer) <= data.scrollX:\n data.scrollX -= playerSpeed\n\ndef timerFired(data):\n pass\n\ndef redrawAll(canvas, data):\n # When drawing things, we need to move them from the MAP to the VIEW\n # We do this by subtracting the scroll position from the map position\n\n # draw the bushes\n for bush in data.bushes:\n [bushX, color] = bush\n # We'll draw some things offscreen, but that's okay!\n canvas.create_oval(bushX - data.scrollX,\n data.groundY - data.bushSize/2,\n bushX + data.bushSize - data.scrollX,\n data.groundY + data.bushSize/2,\n fill=color)\n # draw the ground\n canvas.create_rectangle(0 - data.scrollX,\n data.groundY,\n data.mapWidth - data.scrollX,\n data.height,\n fill=\"tan4\")\n # draw the player\n canvas.create_oval(data.playerX - data.scrollX,\n data.groundY - data.playerSize,\n data.playerX + data.playerSize - data.scrollX,\n data.groundY,\n fill=\"red\")\n canvas.create_text(10, 10, text=\"scrollX: \" + str(data.scrollX),\n font=\"Arial 25 bold\", anchor=\"nw\")\n\n\n\n\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n root = Tk()\n root.resizable(width=False, height=False) # prevents resizing window\n init(data)\n # create the root and the canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"<Button-1>\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"<Key>\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAllWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"bye!\")\n\nrun(400, 200)" }, { "alpha_fraction": 0.6671082973480225, "alphanum_fraction": 0.6820139288902283, 
"avg_line_length": 33.712642669677734, "blob_id": "3b6165c69b134a17b384b3808a027930b7974b8d", "content_id": "eceba2c0af2ac7f9decf7af3015b2cc5e6f2bc71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3019, "license_type": "no_license", "max_line_length": 80, "num_lines": 87, "path": "/15112-CMU/112-opencv-tutorial-master/betterThresholds.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# This file contains a few different variations on thresholding, showing some\n# various functionality of numpy as well as finally how to do it with openCV\n\nimport cv2\nimport numpy as np\n\n# Define our constants\nWHITE = 255\nBLACK = 0\nTHRESH = 127\n\n# Thresholding using numpy iterators\ndef iter_threshold(image):\n # Returns us a copy, so we can modify it\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # nditer gives us a memory-efficient iterator over the array, which we\n # can then write to with special numpy syntax\n for val in np.nditer(grey, op_flags=['readwrite']):\n val[...] = WHITE if val > THRESH else BLACK\n return grey\n\n# Binary mask in numpy\n# We can index into an array with another array (the binary mask), and\n# by assigning a scalar value we assign to all values that were masked in.\ndef mask_threshold(image):\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Index into grey with a binary mask, and assign binary values\n grey[grey > THRESH] = WHITE\n grey[grey <= THRESH] = BLACK\n return grey\n\n# And finally, we demonstrate the OpenCV thresholding function, which\n# is able to use some more advanced thresholds rather than a fixed constant.\ndef threshold(image):\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # There are a few thresholding modes available. In this one, the output\n # goes to 0 if grey(x, y) <= THRESH, else WHITE. See docs for other options.\n _, thresholded = cv2.threshold(grey, THRESH, WHITE, cv2.THRESH_BINARY)\n return thresholded\n\n# And finally, we demonstrate the OpenCV thresholding function, which\n# is able to use some more advanced thresholds rather than a fixed constant.\n# We also blur our image first in order to remove some noise.\ndef threshold_otsu(image):\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n value = (31, 31)\n blurred = cv2.GaussianBlur(grey, value, 0)\n # Otsu thresholding is able to automtically determine what the threshold\n # value should be. 
Currently only works on 8 bit images.\n # We also use _ for the return value to simply ignore it.\n _, thresholded = cv2.threshold(blurred, 0, WHITE,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n return thresholded\n\n# Modified to take a variety of threshold functions\ndef main(fn):\n\n window_name = \"Webcam!\"\n\n cam_index = 0\n cv2.namedWindow(window_name, cv2.CV_WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(cam_index)\n cap.open(cam_index)\n\n while True:\n\n ret, frame = cap.read()\n\n if frame is not None:\n # Instead of showing the original image, show the thresholded one\n cv2.imshow(window_name, fn(frame))\n \n k = cv2.waitKey(1) & 0xFF\n if k == 27: # Escape key\n cv2.destroyAllWindows()\n cap.release()\n break\n\n# Run through all of the demonstrations\nif __name__ == \"__main__\":\n main(iter_threshold)\n main(mask_threshold)\n main(threshold)\n main(threshold_otsu)" }, { "alpha_fraction": 0.5541591644287109, "alphanum_fraction": 0.5595451593399048, "avg_line_length": 25.919355392456055, "blob_id": "1a849c9a937f40266a10180a107fe55f43cad2bd", "content_id": "d4db8f16481e0e338f39d0fab930e3038e82ed5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 103, "num_lines": 62, "path": "/15112-CMU/week9/sublist.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def perm(lst):\n if len(lst) == 0:\n return []\n elif len(lst) == 1:\n return [lst]\n else:\n result = []\n for i in range(len(lst)):\n x = lst[i]\n xs = lst[:i] + lst[i + 1:]\n for p in perm(xs):\n result.append([x] + p)\n return result\n\n\n# def getSubLists(lst):\n# outPut = [[]]\n# for i in range(len(lst)):\n# for j in range(len(outPut)):\n# outPut.append(outPut[j] + [lst[i]])\n# outPut.remove([])\n# return outPut\n\n# Problem: given a list, a, produce a list containing all the possible subsets of a.\ndef powerset(a):\n # Base case: the only possible subset of an empty list is the empty list.\n if (len(a) == 0):\n return [ [] ]\n else:\n # Recursive Case: remove the first element, then find all subsets of the remaining list.\n # Then duplicate each subset into two versions: one without the first element, and one with it.\n partialSubsets = powerset(a[1:])\n allSubsets = [ ]\n for subset in partialSubsets:\n allSubsets.append(subset)\n allSubsets.append([a[0]] + subset)\n return allSubsets\n\n\ndef getOtherPart(lst, left):\n import copy\n lfCopy = copy.deepcopy(left)\n res = []\n for c in lst:\n if c not in lfCopy:\n res += [c]\n else:\n lfCopy.remove(c)\n return res\n\n\ndef divideAlistIntoTwoParts(lst):\n res = []\n allSubsets = powerset(lst)\n print(allSubsets)\n for left in allSubsets:\n otherPart = getOtherPart(lst, left)\n res.append((left, otherPart))\n return res\n\n\nprint(divideAlistIntoTwoParts([0,1,2]))\n\n\n" }, { "alpha_fraction": 0.5120859742164612, "alphanum_fraction": 0.535064160823822, "avg_line_length": 32.18811798095703, "blob_id": "a6b28b7264f940e14b98eaaf02e7e13f28a05bb3", "content_id": "8a9b2ce18418dd9d705814ab39850e8bf20e0153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3351, "license_type": "no_license", "max_line_length": 80, "num_lines": 101, "path": "/15112-CMU/week4 cold cold/gai.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def valueOfLetter(letterScores, letter):\n indexOfLetter = ord(letter) - 97\n value = letterScores[indexOfLetter]\n return value\n\n\ndef 
getScoresOfWords(letterScores, targetWordLst):\n scores = []\n for i in range(len(targetWordLst)):\n score = 0\n for j in range(len(targetWordLst[i])):\n score += valueOfLetter(letterScores, targetWordLst[i][j])\n scores.append(score)\n return scores\n\n\ndef getIndexOfTarget(idx, scoreList, maxScore):\n i = 0\n while i < len(scoreList):\n if scoreList[i] == maxScore:\n idx.append(i)\n i += 1\n return idx\n\ndef finalTarget(lst, hand):\n handStr = \"\"\n for s in hand:\n handStr += s\n newList = []\n for c in lst:\n for char in c:\n if c.count(char) == handStr.count(char):\n if c not in newList:\n newList.append(c)\n return newList\n\n\ndef bestScrabbleScore(dictionary, letterScores, hand):\n targetWordLst = []\n for word in dictionary:\n CharInHand = True\n for char in word:\n if char in hand:\n CharInHand = True\n else:\n CharInHand = False\n break\n if CharInHand == True:\n targetWordLst.append(word)\n targetWordLst = finalTarget(targetWordLst, hand)\n scoreList = getScoresOfWords(letterScores, targetWordLst)\n if scoreList == []:\n return None\n maxScore = max(scoreList)\n idx = []\n idx = getIndexOfTarget(idx, scoreList, maxScore)\n resultList = []\n for c in idx:\n resultList.append(targetWordLst[c])\n if len(resultList) == 1:\n return(resultList[0], maxScore)\n else:\n return(resultList, maxScore)\n\n\ndef testBestScrabbleScore():\n print(\"Testing bestScrabbleScore()...\", end=\"\")\n def d1(): return [\"a\", \"b\", \"c\"]\n def ls1(): return [1] * 26\n def d2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\n def ls2(): return [1 + (i % 5) for i in range(26)]\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"a\", \"c\", \"e\"]) == ([\"a\", \"c\"], 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"b\"]) == (\"b\", 1))\n assert(bestScrabbleScore(d1(), ls1(), [\"z\"]) == None)\n # x = 4, y = 5, z = 1\n # [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\n # 10 10 7 10 9 -\n assert(bestScrabbleScore(d2(), ls2(), [\"x\",\"y\",\"z\"]) == ([\"xyz\",\"zxy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(),\n [\"x\", \"y\", \"z\", \"y\"]) == ([\"xyz\", \"zxy\", \"yy\"], 10))\n assert(bestScrabbleScore(d2(), ls2(), [\"x\", \"y\", \"q\"]) == (\"yx\", 9))\n assert(bestScrabbleScore(d2(), ls2(), [\"y\", \"z\", \"z\"]) == (\"zzy\", 7))\n assert(bestScrabbleScore(d2(), ls2(), [\"w\", \"x\", \"z\"]) == None)\n print(\"Passed.\")\n\ntestBestScrabbleScore()\n\n# print(findAllPossibleWordsOfHand([\"x\",\"y\",\"z\"]))\n\n\ndef d1(): return [\"a\", \"b\", \"c\"]\ndef ls1(): return [1] * 26\ndef d2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\ndef ls2(): return [1 + (i % 5) for i in range(26)]\n# print(bestScrabbleScore(d2(), ls2(), [\"x\",\"y\",\"z\"]))\n#print(bestScrabbleScore(d1(), ls1(), [\"b\"]))\n# print(bestScrabbleScore(d1(), ls1(), [\"a\", \"c\", \"e\"]))\n# print(bestScrabbleScore(d1(), ls1(), [\"z\"]))\n# print(bestScrabbleScore(d2(), ls2(),\n# [\"x\", \"y\", \"z\", \"y\"]))" }, { "alpha_fraction": 0.6185441017150879, "alphanum_fraction": 0.630744218826294, "avg_line_length": 27.941177368164062, "blob_id": "c613b1b034209cbefe9a47d4bfece50d92ecee5a", "content_id": "de470751f4d7a1d3226fe247bfa14aadf70d0f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2459, "license_type": "no_license", "max_line_length": 87, "num_lines": 85, "path": "/15112-CMU/FIFAworldcup copy/Ball.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": 
"import pygame\nimport os\nimport time\n\nfrom const import *\n\nSIZE = 12\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self):\n super(Ball, self).__init__()\n\n self.owner = None\n self.isRolling = False\n\n image = pygame.image.load('assets/images/ball/ball.png')\n self.image = pygame.transform.scale(image, (SIZE, SIZE))\n\n self.rect = self.image.get_rect()\n self.rect.centerx = BACKGROUND_WIDTH / 2\n self.rect.centery = TABLE_SCORE_HEIGHT + BACKGROUND_HEIGHT / 2\n\n self.velocity = pygame.Vector2(0, 0)\n self.friction = 0.97\n self.fieldSize = pygame.Rect(TOP_LEFT[0], TOP_LEFT[1], GAME_WIDTH, GAME_HEIGHT)\n\n def update(self, blueTeam, redTeam):\n if self.owner != None:\n self.velocity = pygame.Vector2(0, 0)\n elif self.velocity == (0, 0):\n self.isRolling = False\n else:\n self.rect.x += self.velocity.x\n self.rect.y += self.velocity.y\n #friction\n self.velocity *= self.friction\n \n if abs(self.velocity.x) < 1:\n self.velocity.x = 0\n if abs(self.velocity.y) < 1:\n self.velocity.y = 0\n\n #bound collision response\n if self.rect.left < self.fieldSize.left:\n self.rect.left = self.fieldSize.left\n self.velocity.x = -self.velocity.x\n\n if self.rect.right > self.fieldSize.right:\n self.rect.right = self.fieldSize.right\n self.velocity.x = -self.velocity.x\n\n if self.rect.top < self.fieldSize.top:\n self.rect.top = self.fieldSize.top\n self.velocity.y = -self.velocity.y\n \n if self.rect.bottom > self.fieldSize.bottom:\n self.rect.bottom > self.fieldSize.bottom\n self.velocity.y = -self.velocity.y\n \n #collition response with football player\n # if self.velocity.length() > 10:\n # for player in blueTeam:\n # if self.rect.colliderect(player.controlRect) and player != blueTeam.player:\n # self.velocity = -self.velocity\n # for player in redTeam:\n # if self.rect.colliderect(player.controlRect):\n # self.velocity = -self.velocity\n\n def passBall(self, vec):\n self.isRolling = True\n self.owner = None\n\n self.velocity = vec\n\n def shoot(self, vec):\n self.velocity = vec * 20\n self.isRolling = True\n self.owner = None\n\n def ballAfterGoal(self):\n self.rect.centerx = BACKGROUND_WIDTH / 2\n self.rect.centery = TABLE_SCORE_HEIGHT + BACKGROUND_HEIGHT / 2\n self.velocity = pygame.Vector2(0, 0)\n\nBALL = Ball()" }, { "alpha_fraction": 0.4698379933834076, "alphanum_fraction": 0.49224406480789185, "avg_line_length": 25.099098205566406, "blob_id": "6bd0a5c2be39f01dc2369920bfee9230d42d6568", "content_id": "fe35f04bec8df24b4085f0e37a9fb630ab594dc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2909, "license_type": "no_license", "max_line_length": 73, "num_lines": 111, "path": "/15112-CMU/untitled folder 2/final practice.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def isConceitedNum(num):\n res = 0\n lenOfnum = len(str(num))\n for i in range(lenOfnum):\n res += int(str(num)[i])**lenOfnum\n return res == num\n\n\ndef nthConceitedNumber(n):\n count = 0\n num = 0\n while count < n:\n num += 1\n if isConceitedNum(num):\n count += 1\n return num\n\nprint(nthConceitedNumber(11))\n\n\ndef outOfBounds(currRow, currCol, L):\n return not (0 <= currRow <= len(L) and (0 <= currCol <= len(L[0])))\n\n\n# 此题不会\ndef spiralJoin(L):\n direction = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n directionIndex = 0\n seen = set()\n result = \"\"\n location = (0, 0)\n while len(seen) != len(L)*len(L[0]):\n currRow, currCol = location\n drow = direction[directionIndex][0] #0\n dcol = direction[directionIndex][1] #1\n if 
location not in seen and not outOfBounds(currRow, currCol, L):\n            seen.add(location)\n            result += L[currRow][currCol]\n            location = (drow + currRow, dcol + currCol)\n        else:\n            oldRow, oldCol = currRow - drow, currCol - dcol\n            directionIndex = (directionIndex + 1) % 4\n            drow = direction[directionIndex][0]\n            dcol = direction[directionIndex][1]\n            location = (oldRow + drow, oldCol + dcol)\n    return result\n\nL = [[\"a\", \"b\", \"c\", \"de\"],\n     [\"fgh\", \"ijk\", \"lm\", \"n\"],\n     [\"q\", \"rse\", \"t\", \"u\"]]\n\ndef testSpiralJoin():\n    print(\"Testing spiralJoin...\")\n    L = [[\"a\", \"b\", \"c\", \"de\"],\n         [\"fgh\", \"ijk\", \"lm\", \"n\"],\n         [\"q\", \"rse\", \"t\", \"u\"]]\n    L1 =[[\"18\", \"3\"],\n         [\"gg\", \"qq\"]]\n    L2 = [[\"eat\"]]\n    assert(spiralJoin(L) == \"abcdenutrseqfghijklm\")\n    assert(spiralJoin(L1) == \"183qqgg\")\n    assert(spiralJoin(L2) == \"eat\")\n    print(\"testing passed!\")\n\n# testSpiralJoin()\n# print(spiralJoin(L))\n\n\ndef numberWooTriple(L):\n    result = set()\n    s = set(L) # O(N)\n    for i in range(len(L)):\n        for j in range(len(L)):\n            k = L[i] ** 2 % L[j]\n            t = (L[i], L[j], k)\n            t = tuple(sorted(t))\n            if k in s: # O(1)\n                result.add(t)\n    return result\n\nprint(numberWooTriple([1,3,2]))\n\ndef check(L):\n    if len(L) == 1:\n        if int(L[0]**0.5) != L[0]**0.5:\n            return False\n    for i in range(len(L)):\n        if i != len(L) - 1:\n            if int((L[i] + L[i + 1]) ** 0.5) \\\n                    != (L[i] + L[i + 1]) ** 0.5:\n                return False\n    return True\n\ndef helper(L, result):\n    if len(L) == 0 and check(L):\n        return result\n    if len(L) == 0:\n        return None\n    for elem in L:\n        if check(result + [elem]):\n            result.append(elem)\n            L.remove(elem)\n            tmp = helper(L, result)\n            if tmp is not None:\n                return tmp\n    return None\n\ndef getSquarefulArrangement(L):\n    return helper(L, [])\n\nprint(getSquarefulArrangement([1,17,8]))\n\n\n\n\n" }, { "alpha_fraction": 0.4563997983932495, "alphanum_fraction": 0.49224406480789185, "avg_line_length": 25.099098205566406, "blob_id": "5723f70def773943d7fc6157fd9c8ceb12dcc6ce", "content_id": "2fe6503311095d1200fcb93b50ebe72bfebe953c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1961, "license_type": "no_license", "max_line_length": 54, "num_lines": 88, "path": "/15112-CMU/week10/case1.py", "repo_name": "MingXu-123/CMU-15112-HW", "src_encoding": "UTF-8", "text": "def helper(n, lst):\n    if 0 not in lst:\n        return lst\n    for i in range(len(lst)):\n        if i + n + 1 <= len(lst):\n            if lst[i] == 0 and lst[i + n + 1] == 0:\n                lst[i] = n\n                lst[i + n + 1] = n\n                tmp = helper(n - 1, lst)\n                if tmp is not None:\n                    return tmp\n                lst[i] = 0\n                lst[i + n + 1] = 0\n    return None\n\n\ndef distList(n):\n    lenOflst = 2*n\n    lst = [0]*lenOflst\n    return helper(n, lst)\n\nprint(distList(4))\n\n\n\ndef HelperCount(lst, d):\n    if len(lst) == 0:\n        return d\n    else:\n        if lst[0] not in d:\n            d[lst[0]] = 1\n            return HelperCount(lst[1:], d)\n        else:\n            d[lst[0]] += 1\n            return HelperCount(lst[1:], d)\n\n\ndef getItemCounts(lst):\n    d = dict()\n    return HelperCount(lst, d)\nprint(getItemCounts([\"a\", \"b\", \"c\", \"a\", \"a\", \"c\"]))\n\n\ndef packHelper(items, bagSizes, res):\n    if len(items) == 0:\n        return res\n\ndef packItem(items, bagSizes):\n    res = []\n    return packHelper(items, bagSizes, res)\n\nprint(packItem([4,8,1,4,3], [12,9]))\nprint(packItem([4,8,1,4,3], [10,10]))\n\ndef loadBalance(a, b, L):\n    if len(L) == 0:\n        return a, b\n    item = L.pop(0)\n    try1Box1, try1Box2 = loadBalance(a + [item], b, L)\n    try2Box1, try2Box2 = loadBalance(a, b + [item], L)\n    diff1 = abs(sum(try1Box1) - sum(try1Box2))\n    diff2 = abs(sum(try2Box1) - sum(try2Box2))\n    if diff1 > diff2:\n        return try1Box1, try1Box2\n    else:\n        return try2Box1, try2Box2\n\n\n\ndef visualizeRecursion(f):\n    depth = 0\n    def g(*args, **kwargs):\n        nonlocal depth\n        depth += 1\n        res = f(*args, **kwargs)\n        depth -= 1\n        s = \"\\t\" * depth + \"recursion depth: \" \\\n            + str(depth) + ', result: ' + str(res)\n        print(s)\n        return res\n    return g\n\n\n@visualizeRecursion\ndef fact(n):\n    if n == 0: return 1\n    return n * fact(n-1)\nfact(4)\n\n" } ]
132
auxo86/Flask_WebAPI
https://github.com/auxo86/Flask_WebAPI
3998b4c947aabae69b8bce412eac90f98e4b254a
e060ab041bf5142a65401a2d1baf2091a9121ded
ba82d8becfe15a50e7c18245f2895f920dd4990b
refs/heads/master
2021-01-23T20:23:22.720076
2017-09-26T06:54:49
2017-09-26T06:54:49
102,215,164
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.565270185470581, "alphanum_fraction": 0.5892531871795654, "avg_line_length": 27.39655113220215, "blob_id": "ad940e03be7ed23ee089ac7e6854e09589e4e577", "content_id": "bc218621f9f4e39f89c9593136ef26bdcb0e82b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3578, "license_type": "no_license", "max_line_length": 123, "num_lines": 116, "path": "/dataGenerator.py", "repo_name": "auxo86/Flask_WebAPI", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# !~/flask/bin/python3\n\nfrom flask import Flask, jsonify, render_template\nfrom flask_cors import CORS, cross_origin\nimport sqlite3\nimport os\nimport json\n\napp = Flask(__name__)\nCORS(app)\n\n'''\n本程式用於產生餵給D3.js GEO功能的JSON dict資料\n讀取sqlite3,產生字典,然後轉成JSON\n'''\n\n# 設定語言環境\nos.environ['NLS_LANG'] = 'TRADITIONAL CHINESE_TAIWAN.AL32UTF8'\n# 要讀取的資料表名稱\nstr_tableNameForGet = \"N_DIAGDATE_GEODATA\"\n# 要寫入的路徑名稱\nFileOutputDir = \"TeipeiD3jsDynamicWithZoom/\"\n# 設定錯誤寫入的log檔案\nfileErrLogName = str_tableNameForGet + \"_JSON_Err.log\"\n# 設定使用的sqlite3資料連線\nstr_connect = \"ptDiagtimeGeodata.db\"\n\n\ndef fnGetDBConn(str_connect):\n conn = sqlite3.connect(str_connect)\n return conn\n\n\n# 資料應該是長這樣[(\"6300100002\", 133), (\"6300100003\", 329), (\"6300100004\", 371), ...]\ndef fnTupleToDict(tupleData):\n _dictData = {}\n for item in tupleData:\n key = item[0]\n value = (item[1], item[2])\n _dictData[key] = value\n return _dictData\n\n\n# def fnCloseResource(fileErrLog, cur, conn):\n# fileErrLog.close()\n# del cur\n# conn.commit()\n# conn.close()\n# del conn\n\n\ndef fnCloseResource(cur, conn):\n del cur\n conn.commit()\n conn.close()\n del conn\n\n\[email protected]('/')\ndef index():\n return render_template('d3demo.html')\n\n\[email protected]('/GISSYS/api/geopopdata/<int:numStartYr>/<int:numDiffYr>/<int:numDiagYr>', methods=['GET'])\n@cross_origin()\ndef get_tasks(numStartYr, numDiffYr, numDiagYr):\n # 設定之後要轉成JSON的字典\n dictData = {}\n # 要寫入的檔案名稱\n strOutputJSONName = f\"TPE_{numStartYr}_{numStartYr + numDiffYr}_in_{numDiagYr}Yr.json\"\n # 讀取資料表的SQL字串\n strSQLReadData = f'''select a.n_village_id as village_id, count(a.n_village_id) as population, a.n_pop as all_pop from \n (\n \t select\n \t distinct pid as n_pid\n \t , VILLAGE_ID as n_village_id\n \t , pop as n_pop\n \t from\n \t N_DIAGDATE_GEODATA_TPE_NTPE\n \t where\n \t (julianday('now') - julianday(BIRTHDATE))/365 > {numStartYr}\n \t and (julianday('now') - julianday(BIRTHDATE))/365 <= {numStartYr + numDiffYr}\n \t and (julianday('now') - julianday(N_DIAGDATE))/365 <= {numDiagYr}\n ) a\n group by \n a.n_village_id, a.n_pop\n order by\n population desc'''\n\n # if not os.path.exists(FileOutputDir):\n # os.makedirs(FileOutputDir)\n # # 開啟要寫入的json\n # fileOutputJson = open(FileOutputDir + strOutputJSONName, 'w', encoding='utf-8')\n # # 開啟要寫入的errlog\n # fileErrLog = open(FileOutputDir + fileErrLogName, \"w\", encoding=\"utf-8\")\n\n # 連接sqlite3資料庫\n conn = fnGetDBConn(str_connect)\n cur = conn.cursor()\n # 讀取資料,把資料存進List of tuple中\n cur.execute(strSQLReadData)\n tupleData = cur.fetchall()\n # 轉換tuple成為dict\n dictData = fnTupleToDict(tupleData)\n # 輸出檔案成json\n # json.dump(dictData, fileOutputJson)\n # 關閉資源\n # fnCloseResource(fileErrLog, cur, conn)\n fnCloseResource(cur, conn)\n\n return jsonify({'tasks': dictData})\n\n\nif __name__ == '__main__':\n app.run(host='10.160.16.16', port=9999)\n" }, { "alpha_fraction": 0.5718954205513, "alphanum_fraction": 0.6116557717323303, "avg_line_length": 18.3157901763916, "blob_id": 
"dee18c8ed87c72257bd81bce6931032e2fa2295a", "content_id": "01523c79c65320858c8db4de3e85321d24e1ee90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "no_license", "max_line_length": 67, "num_lines": 95, "path": "/dataGenerator_OfficalDemo.py", "repo_name": "auxo86/Flask_WebAPI", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#!~/flask/bin/python3\n\nfrom flask import Flask, jsonify, g\nfrom flask_cors import CORS, cross_origin\nimport sqlite3\nimport os\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n'''\n本程式用於產生餵給D3.js GEO功能的JSON dict資料\n讀取sqlite3,產生字典,然後轉成JSON\n'''\n\n# 設定語言環境\nos.environ['NLS_LANG'] = 'TRADITIONAL CHINESE_TAIWAN.AL32UTF8'\n\nnumStartYr = 65\nnumDiffYr = 85\nnumDiagYr = 100\nnumEndYr = numStartYr + numDiffYr\n\n# 要讀取的資料表名稱\nstr_tableNameForGet = \"N_DIAGDATE_GEODATA\"\n# 讀取資料表的SQL字串\nstrSQLReadData = f'''select\n VILLAGE_ID\n , count(VILLAGE_ID) as population\nfrom\n {str_tableNameForGet}\nwhere\n (julianday('now') - julianday(BIRTHDATE))/365 > {numStartYr}\n and (julianday('now') - julianday(BIRTHDATE))/365 <= {numEndYr}\n and (julianday('now') - julianday(N_DIAGDATE))/365 <= {numDiagYr}\ngroup by \n VILLAGE_ID\norder by\n population desc'''\n\n# 準備要存入資料的字典\ndictData = {}\n# 設定資料庫位置\nstrDB = \"ptDiagtimeGeodata.db\"\n\n\n# 要在flask中使用sqlite3,不是很直覺,要參考文擋\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(strDB)\n return db\n\n\[email protected]_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n\[email protected]('/GISSYS/api/geopopdata', methods=['GET'])\n@cross_origin()\ndef get_tasks():\n db = get_db()\n cur = db.execute(strSQLReadData)\n listAllRows = [row for row in cur]\n '''\n 資料應該是長這樣[{\n \"tasks\": [\n [\n 6301000029, \n 90768\n ], \n [\n 6301000039, \n 89266\n ], \n [\n 6300500002, \n 32507\n ], , ...]\n '''\n for rowitem in listAllRows:\n key = rowitem[0]\n value = rowitem[1]\n dictData[key] = value\n return jsonify({'tasks': dictData})\n\n\nif __name__ == '__main__':\n app.run()\n\n" }, { "alpha_fraction": 0.48815619945526123, "alphanum_fraction": 0.4983994960784912, "avg_line_length": 31.216495513916016, "blob_id": "b5650e620c2c3753e4ea992d52947cb7d6be7211", "content_id": "12ce35e2cf1914a76c2ce0e328971fe5bdde335f", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 3324, "license_type": "no_license", "max_line_length": 102, "num_lines": 97, "path": "/static/js/d3jsV4Slider.js", "repo_name": "auxo86/Flask_WebAPI", "src_encoding": "UTF-8", "text": "function slider(_min, _max)\n{\n var margin = {top: 10, left: 5, right: 10, bottom: 5},\n width = 400 - margin.left - margin.right,\n height = 30 - margin.top - margin.bottom,\n\n handle,\n // 因為有handle的面積,所以往右移一下\n handleAreaShiftCx = 8,\n slider,\n value = 0,\n // 更新handle值時要call的函數\n // 會設定value的值\n upd = function(d){value = d;},\n // 單純是函式指標的的callback\n cback = function(d){};\n\n var x = d3.scaleLinear()\n .domain([_min, _max])\n .range([0, width-15])\n .clamp(true);\n\n // el就是從外面傳進來的svg\n function chart(el){\n svg = el;\n\n // 新增圖層\n slider = el.attr(\"width\", width + margin.left + margin.right)\n .attr(\"height\", height + margin.top + margin.bottom)\n .append(\"g\").attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");\n\n slider.append(\"line\")\n .attr(\"class\", \"track\")\n .attr(\"x1\", 
x.range()[0]+handleAreaShiftCx)\n .attr(\"x2\", x.range()[1]+handleAreaShiftCx)\n .select(function() { return this.parentNode.appendChild(this.cloneNode(true)); })\n .attr(\"class\", \"track-inset\")\n .select(function() { return this.parentNode.appendChild(this.cloneNode(true)); })\n .attr(\"class\", \"track-overlay\")\n .call(d3.drag()\n .on(\"start.interrupt\", function() { slider.interrupt(); })\n .on(\"start drag\", function() { setHandle(x.invert(d3.event.x - handleAreaShiftCx)); })\n .on('end', function() { ended(); })\n );\n\n slider.insert(\"g\", \".track-overlay\")\n .attr(\"class\", \"ticks\")\n .attr(\"transform\", \"translate(\"+ handleAreaShiftCx +\",\" + 18 + \")\")\n .selectAll(\"text\")\n .data(x.ticks(10))\n .enter().append(\"text\")\n .attr(\"x\", x)\n .attr(\"text-anchor\", \"middle\")\n .text(function(d) { return d; });\n\n handle = slider.insert(\"circle\", \".track-overlay\")\n .attr(\"class\", \"handle\")\n .attr(\"r\", handleAreaShiftCx)\n .attr('style', \"stroke: #099; stroke-width: 2\");\n\n function setHandle(h) {\n // 如果發生了事件,就把滑鼠的x座標資料轉換為value\n if (d3.event.sourceEvent) value = h;\n // 更新value\n upd(value);\n // 沒有做啥事的callback\n cback();\n }\n\n // 得到value值以後更新handle的位置\n upd = function (v) {\n // console.log(\"handle cx: \",x(v),\", round value:\",x(Math.round(v)));\n value = v;\n // 設定handle的cx值,也就是實際座標值\n handle.attr(\"cx\", x(v) + handleAreaShiftCx);\n }\n }\n\n chart.margin = function (_) {\n // return margin;\n if (!arguments.length) return margin;\n margin = _;\n return chart;\n };\n chart.callback = function (_) {\n if (!arguments.length) return cback;\n cback = _;\n return chart;\n };\n chart.value = function (_) {\n if (!arguments.length) return value;\n upd(_);\n return chart;\n };\n\n return chart;\n}" } ]
3
vgm64/bart_scraper
https://github.com/vgm64/bart_scraper
ffc7650b351cc6da2cf2d505ae77c104cb4c637e
c8508fa50eacf0af1f8393785d07460c14a4f038
b8b304a500aa8e70936853b1964f7c18d4856c60
refs/heads/master
2016-09-06T21:50:24.969101
2015-01-28T06:34:20
2015-01-28T06:34:20
29,954,165
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5918114185333252, "alphanum_fraction": 0.5942928194999695, "avg_line_length": 24.125, "blob_id": "7e1b4c1cb19111515b47272a400ef1f3e5d8144f", "content_id": "58c4385381283e65e39be23b5263ff129284476d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": "no_license", "max_line_length": 73, "num_lines": 32, "path": "/bart_scraper/scraper.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "\"\"\" Scraping.\n\"\"\"\n\nfrom bart_api import BartApi\nimport datetime\n\n\nclass BartScraper(BartApi):\n def __init__(self, api_key=\"MW9S-E7SL-26DU-VV8V\"):\n super(BartScraper, self).__init__(api_key)\n self.all_stations = None\n self.stn_by_abbr = None\n\n def initialize_bart_info(self):\n self.all_stations = self.get_stations()\n self.stn_by_abbr = {}\n for stn in self.all_stations:\n self.stn_by_abbr['abbr'] = stn\n\n # def get_next_scrape_time(self):\n # return datetime.datetime.now() + datetime.timedelta(seconds=60)\n\n def scrape_departure_times(self):\n etd = self.etd(station='ALL')\n # self.write(etd)\n return etd\n\n def write_to_stdout(self, *args):\n print args\n\n def write(self, *args):\n self.write_to_stdout(args)\n\n\n" }, { "alpha_fraction": 0.5414012670516968, "alphanum_fraction": 0.5414012670516968, "avg_line_length": 12.083333015441895, "blob_id": "62d7ce7c9cdb8997f017aa903821affff5c62777", "content_id": "7521affaa394c61c5b03c148a4eff0adbc7944b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/bart_scraper/logging.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport datetime\n\n\ndef log(*args):\n now = datetime.datetime.now()\n print '[{}]'.format(now),\n for arg in args:\n print arg,\n print\n" }, { "alpha_fraction": 0.5667060017585754, "alphanum_fraction": 0.5726091861724854, "avg_line_length": 34.29166793823242, "blob_id": "b8e0c0634d827cb5a68fe16fbe5f3d4823bb3179", "content_id": "8a053509f9d83868e36d213575ad624ccaf98a8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 101, "num_lines": 24, "path": "/bart_scraper/db.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "\"\"\" DB interface.\n\"\"\"\nimport MySQLdb\nimport sqlite3\nimport datetime\n\n\nclass BartDB(object):\n def __init__(self, user=None, pw=None, engine='MySQLdb', db='bart'):\n if engine == 'MySQLdb':\n self.db = MySQLdb.connect(host='localhost', user=user, pw=pw, db='bart')\n self.cursor = self.db.cursor()\n if engine == 'sqlite3':\n self.db = sqlite3.connect(database=db)\n self.cursor = self.db.cursor()\n try:\n self.cursor.execute(\"SELECT * FROM etd LIMIT 1\")\n except sqlite3.OperationalError:\n self.cursor.execute(\"CREATE TABLE etd (time, data)\")\n self.db.commit()\n\n def insert(self, timestamp, data):\n self.cursor.execute(\"\"\"INSERT INTO etd (time, data) VALUES (?,?)\"\"\", (timestamp, repr(data)))\n self.db.commit()\n" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 23.5, "blob_id": "b72b757c166797e1b890f141888411cd973567f5", "content_id": "c6725e95efa7d3252c86925d3317b3b81ef6cee9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 98, 
"license_type": "no_license", "max_line_length": 70, "num_lines": 4, "path": "/README.rst", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "Bart Scraper\n============\n\nA respectful BART scraper. Schedules API calls, dumps to db, and more.\n" }, { "alpha_fraction": 0.8448275923728943, "alphanum_fraction": 0.8448275923728943, "avg_line_length": 13.25, "blob_id": "4a71f36c2421bceaa78950ee98f698b2de49d352", "content_id": "dce1c083a3bc42d731bb9beecfdbb81e8679e493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/bart_scraper/__init__.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "import db\nimport program\nimport scheduler\nimport scraper\n\n" }, { "alpha_fraction": 0.6026522517204285, "alphanum_fraction": 0.6100196242332458, "avg_line_length": 28.507246017456055, "blob_id": "9ce8e5b056a8f5e0cad3b6a57207d4bc920b59b6", "content_id": "fb18e2e71482aba2298a95be557da4a4af2162f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2036, "license_type": "no_license", "max_line_length": 74, "num_lines": 69, "path": "/bart_scraper/scheduler.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "from bart_api import BartApi\nfrom logging import log\nimport time\nfrom datetime import datetime, timedelta\nfrom threading import Timer\n\none_day = timedelta(hours=24)\n\n\ndef next_time(clock_point, offset=1):\n \"\"\" Return the (datetime, seconds) tuple for the next time to run. \"\"\"\n now = datetime.now()\n then = now\n if clock_point == 'hour':\n then = then.replace(minute=0)\n then = then.replace(second=0)\n then = then.replace(microsecond=0)\n then = then + timedelta(seconds=3600+offset)\n elif clock_point == 'minute':\n then = then.replace(second=0)\n then = then.replace(microsecond=0)\n then = then + timedelta(seconds=60+offset)\n elif clock_point == 'second':\n then = then.replace(microsecond=0)\n then = then + timedelta(seconds=offset)\n\n sleep_time = (then - now).total_seconds()\n return then, sleep_time\n\n\ndef wait_until_in_range(time_range):\n start, end = time_range\n now = datetime.now()\n today, now_time = now.date(), now.time()\n\n\n log(\"Consider this...\")\n log(start, \"<\", now_time, \"<\", end)\n # We are within range.\n if start < now.time() < end:\n log(\"In range.\")\n time_to_sleep = timedelta()\n if now.time() < start:\n log(\"Before start\")\n time_to_sleep = datetime.combine(today, start) - now\n if now.time() > end:\n log(\"After end\")\n time_to_sleep = datetime.combine(today, start) + one_day - now\n\n log(\"Need to sleep\", time_to_sleep.total_seconds(), \"seconds.\")\n time.sleep(time_to_sleep.total_seconds())\n\n\ndef on_the(sched, func, time_range=None):\n if time_range:\n wait_until_in_range(time_range)\n execution_time, sleep_time = next_time(sched)\n log(\"I know to sleep for\", sleep_time, \"before executing.\")\n time.sleep(sleep_time)\n log(\"Executing func.\")\n func()\n\n\nif __name__ == '__main__':\n while True:\n dt, t = next_time('second')\n now = datetime.now()\n print \"I'm good\", now, now.microsecond, dt, t\n time.sleep(t)\n" }, { "alpha_fraction": 0.5810968279838562, "alphanum_fraction": 0.5962660312652588, "avg_line_length": 20.424999237060547, "blob_id": "ea596cf0c5046562d08a0826effea90c52adc460", "content_id": "9763daa8adc5d11105847ee4ec0cc4a36a36638f", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 73, "num_lines": 40, "path": "/bart_scraper/program.py", "repo_name": "vgm64/bart_scraper", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport datetime\n\nfrom scraper import BartScraper\nfrom db import BartDB\nfrom scheduler import on_the\nfrom logging import log\n\n\nclass BartProgram(object):\n def __init__(self):\n self.scraper = None\n self.db = None\n\n def go(self):\n timestamp = datetime.datetime.now()\n trains = self.scraper.scrape_departure_times()\n self.db.insert(timestamp, trains)\n\n def run(self):\n self.scraper = BartScraper()\n self.db = BartDB(engine='sqlite3', db='./bart.db')\n self.scraper.initialize_bart_info()\n\n time_range = datetime.time(22, 22, 30), datetime.time(23, 28, 25)\n\n while True:\n log(\"GLOBAL START\")\n on_the('second', self.go, time_range)\n\n\n\n\nif __name__ == '__main__':\n try:\n BartProgram().run()\n except KeyboardInterrupt:\n log(\"Exiting.\")\n" } ]
7
l3arteK/myFirstGame
https://github.com/l3arteK/myFirstGame
1773b332bb2dbaf5fbc28288ef81c2626456070d
aad1377593c34a768e5389c54ef3ccb0c02a8ac0
b6e23973c4702391bfbe97674db950fde270d3eb
refs/heads/master
2023-07-10T16:48:59.609819
2021-08-07T09:44:28
2021-08-07T09:44:28
393,626,348
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4976619780063629, "alphanum_fraction": 0.5368514657020569, "avg_line_length": 24.372880935668945, "blob_id": "d00ebf01c8ce23a8e28e8604c60dfe7d390b7d88", "content_id": "aa036aa5b5be699f92773a7f13683a865de97694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4491, "license_type": "no_license", "max_line_length": 75, "num_lines": 177, "path": "/main.py", "repo_name": "l3arteK/myFirstGame", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nimport math\nimport time\n\npygame.init()\n\ntlo = pygame.image.load('tlo.png')\nheart = pygame.image.load('heart.png')\n# gracz\nplayerImg = pygame.image.load('player.png')\nplayerX = 100\nplayerY = 450\nplayerY_move = 0\nplayerX_move = 0\n\n\ndef player(x, y):\n screen.blit(playerImg, (x, y))\n\n\n# enemy\nenemyImg = []\nenemyX = []\nenemyY = []\nenemyX_move = []\nenemyY_move = []\nfire = []\nnum_of_enemies = 12\nile = 1\nplus = True\nfor i in range(num_of_enemies):\n enemyImg.append(pygame.image.load('enemy.png'))\n enemyX.append(random.randint(1210, 1400))\n enemyY.append(random.randint(20, 1100))\n enemyX_move.append(random.random() + random.randint(1, 2))\n fire.append(False)\n\n\ndef enemy(x, y, i):\n screen.blit(enemyImg[i], (x, y))\n\n\ndef live(n):\n for i in range(n):\n x = 100 + (i * 115)\n screen.blit(heart, (x, 20))\n\n\n# stworzenie ekranu\nscreen = pygame.display.set_mode((1200, 900))\npygame.display.set_caption(\"ROAD\")\n\n# przegrana\nlose = False\nstart = False\nlives = 3\nend_font = pygame.font.Font('BabyDoll.ttf', 124)\nnote_font = pygame.font.Font('BabyDoll.ttf', 32)\nwon = False\n\ndef game_over_text():\n end_text = end_font.render(\"GAME OVER\", True, (0, 0, 0))\n note_text = note_font.render(\"PRESS SPACE TO REPLAY\", True, (0, 0, 0))\n screen.blit(end_text, (300, 400))\n screen.blit(note_text, (465, 750))\n\n\ndef won_text():\n won_text = end_font.render(\"YOU WON!\", True, (0, 0, 0))\n note_text = note_font.render(\"PRESS SPACE TO REPLAY\", True, (0, 0, 0))\n screen.blit(won_text, (300, 400))\n screen.blit(note_text, (465, 750))\n\n\ndef isCollision(enemyX, enemyY, playerX, playerY):\n distance = math.sqrt((playerX - enemyX) ** 2 + (playerY - enemyY) ** 2)\n if distance < 45:\n return True\n\n\ndef start_():\n for i in range(3,-1,-1):\n screen.blit(tlo, (0, 0))\n player(playerX, playerY)\n time_text = end_font.render(str(i), True, (0, 0, 0))\n screen.blit(time_text, (550, 400))\n pygame.display.update()\n time.sleep(1)\n return True\n\n\ndef reset():\n for i in range(num_of_enemies):\n enemyX[i] = random.randint(1210, 1400)\n enemyY[i] = random.randint(20, 1100)\n enemyX_move[i] = random.random() + random.randint(1, 2)\n fire[i] = False\n\n\nrunning = True\nwhile running:\n\n screen.fill((0, 0, 0))\n screen.blit(tlo, (0, 0))\n # wychodzenie z gry\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # sterowanie\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n playerY_move -= 3\n if event.key == pygame.K_DOWN:\n playerY_move += 3\n if event.key == pygame.K_LEFT:\n playerX_move -= 3\n if event.key == pygame.K_RIGHT:\n playerX_move += 3\n if event.key == pygame.K_SPACE and (lose or won):\n lose = False\n ile = 1\n lives = 3\n reset()\n start=False\n\n if event.type == pygame.KEYUP:\n playerY_move = 0\n playerX_move = 0\n\n playerX += playerX_move\n playerY += playerY_move\n if playerY >= 800:\n playerY = 800\n elif playerY <= 0:\n playerY = 0\n if playerX >= 300:\n playerX = 300\n 
elif playerX <= 100:\n playerX = 100\n if not lose:\n for j in range(ile):\n enemyX[j] -= enemyX_move[j]\n if not fire[j]:\n enemyY[j] = playerY\n fire[j] = True\n enemy(enemyX[j], enemyY[j], j)\n collision = isCollision(enemyX[j], enemyY[j], playerX, playerY)\n if collision:\n lives -= 1\n print(\"trafiony\")\n enemyX[j] = (-50)\n enemyY[j] = random.randint(40, 1050)\n if lives == 0:\n lose = True\n for n in range(ile):\n if enemyX[n] > -35:\n plus = False\n break\n else:\n plus = True\n if plus and ile < num_of_enemies:\n ile = ile + 1\n print(ile)\n for n in range(ile - 1):\n enemyX[n] = random.randint(1200, 1399)\n if lose:\n game_over_text()\n if ile == 12 and plus:\n won_text()\n won = True\n live(lives)\n player(playerX, playerY)\n if not start:\n start = start_()\n pygame.display.update()\n" }, { "alpha_fraction": 0.7899159789085388, "alphanum_fraction": 0.7899159789085388, "avg_line_length": 38.33333206176758, "blob_id": "0eaac2ae2b7ae645c272bdce12cdaaf3494e56cc", "content_id": "ba855ae617d830b12705c64b9351c5dc4602e05b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/README.md", "repo_name": "l3arteK/myFirstGame", "src_encoding": "UTF-8", "text": "# myFirstGame\nMoja pieirwsza stworzona gra w pygame.\nW folderze \"Gra\" można znależć plik .exe uruchamiający całą grę. \n" } ]
2
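A quick illustration of the hit test used in the main.py record above: its `isCollision` helper treats the player and the enemy as circles and flags a hit when the straight-line distance between their origins drops below a hard-coded 45-pixel threshold. The following is a minimal standalone sketch of that same check — the 45-pixel value is copied from main.py, and `math.hypot` is used here only as an equivalent way to compute the Euclidean distance:

```
import math

def is_collision(enemy_x, enemy_y, player_x, player_y, threshold=45):
    # Same test as main.py's isCollision: straight-line distance
    # between the two sprite origins, compared to a fixed radius.
    distance = math.hypot(player_x - enemy_x, player_y - enemy_y)
    return distance < threshold

# Enemy 30 px to the right of and 30 px below the player:
# hypot(30, 30) ~= 42.4 < 45, so this counts as a hit.
print(is_collision(130, 480, 100, 450))  # True
```

Note that the original `isCollision` returns `True` on a hit and falls through to `None` otherwise; the sketch normalizes that to an explicit boolean.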
cascad/cbr_exchange_rates
https://github.com/cascad/cbr_exchange_rates
5ce1200b5b19daaa2881ef4794ccaba825fabc35
262b203492515c26f25f30456ef3ac9ebe97cb19
c0fb55d1df2ae34c8aded13cc9bdacdf275b91a1
refs/heads/master
2022-10-05T22:56:23.939044
2020-06-09T11:49:24
2020-06-09T11:49:24
270,973,090
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44124168157577515, "alphanum_fraction": 0.5437915921211243, "avg_line_length": 99.22222137451172, "blob_id": "848a2a49faf5afa04bf46c093de0eb12d8ac798b", "content_id": "5ee74d5bfa89da64011335b6ed4bbd6dc9895631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 112, "num_lines": 36, "path": "/exc_loader/tst_raw_rub_exc.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "test_raw_cbr_rub_1 = [\n {'NumCode': '036', 'CharCode': 'AUD', 'Nominal': '1', 'Name': 'Australian Dollar', 'Value': '43,3835'},\n {'NumCode': '944', 'CharCode': 'AZN', 'Nominal': '1', 'Name': 'Azerbaijan Manat', 'Value': '36,4902'},\n {'NumCode': '826', 'CharCode': 'GBP', 'Nominal': '1', 'Name': 'British Pound Sterling', 'Value': '81,3069'},\n {'NumCode': '051', 'CharCode': 'AMD', 'Nominal': '100', 'Name': 'Armenia Dram', 'Value': '12,9239'},\n {'NumCode': '933', 'CharCode': 'BYN', 'Nominal': '1', 'Name': 'Belarussian Ruble', 'Value': '29,3545'},\n {'NumCode': '975', 'CharCode': 'BGN', 'Nominal': '1', 'Name': 'Bulgarian lev', 'Value': '35,4720'},\n {'NumCode': '986', 'CharCode': 'BRL', 'Nominal': '1', 'Name': 'Brazil Real', 'Value': '15,4006'},\n {'NumCode': '348', 'CharCode': 'HUF', 'Nominal': '100', 'Name': 'Hungarian Forint', 'Value': '20,9637'},\n {'NumCode': '344', 'CharCode': 'HKD', 'Nominal': '10', 'Name': 'Hong Kong Dollar', 'Value': '79,5029'},\n {'NumCode': '208', 'CharCode': 'DKK', 'Nominal': '10', 'Name': 'Danish Krone', 'Value': '92,8776'},\n {'NumCode': '840', 'CharCode': 'USD', 'Nominal': '1', 'Name': 'US Dollar', 'Value': '61,9057'},\n {'NumCode': '978', 'CharCode': 'EUR', 'Nominal': '1', 'Name': 'Euro', 'Value': '69,3777'},\n {'NumCode': '356', 'CharCode': 'INR', 'Nominal': '100', 'Name': 'Indian Rupee', 'Value': '86,8255'},\n {'NumCode': '398', 'CharCode': 'KZT', 'Nominal': '100', 'Name': 'Kazakhstan Tenge', 'Value': '16,1665'},\n {'NumCode': '124', 'CharCode': 'CAD', 'Nominal': '1', 'Name': 'Canadian Dollar', 'Value': '47,4446'},\n {'NumCode': '417', 'CharCode': 'KGS', 'Nominal': '100', 'Name': 'Kyrgyzstan Som', 'Value': '88,6979'},\n {'NumCode': '156', 'CharCode': 'CNY', 'Nominal': '10', 'Name': 'China Yuan', 'Value': '88,8696'},\n {'NumCode': '498', 'CharCode': 'MDL', 'Nominal': '10', 'Name': 'Moldova Lei', 'Value': '35,9917'},\n {'NumCode': '578', 'CharCode': 'NOK', 'Nominal': '10', 'Name': 'Norwegian Krone', 'Value': '70,3834'},\n {'NumCode': '985', 'CharCode': 'PLN', 'Nominal': '1', 'Name': 'Polish Zloty', 'Value': '16,3017'},\n {'NumCode': '946', 'CharCode': 'RON', 'Nominal': '1', 'Name': 'Romanian Leu', 'Value': '14,5005'},\n {'NumCode': '960', 'CharCode': 'XDR', 'Nominal': '1', 'Name': 'SDR', 'Value': '85,6051'},\n {'NumCode': '702', 'CharCode': 'SGD', 'Nominal': '1', 'Name': 'Singapore Dollar', 'Value': '45,9958'},\n {'NumCode': '972', 'CharCode': 'TJS', 'Nominal': '10', 'Name': 'Tajikistan Ruble', 'Value': '63,8980'},\n {'NumCode': '949', 'CharCode': 'TRY', 'Nominal': '1', 'Name': 'Turkish Lira', 'Value': '10,4148'},\n {'NumCode': '934', 'CharCode': 'TMT', 'Nominal': '1', 'Name': 'New Turkmenistan Manat', 'Value': '17,7126'},\n {'NumCode': '860', 'CharCode': 'UZS', 'Nominal': '10000', 'Name': 'Uzbekistan Sum', 'Value': '65,1121'},\n {'NumCode': '980', 'CharCode': 'UAH', 'Nominal': '10', 'Name': 'Ukrainian Hryvnia', 'Value': '26,1205'},\n {'NumCode': '203', 'CharCode': 'CZK', 'Nominal': '10', 'Name': 'Czech Koruna', 'Value': 
'27,3019'},\n {'NumCode': '752', 'CharCode': 'SEK', 'Nominal': '10', 'Name': 'Swedish Krona', 'Value': '66,2930'},\n {'NumCode': '756', 'CharCode': 'CHF', 'Nominal': '1', 'Name': 'Swiss Franc', 'Value': '63,8994'},\n {'NumCode': '710', 'CharCode': 'ZAR', 'Nominal': '10', 'Name': 'S.African Rand', 'Value': '44,0839'},\n {'NumCode': '410', 'CharCode': 'KRW', 'Nominal': '1000', 'Name': 'South Korean Won', 'Value': '53,5535'},\n {'NumCode': '392', 'CharCode': 'JPY', 'Nominal': '100', 'Name': 'Japanese Yen', 'Value': '56,9746'},\n]\n" }, { "alpha_fraction": 0.4334038197994232, "alphanum_fraction": 0.4857293963432312, "avg_line_length": 25.64788818359375, "blob_id": "205c17eef05ece62329bb246a15c293b0728e7b8", "content_id": "7158aaa45378693638c391878d7042ef33c33022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1892, "license_type": "no_license", "max_line_length": 116, "num_lines": 71, "path": "/exc_loader/curr_exc_cmp.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\nfrom pprint import pformat\n\n\ndef curr2dict(fn: str):\n with open(fn, \"r\") as f:\n lines = f.readlines()\n\n data = {}\n for line in lines:\n ex, info, usd, rub = line.split(\",\")\n key = ex + \" $ \" + info\n data[key] = (rub, usd)\n\n return data\n\n\ndef cmp(d1, d2):\n keys = set(d1.keys())\n keys.update(d2.keys())\n\n res = []\n\n for k in keys:\n d1_data = None\n d2_data = None\n\n if k in d1:\n d1_data = float(d1[k][0]), float(d1[k][1])\n if k in d2:\n d2_data = float(d2[k][0]), float(d2[k][1])\n\n if d1_data is None or d2_data is None:\n raise Exception(f\"{k} exchange is None in {'d1' if d1_data is None else 'd2'}\")\n else:\n if d1_data != d2_data:\n res.append((k, d1_data[0] - d2_data[0], d1_data[1] - d2_data[1], d1_data[0], d2_data[0], d1_data[1],\n d2_data[1]))\n\n return res\n\n\ndef get_diff(file1: str, file2: str):\n d1 = curr2dict(file1)\n d2 = curr2dict(file2)\n res = cmp(d1, d2)\n res = sorted(res, key=lambda x: datetime.datetime.strptime(x[0].split(\" $ \")[1], \"%Y-%m-%d\"))\n # pprint(res)\n for i in res:\n if i[1] is None:\n print(i[0], None)\n continue\n\n curr = i[0]\n drub = abs(round(i[1], 3))\n dusd = abs(round(i[2], 3))\n rub_1 = round(i[3], 3)\n rub_2 = round(i[4], 3)\n usd_1 = round(i[5], 3)\n usd_2 = round(i[6], 3)\n\n if dusd > 0.05: # and curr.split(\" $ \")[0] != \"CUP\"\n print(f\"{curr} {rub_1} - {rub_2} = {drub} RUB\\n{curr} {usd_1} - {usd_2} = {dusd} USD\")\n\n with open(\"../cmp_res.txt\", \"w\") as f:\n f.write(pformat(res))\n\n\nif __name__ == \"__main__\":\n currs = (\"../exchange_data/curr_2020-04-21.csv\", \"../exchange_data/alter_curr_2020-04-21.csv\")\n get_diff(currs[0], currs[1])\n" }, { "alpha_fraction": 0.6578171253204346, "alphanum_fraction": 0.7050147652626038, "avg_line_length": 27.25, "blob_id": "a72fecacf4f5237902fbf2d45ce50a197adcde95", "content_id": "3f9069d5ffa84efc06f3140a7697b8a4873b4aa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 339, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": "/README.md", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "# Exchange rates loader\nTool for get exchange rates from cbr.ru\n\n### Setup virtual environments and install requirements\n```\n(project/)$ python -m venv <path_to_venv>\n(project/)$ <path_to_venv>/python -m pip install -r requirements.txt\n```\n### Run exchange loader\n```\n(project/)$ 
<path_to_venv>/python run app.py 2019.01.10 2019.03.01\n```\n" }, { "alpha_fraction": 0.5319837927818298, "alphanum_fraction": 0.5402833819389343, "avg_line_length": 35.32352828979492, "blob_id": "e7ea9f59392ebaa673d9eff7417285a0b3418a16", "content_id": "3b7b700ad66bac6567a352fbc60a4d8041b087d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4968, "license_type": "no_license", "max_line_length": 112, "num_lines": 136, "path": "/app.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import asyncio\nimport datetime\nimport os\n\nimport click\nimport dateutil.parser\nfrom dateutil.parser import ParserError\n\nfrom exc_loader.cbr_get_rub import cbr_rub_page_loader, cbr_rub_page_parser\nfrom exc_loader.cbr_get_usd import cbr_usd_xml_loader, cbr_usd_xml_parser\nfrom exc_loader.models import RawExchange, Exchange\nfrom exc_loader.other_exc_loader import Collector\nfrom exc_loader.ranges import get_days\n\n\ndef load_exc(start: datetime.datetime, end: datetime.datetime, output_filename: str, backup: str, append: bool):\n data = {}\n\n with open(output_filename, \"w\" if not append else \"a\") as f, open(backup, \"w\" if not append else \"a\",\n encoding=\"utf8\") as b:\n last_correct_usd_raw_records = None\n last_correct_usd_date = None\n\n for day in get_days(start, end):\n text = cbr_rub_page_loader(day)\n d = cbr_rub_page_parser(text)\n\n if len(d) != 34:\n raise Exception(f\"{len(d)} on {day} RUB currents\")\n\n rub_excs = [RawExchange(day, i) for i in d]\n\n u2r_rate = None\n for i in rub_excs:\n if i.str == \"USD\":\n u2r_rate = i\n\n if u2r_rate is None:\n raise Exception(f\"empty first USD date {day.date()}\")\n\n for exc in rub_excs:\n rub_rate = exc.rate / exc.multiply\n usd_rate = rub_rate / u2r_rate.rate\n\n e = Exchange(day, exc.str, rub_rate, usd_rate)\n f.write(e.to_line())\n back = \",\".join((day.strftime(\"%Y-%m-%d\"), \"to_rub\", exc.str, str(exc.rate), str(exc.multiply),\n \"Прямая\" if exc.convtype == 0 else \"Обратная\"))\n b.write(back + \"\\n\")\n data[(e.str, e.date)] = str(e)\n\n text = cbr_usd_xml_loader(day)\n xml_date, d = cbr_usd_xml_parser(text)\n\n rec_count = len(d)\n\n if rec_count > 0 and rec_count != 113:\n raise Exception(f\"{rec_count} on {d} rare currents - error\")\n\n if rec_count == 0:\n if last_correct_usd_raw_records is None:\n raise Exception(\n f\"Page on first date {day} for load exchanges has no data. Please, use too early date.\")\n else:\n print(f\"Page on {day} has no data. 
Used previous date {last_correct_usd_date}.\")\n d = last_correct_usd_raw_records\n\n if xml_date.date() != day.date():\n raise Exception(f\"Page on {day} has data on {xml_date}.\")\n\n last_correct_usd_raw_records = d\n last_correct_usd_date = day\n\n usd_excs = [RawExchange(day, i) for i in d]\n\n for exc in usd_excs:\n if day != exc.date:\n print(f\"bad date on u2r {exc.date}\")\n if exc.convtype == 0:\n usd_rate = 1 / exc.rate\n rub_rate = usd_rate * u2r_rate.rate\n else:\n usd_rate = exc.rate\n rub_rate = exc.rate * u2r_rate.rate\n\n e = Exchange(day, exc.str, rub_rate, usd_rate)\n f.write(e.to_line())\n back = \",\".join((day.strftime(\"%Y-%m-%d\"), \"to_usd\", exc.str, str(exc.rate), str(exc.multiply),\n \"Прямая\" if exc.convtype == 0 else \"Обратная\"))\n b.write(back + \"\\n\")\n data[(e.str, e.date)] = str(e)\n\n print(f\"Complete: {day.date()}\")\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\[email protected]('--output', default=os.path.join(os.getcwd(), \"exchange_data\"), help='Output directory')\[email protected]('--backup', default=os.path.join(os.getcwd(), \"backup\"), help='Backup data directory')\[email protected]('start_date')\[email protected]('end_date')\ndef run(output: str, backup: str, start_date: str, end_date: str):\n \"\"\"Example: `python run app.py 2019.01.01 2020.01.01`\"\"\"\n for path in (output, backup):\n if not os.path.isdir(path):\n os.makedirs(path)\n\n try:\n start = dateutil.parser.parse(start_date)\n end = dateutil.parser.parse(end_date)\n except ParserError as e:\n click.echo(e)\n return\n\n now = datetime.datetime.now()\n # fake = datetime.datetime(2020, 3, 3)\n\n output_path = os.path.join(output, f\"curr_{now.strftime('%Y-%m-%d')}.csv\")\n backup_filename = \"backup_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".csv\"\n backup_path = os.path.join(backup, backup_filename)\n\n click.echo(f\" [x] Output: [{output_path}]. 
Backup: [{backup_path}]\")\n\n load_exc(start, end, output_path, backup_path, False)\n c = Collector(output_path)\n currs = {\"MMK\": (start, end)}\n asyncio.get_event_loop().run_until_complete(c.download(currs))\n click.echo('Complete')\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.6627771258354187, "alphanum_fraction": 0.7094515562057495, "avg_line_length": 37.95454406738281, "blob_id": "a2d99b02d7eb3248cae212bd809cca7aa3e56f57", "content_id": "62e714224a16795dca28c84fd724f09f1245f03f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 171, "num_lines": 22, "path": "/debug_utils/cbr_soap_manual.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import requests\n\nendpoint = \"http://www.cbr.ru/DailyInfoWebServ/DailyInfo.asmx\"\n\nbody = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<soap12:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:soap12=\"http://www.w3.org/2003/05/soap-envelope\">\n <soap12:Body>\n <GetReutersCursOnDateXML xmlns=\"http://web.cbr.ru/\">\n <On_date>2020-03-30</On_date>\n </GetReutersCursOnDateXML>\n </soap12:Body>\n</soap12:Envelope>\"\"\"\n\nbody = body.encode('utf-8')\nsession = requests.session()\nsession.headers = {\"Content-Type\": \"application/soap+xml; charset=utf-8\"}\nsession.headers.update({\"Content-Length\": str(len(body))})\nresponse = session.post(url=endpoint, data=body, verify=False)\nprint(response.text)\nprint(response.status_code)\n# with open(\"bad_soap.html\", \"wb\") as f:\n# f.write(response.content)\n" }, { "alpha_fraction": 0.5761353373527527, "alphanum_fraction": 0.5796972513198853, "avg_line_length": 29.351350784301758, "blob_id": "b379dea5196129c17a09ca4c28656e8b465d47b7", "content_id": "bc27ea19ab45cdb3ac283a3de4f0112afd557732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1123, "license_type": "no_license", "max_line_length": 119, "num_lines": 37, "path": "/exc_loader/models.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\n\ndef d2s1(dt: datetime.datetime):\n return dt.strftime(\"%d/%m/%Y\")\n\n\ndef d2s2(dt: datetime.datetime):\n return dt.strftime(\"%d.%m.%Y\")\n\n\nclass RawExchange:\n def __init__(self, date: datetime.datetime, data: dict):\n self.digit = data[\"NumCode\"].strip()\n self.str = data[\"CharCode\"].strip()\n self.name = data[\"Name\"]\n self.convtype = data[\"Convtype\"]\n self.multiply = int(data[\"Nominal\"])\n self.rate = float(data[\"Value\"].replace(\",\", \".\").replace(\" \", \"\"))\n self.date = date\n\n def __str__(self):\n return f\"{self.date.date()}:{self.str}:{self.rate}\"\n\n\nclass Exchange:\n def __init__(self, date: datetime.datetime, charname: str, rub_rate: float, usd_rate: float):\n self.date = date\n self.str = charname\n self.rub_rate = rub_rate\n self.usd_rate = usd_rate\n\n def __str__(self):\n return f\"{self.date.date()}:{self.str}:{self.rub_rate}:{self.usd_rate}\"\n\n def to_line(self, sep=\",\"):\n return sep.join((self.str, str(self.date.strftime(\"%Y-%m-%d\")), str(self.usd_rate), str(self.rub_rate))) + \"\\n\"\n" }, { "alpha_fraction": 0.8783783912658691, "alphanum_fraction": 0.8783783912658691, "avg_line_length": 7.333333492279053, "blob_id": "ad87dfbd97ccf150ee08971cb24e2c9c18c24ce1", "content_id": "ffd63c8ad0863131fb9480d74cd1731c5aee308e", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 74, "license_type": "no_license", "max_line_length": 15, "num_lines": 9, "path": "/requirements.txt", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "pandas\nlxml\ncssselect\naiohttp\nrequests\nopenpyxl\ntqdm\npython-dateutil\nclick" }, { "alpha_fraction": 0.5777084231376648, "alphanum_fraction": 0.6052183508872986, "avg_line_length": 33.233009338378906, "blob_id": "07867ca4520ef03eaf37433c33b68279f186492d", "content_id": "858ab7c7828942aa6bcfad5ad95e93d54f55e35b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3546, "license_type": "no_license", "max_line_length": 175, "num_lines": 103, "path": "/exc_loader/cbr_get_usd.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\nimport lxml.html\nimport requests\nfrom lxml.etree import fromstring\n\nfrom exc_loader.codes import codes\nfrom exc_loader.models import d2s2, RawExchange\n\nPOS = {0: \"NumCode\", 1: \"CharCode\", 2: \"Name\", 3: \"Convtype\", 4: \"Value\"}\n\n\ndef cbr_usd_page_loader(date: datetime.datetime) -> str:\n base_url = \"https://www.cbr.ru/hd_base/seldomc/sc_daily/\"\n dt = d2s2(date)\n post_data = {\"UniDbQuery.Posted\": \"True\", \"UniDbQuery.To\": dt}\n # headers = {\n # \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36\"}\n\n a = requests.post(base_url, data=post_data, headers={\"Referrer\": base_url}) # , headers=headers\n return a.text\n\n\ndef cbr_usd_xml_loader(date: datetime.datetime) -> str:\n endpoint = \"http://www.cbr.ru/DailyInfoWebServ/DailyInfo.asmx\"\n\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap12:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:soap12=\"http://www.w3.org/2003/05/soap-envelope\">\n <soap12:Body>\n <GetReutersCursOnDateXML xmlns=\"http://web.cbr.ru/\">\n <On_date>{dt}</On_date>\n </GetReutersCursOnDateXML>\n </soap12:Body>\n </soap12:Envelope>\"\"\"\n\n sdate = date.strftime(\"%Y-%m-%d\")\n body = template.format(dt=sdate).encode(\"utf8\")\n\n headers = {\"Content-Type\": \"application/soap+xml; charset=utf-8\", \"Content-Length\": str(len(body))}\n response = requests.post(url=endpoint, data=body, headers=headers, verify=False)\n if response.status_code != 200:\n raise Exception(response.text)\n return response.text\n\n\ndef cbr_usd_xml_parser(xml: str):\n raw = fromstring(xml.encode(\"utf8\"))\n header = raw.xpath(\"//ReutersValutesData\")\n # print(dir(header[0]))\n sdate = header[0].get(\"OnDate\")\n date = datetime.datetime.strptime(sdate, \"%Y%m%d\")\n print(date)\n\n currencies = raw.xpath(\"//ReutersValutesData/Currency\")\n raw_excs = []\n for curr in currencies:\n cdata = {}\n for child in curr:\n cdata[child.tag] = child.text\n\n raw_excs.append(\n {\"Value\": cdata[\"val\"], \"NumCode\": cdata[\"num_code\"], \"CharCode\": codes[cdata[\"num_code\"]],\n \"Name\": codes[cdata[\"num_code\"]], \"Convtype\": int(cdata[\"dir\"]), \"Nominal\": 1})\n\n return date, raw_excs\n\n\ndef cbr_usd_page_parser(text: str):\n data = []\n root = lxml.html.fromstring(text)\n sel = \"#content > div > div > div > div.table-wrapper > div.table > table\" # > tbody\n r = root.cssselect(sel)\n if len(r) == 0:\n raise Exception(text)\n\n for row in r[0][1:]:\n dct = {}\n for i, td in enumerate(row):\n 
dct[POS[i]] = td.text\n\n if dct[\"Convtype\"] not in (\"Прямая\", \"Обратная\"):\n raise Exception(dct[\"Convtype\"])\n\n dct[\"Convtype\"] = 0 if dct[\"Convtype\"] == \"Прямая\" else 1\n dct[\"Nominal\"] = 1\n data.append(dct)\n # if not isinstance(row, lxml.html.HtmlComment):\n # print(row[0])\n # print(dir(row[0]))\n # code = row[0].text_content()\n # val = float(row[2].text)\n # ex[code] = val\n if len(data) < 100 or len(data) > 120:\n raise Exception(f\"{len(data)}, rates: {[i['Value'] for i in data]}\")\n return data\n\n\nif __name__ == \"__main__\":\n date = datetime.datetime(2020, 1, 1)\n text = cbr_usd_page_loader(date)\n d = cbr_usd_page_parser(text)\n excs = [RawExchange(date, i) for i in d]\n print([str(i) for i in excs])\n" }, { "alpha_fraction": 0.49512261152267456, "alphanum_fraction": 0.501777708530426, "avg_line_length": 32.750770568847656, "blob_id": "fbae518c5570699c21774cd8bd75018e7b4f7b61", "content_id": "88d6451a5d0590db927a4875a8a867371069214d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10985, "license_type": "no_license", "max_line_length": 201, "num_lines": 325, "path": "/exc_loader/other_exc_loader.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import asyncio\nimport datetime\nimport os\n\nimport aiohttp\nimport lxml\nimport requests\nimport tqdm\nfrom lxml.html.clean import Cleaner\n\nBACKUP_DATA_DIRECTORY = os.path.join(\"D:\\\\repo\\\\taxes_calc\", \"backup\")\n\nRUS = [\"AUD\",\n \"AZN\",\n \"GBP\",\n \"AMD\",\n \"BYN\",\n \"BGN\",\n \"BRL\",\n \"HUF\",\n \"HKD\",\n \"DKK\",\n \"USD\",\n \"EUR\",\n \"INR\",\n \"KZT\",\n \"CAD\",\n \"KGS\",\n \"CNY\",\n \"MDL\",\n \"NOK\",\n \"PLN\",\n \"RON\",\n \"XDR\",\n \"SGD\",\n \"TJS\",\n \"TRY\",\n \"TMT\",\n \"UZS\",\n \"UAH\",\n \"CZK\",\n \"SEK\",\n \"CHF\",\n \"ZAR\",\n \"KRW\",\n \"JPY\",\n ]\n\n\ndef check_date(curr: str, dt: datetime.datetime, page):\n text = f\"This {curr} currency table offers current and historic\"\n root = lxml.html.fromstring(page)\n sel = \"#contentL > div.module.clearfix > p.historicalRateTable-date\"\n # xpath = '//*[@id=\"contentL\"]/div[1]' # /p[1]\n r = root.cssselect(sel)\n # r = root.xpath(xpath)\n dt_from_page = datetime.datetime.strptime(r[0].text.split(\" \")[0], \"%Y-%m-%d\")\n # dt_from_page = dateutil.parser.parse(r[0].text)\n return dt_from_page.date() == dt.date() and text in page\n\n\ndef no_data_checker(page):\n bad_text = \"Build current and historic rate tables with your chosen base currency with XE Currency Tables. 
For commercial purposes, get an automated currency feed through the XE Currency Data API.\"\n return bad_text in page\n\n\ndef parse(page: str):\n ex = {}\n root = lxml.html.fromstring(page)\n sel = \"#historicalRateTbl > tbody\"\n r = root.cssselect(sel)\n for row in r[0]:\n if not isinstance(row, lxml.html.HtmlComment):\n # print(dir(row[0]))\n code = row[0].text_content()\n val = float(row[2].text)\n ex[code] = val\n return ex\n\n\ndef gen_dates(start, end):\n c = start\n while end >= c:\n yield c\n c = c + datetime.timedelta(days=1)\n\n\nclass MockResult:\n def __init__(self, params: dict, code: int, res):\n self._mdata = params\n self._code = code\n self._resp = res\n\n @property\n def mdata(self):\n return self._mdata\n\n def result(self):\n return self._code, self._resp\n\n\nclass Collector:\n def __init__(self, filename: str):\n self.waiter = None\n self.pool = []\n self.base = \"https://www.xe.com/currencytables/\"\n self.out = filename\n self.session = None\n self.last_page = {}\n self.last_page_date = {}\n\n fn = \"backup_xe_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".csv\"\n self.backup_filename = os.path.join(BACKUP_DATA_DIRECTORY, fn)\n\n async def _get_session(self):\n if self.session is not None:\n if not self.session.closed:\n return self.session\n connector = aiohttp.TCPConnector(keepalive_timeout=10 * 60)\n # self.session = aiohttp.ClientSession(connector=connector)\n self.session = await aiohttp.ClientSession(connector=connector).__aenter__()\n return self.session\n\n def _sync_download(self, url, params: dict):\n resp = requests.get(url, data=params)\n if resp.status_code != 200:\n err = \"response: {} -> {}\".format(resp.status_code, resp.url)\n return resp.status_code, err\n return resp.status_code, resp.text\n\n async def close_connections(self):\n if self.session is not None:\n if not self.session.closed:\n await self.session.__aexit__(None, None, None)\n self.session = None\n\n async def async_download(self, url, params: dict):\n session = await self._get_session()\n if session.closed:\n raise Exception(\"session was closed\")\n # async with session:\n async with session.get(url, params=params) as resp:\n if resp.status != 200:\n err = \"response: {} -> {}\".format(resp.status, resp.url)\n return resp.status, err\n\n return resp.status, await resp.text()\n\n def sync_download(self, currs: dict):\n print(f\"loads currencies {list(currs.keys())} from {self.base}\")\n total = sum([len(list(gen_dates(*i))) for i in currs.values()])\n pbar = tqdm.tqdm(total=total)\n counter = 0\n for curr, st in currs.items():\n for date in gen_dates(*st):\n sdate = date.strftime(\"%Y-%m-%d\")\n params = {\"from\": curr, \"date\": sdate}\n\n code, resp = self._sync_download(self.base, params)\n mdata = {\"curr\": curr, \"date\": date}\n task = MockResult(mdata, code, resp)\n self.pool.append(task)\n pbar.update(1)\n counter += 1\n\n if counter > 1000:\n self.parse(self.out)\n self.pool.clear()\n counter = 0\n\n print(f\"\\nComplete {curr} on range {currs[curr][0]}:{currs[curr][1]}\")\n pbar.close()\n self.parse(self.out)\n\n async def download(self, currs: dict):\n print(f\"loads currencies {list(currs.keys())} from {self.base}\")\n total = sum([len(list(gen_dates(*i))) for i in currs.values()])\n pbar = tqdm.tqdm(total=total)\n\n for curr, st in currs.items():\n for date in gen_dates(*st):\n sdate = date.strftime(\"%Y-%m-%d\")\n params = {\"from\": curr, \"date\": sdate}\n\n task = asyncio.create_task(self.async_download(self.base, params))\n task.mdata = {\"curr\": curr, 
\"date\": date}\n self.pool.append(task)\n\n if len(self.pool) >= 50:\n await asyncio.wait(self.pool)\n self.parse(self.out)\n self.pool.clear()\n\n pbar.update(1)\n\n await asyncio.wait(self.pool)\n self.parse(self.out)\n pbar.close()\n self.pool.clear()\n\n await self.close_connections()\n\n def parse(self, filename: str):\n\n with open(filename, \"a\") as f, open(self.backup_filename, \"a\") as b:\n for r in self.pool:\n status, page = r.result()\n\n if status == 200:\n curr = r.mdata[\"curr\"]\n date = r.mdata[\"date\"]\n sdate = date.strftime(\"%Y-%m-%d\")\n\n if no_data_checker(page):\n if self.last_page.get(curr) is None:\n raise Exception(\n f\"{curr} page on first date {sdate} for load exchanges has no data. Please, use too early date.\")\n else:\n print(f\"{curr} page on {date} has no data. Used previous date {self.last_page_date[curr]}.\")\n page = self.last_page[curr]\n else:\n if not check_date(curr, date, page):\n raise Exception(\"Bad check {curr} on {dt}\")\n self.last_page[curr] = page\n self.last_page_date[curr] = date\n\n data = parse(page)\n line = \"{},{},{},{}\\n\".format(curr, sdate, data[\"USD\"], data[\"RUB\"])\n f.write(line)\n\n back = \",\".join((sdate, \"to_rub\", curr, str(data[\"RUB\"]), \"1\", \"Обратная\"))\n b.write(back + \"\\n\")\n back = \",\".join((sdate, \"to_usd\", curr, str(data[\"USD\"]), \"1\", \"Обратная\"))\n b.write(back + \"\\n\")\n\n async def fake_download(self, currs: dict, fake_dt: datetime.datetime):\n print(f\"loads currencies {list(currs.keys())} from {self.base}\")\n total = sum([len(list(gen_dates(*i))) for i in currs.values()])\n pbar = tqdm.tqdm(total=total)\n\n for curr, st in currs.items():\n for date in gen_dates(*st):\n if curr not in RUS:\n date = fake_dt\n sdate = date.strftime(\"%Y-%m-%d\")\n params = {\"from\": curr, \"date\": sdate}\n\n task = asyncio.create_task(self.async_download(self.base, params))\n task.mdata = {\"curr\": curr, \"date\": date}\n self.pool.append(task)\n\n if len(self.pool) >= 50:\n await asyncio.wait(self.pool)\n self.fake_parse(self.out)\n self.pool.clear()\n\n pbar.update(1)\n\n await asyncio.wait(self.pool)\n self.fake_parse(self.out)\n pbar.close()\n self.pool.clear()\n\n def fake_parse(self, filename: str):\n with open(filename, \"a\", encoding=\"utf8\") as f:\n for r in self.pool:\n status, page = r.result()\n\n if status == 200:\n curr = r.mdata[\"curr\"]\n date = r.mdata[\"date\"]\n sdate = date.strftime(\"%Y-%m-%d\")\n\n if no_data_checker(page):\n if self.last_page.get(curr) is None:\n raise Exception(\n f\"{curr} page on first date {sdate} for load exchanges has no data. Please, use too early date.\")\n else:\n print(f\"{curr} page on {date} has no data. 
Used previous date {self.last_page_date[curr]}.\")\n page = self.last_page[curr]\n else:\n if not check_date(curr, date, page):\n raise Exception(\"Bad check {curr} on {dt}\")\n self.last_page[curr] = page\n self.last_page_date[curr] = date\n\n data = parse(page)\n line = \"{},{},{},{}\\n\".format(curr, sdate, data[\"USD\"], data[\"RUB\"])\n f.write(line)\n\n def manual(self):\n lines = []\n for r in self.pool:\n status, page = r.result()\n\n if status == 200:\n curr = r.mdata[\"curr\"]\n date = r.mdata[\"date\"]\n sdate = date.strftime(\"%Y-%m-%d\")\n\n data = parse(page)\n\n line = \"{},{},{},{}\\n\".format(curr, sdate, data[\"USD\"], data[\"RUB\"])\n lines.append(line)\n return lines\n\n\nif __name__ == \"__main__\":\n start = datetime.datetime(2019, 12, 25, 0, 0)\n end = datetime.datetime(2020, 4, 17, 0, 0)\n now = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n outfile = f\"D:\\\\repo\\\\taxes_calc\\\\exchange_data/alter_curr_{now}.csv\"\n # if os.path.isfile(outfile):\n # os.remove(outfile)\n\n c = Collector(outfile)\n\n with open(\"all_currs.txt\", \"r\") as f:\n lines = f.readlines()\n\n all_currs = [i.strip() for i in lines]\n currs = {i: (start, end) for i in all_currs}\n # fake_dt = datetime.datetime(2020, 3, 3)\n # asyncio.run(c.fake_download(currs, fake_dt))\n asyncio.get_event_loop().run_until_complete(c.download(currs))\n" }, { "alpha_fraction": 0.5988258123397827, "alphanum_fraction": 0.6477494835853577, "avg_line_length": 29.058822631835938, "blob_id": "9a46e4ec6e54f13e075161588d3e252c5ef21bec", "content_id": "826d80668fd6a252da4c9286530af81bb9de2cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 112, "num_lines": 34, "path": "/debug_utils/manual_loader.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\nimport requests\n\nfrom base.utils import is_correct_data\nfrom exc_loader.models import d2s2\n\n\ndef load(date: datetime.datetime):\n base_url = \"https://www.cbr.ru/hd_base/seldomc/sc_daily/\"\n dt = d2s2(date)\n post_data = {\"UniDbQuery.Posted\": \"True\", \"UniDbQuery.To\": dt}\n body = f\"UniDbQuery.Posted=True&UniDbQuery.ToDate={dt}\"\n a = requests.post(base_url, data=post_data) # , headers=headers\n return a.text\n\n\nbase_url = \"https://www.cbr.ru/hd_base/seldomc/sc_daily/\"\n\n# s = requests.session()\n# s.headers.update(H)\n# r = s.get(base_url)\n# print(s.headers, s.cookies)\n# print(r.status_code)\n# cookie_val = dict(_ym_uid=1586965843231132748, _ym_d=1586965843, _ym_isad=1, accept=1, _ym_visorc_5774506='b')\n# for k, v in cookie_val.items():\n# if not isinstance(v, str):\n# cookie_val[k] = str(v)\n# requests.utils.add_dict_to_cookiejar(s.cookies,\n# cookie_val)\n\n\ndt = datetime.datetime(2019, 12, 27)\nprint(is_correct_data(dt, load(dt)))\n" }, { "alpha_fraction": 0.580110490322113, "alphanum_fraction": 0.5834254026412964, "avg_line_length": 25.617647171020508, "blob_id": "f9140c4dd2741fed8db184cfafee556e814afd40", "content_id": "12d2d1a0d18cc04fc50b59dc44ddb3b781fc1881", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 905, "license_type": "no_license", "max_line_length": 109, "num_lines": 34, "path": "/debug_utils/soap_parser.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom lxml.etree import fromstring\n\nfrom exc_loader.codes import codes\n\n\ndef cbr_usd_xml_parser(xml: str):\n 
raw = fromstring(xml)\n header = raw.xpath(\"//ReutersValutesData\")\n # print(dir(header[0]))\n sdate = header[0].get(\"OnDate\")\n date = datetime.datetime.strptime(sdate, \"%Y%m%d\")\n print(date)\n\n currencies = raw.xpath(\"//ReutersValutesData/Currency\")\n raw_excs = []\n for curr in currencies:\n cdata = {}\n for child in curr:\n cdata[child.tag] = child.text\n digit = cdata[\"num_code\"]\n chr = codes[cdata[\"num_code\"]]\n raw_excs.append(\n {\"NumCode\": digit, \"CharCode\": chr, \"Name\": chr, \"Convtype\": int(cdata[\"dir\"]), \"Nominal\": 1,\n \"date\": date})\n\n return raw_excs\n\n\nwith open(\"../exc_loader/good_soap.xml\", \"rb\") as f:\n raw = f.read()\n\nprint(cbr_usd_xml_parser(raw))\n" }, { "alpha_fraction": 0.5771749019622803, "alphanum_fraction": 0.6183348894119263, "avg_line_length": 25.725000381469727, "blob_id": "f54ccd740e9aea1e095e36fd855bd7d392a4270e", "content_id": "14e292f5184f365c28e20684a553b266a55d5d53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 129, "num_lines": 40, "path": "/exc_loader/cbr_get_rub.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\nimport requests\nfrom lxml import etree\n\nfrom exc_loader.models import d2s1, RawExchange\n\n\ndef cbr_rub_page_loader(date: datetime.datetime) -> str:\n base_url = \"http://www.cbr.ru/scripts/XML_daily_eng.asp?date_req={}\"\n dt = d2s1(date)\n url = base_url.format(dt)\n print(url)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36\"}\n\n a = requests.get(url, headers=headers)\n return a.text\n\n\ndef cbr_rub_page_parser(text: str) -> list:\n data = []\n root = etree.fromstring(text.encode(\"cp1251\"))\n for appt in root.getchildren():\n dct = {}\n for elem in appt.getchildren():\n dct[elem.tag] = elem.text\n dct[\"Convtype\"] = 0\n data.append(dct)\n\n assert len(data) == 34\n return data\n\n\nif __name__ == \"__main__\":\n date = datetime.datetime(2020, 1, 1)\n text = cbr_rub_page_loader(date)\n d = cbr_rub_page_parser(text)\n excs = [RawExchange(date, i) for i in d]\n print([str(i) for i in excs])\n" }, { "alpha_fraction": 0.45748117566108704, "alphanum_fraction": 0.5468245148658752, "avg_line_length": 23.447368621826172, "blob_id": "e4d92d4e19ddea021a22701e3119baad90320755", "content_id": "738df313bcd154d80fc7d7f498b63c16387b6fa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/exc_loader/ranges.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\n\n\ndef get_days(start, end):\n if start > end:\n raise Exception(f\"{start} > {end}\")\n\n curr = start\n while curr.date() <= end.date():\n yield curr\n curr = curr + datetime.timedelta(days=1)\n\n\ndef test_ranges():\n d1 = (datetime.datetime(2019, 12, 29), datetime.datetime(2020, 2, 4))\n r1 = list(get_days(*d1))\n assert r1[0] == d1[0]\n assert r1[-1] == d1[1]\n\n d2 = (datetime.datetime(2019, 12, 31), datetime.datetime(2020, 1, 2))\n r2 = list(get_days(*d2))\n assert r2[0] == d2[0]\n assert r2[-1] == d2[1]\n\n d = (datetime.datetime(2019, 9, 6), datetime.datetime(2019, 9, 6))\n r = list(get_days(*d))\n assert len(r) == 1\n assert r[0] == d[0]\n\n d = (datetime.datetime(2019, 9, 6), datetime.datetime(2019, 9, 7))\n r = 
list(get_days(*d))\n assert len(r) == 2\n assert r[0] == d[0]\n assert r[-1] == d[1]\n\n\nif __name__ == \"__main__\":\n test_ranges()\n" }, { "alpha_fraction": 0.47588831186294556, "alphanum_fraction": 0.5190355181694031, "avg_line_length": 24.015872955322266, "blob_id": "3d174347275dbf3a19060e7cbd5da5ce6c2ff120", "content_id": "32f82753100a72874ca510e45f82a5d1938add3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1576, "license_type": "no_license", "max_line_length": 95, "num_lines": 63, "path": "/exc_loader/backup_cmp_util.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\nfrom pprint import pprint, pformat\n\n\ndef curr2dict(fn: str):\n with open(fn, \"r\", encoding=\"utf8\") as f:\n lines = f.readlines()\n\n data = {}\n for line in lines:\n sdate, convert, exc, rate, nominal, direct = line.split(\",\")\n key = \",\".join((exc, sdate, convert, nominal, direct))\n data[key] = rate\n\n return data\n\n\ndef cmp(d1, d2):\n keys = set(d1.keys())\n keys.update(d2.keys())\n\n res = []\n\n for k in keys:\n d1_data = None\n d2_data = None\n\n if k in d1:\n d1_data = float(d1[k])\n if k in d2:\n d2_data = float(d2[k])\n\n if d1_data is None or d2_data is None:\n raise Exception(f\"{k} not in {'d1' if d1_data is None else 'd2'}\")\n else:\n if d1_data != d2_data:\n res.append((k, d1_data, d2_data, abs(d1_data - d2_data)))\n\n return res\n\n\ndef get_diff(file1: str, file2: str):\n d1 = curr2dict(file1)\n d2 = curr2dict(file2)\n res = cmp(d1, d2)\n res = sorted(res, key=lambda x: datetime.datetime.strptime(x[0].split(\",\")[1], \"%Y-%m-%d\"))\n srt = []\n\n for i in res:\n exc, sdate, convert, nominal, direct = i[0].split(\",\")\n\n if convert == \"to_usd\" and i[3] > 0.01:\n # print(i)\n z = (i[0], round(i[1], 3), round(i[2], 3), round(i[3], 3))\n srt.append(z)\n\n srt = sorted(srt, key=lambda x: x[3])\n pprint(srt)\n\n\nif __name__ == \"__main__\":\n currs = (\"../backup/bad_backup_2020-04-20.csv\", \"../backup/backup_2020-04-20.csv\")\n get_diff(currs[0], currs[1])\n" }, { "alpha_fraction": 0.5457570552825928, "alphanum_fraction": 0.5640599131584167, "avg_line_length": 24.04166603088379, "blob_id": "c9ac076c7705cd019058e3e24102e016400fb2c2", "content_id": "943d16dd99aeaf1b30141c35c2e4395efc67f8e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 111, "num_lines": 24, "path": "/exc_loader/codes_parser.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import requests\nfrom lxml.html import fromstring\n\nurl = \"https://www.iban.com/currency-codes\"\n\nr = requests.get(url)\npage = fromstring(r.content)\nsel = \"body > div.boxed > div.flat-row.pad-top20px.pad-bottom70px > div > div > div > div > table > tbody > tr\"\nselect = page.cssselect(sel)\nc2d = {}\nd2c = {}\n\nwith open(\"../codes.csv\", \"w\") as f:\n for elem in select:\n chr = elem[2].text\n dig = elem[3].text\n if chr and dig:\n c2d[chr] = dig\n d2c[dig] = chr\n print(dig, chr)\n line = f\"{dig},{chr}\\n\"\n f.write(line)\n\nprint(d2c)\n" }, { "alpha_fraction": 0.5707006454467773, "alphanum_fraction": 0.5923566818237305, "avg_line_length": 28.074073791503906, "blob_id": "a9a237a3b5f794eaa7713675e7d46291bf437cb5", "content_id": "2a9e45fff5babb51df8fc09f296d2ad065af9f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, 
"license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/base/utils.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "import datetime\nimport re\n\n\ndef no_data_finder(text):\n \"\"\"Raw page checker for no-info-page\"\"\"\n if \"За выбранный вами период нет информации\" in text:\n return True\n return False\n\n\ndef is_correct_data(dt, text):\n \"\"\"Raw page checker on correct date.\"\"\"\n # \"Данные на 27.12.2019.\"\n found = re.search(r'(Данные на \\d+\\.\\d+\\.\\d+)\\.', text)\n if found is None:\n raise Exception(f\"Not found text about date: \\\"Данные на %d.%m.%Y.\\\"\")\n # 27.12.2019 format here\n text = found.group()\n raw_date = text.rstrip(\".\").split(\"Данные на \")[1]\n try:\n date = datetime.datetime.strptime(raw_date, \"%d.%m.%Y\")\n except Exception:\n print(f\"Invalid date {raw_date}. Text: {text}\")\n raise\n else:\n return dt.date() == date.date()\n" }, { "alpha_fraction": 0.44360071420669556, "alphanum_fraction": 0.5350928902626038, "avg_line_length": 61.796295166015625, "blob_id": "7d267d1a92397494347ed141ade8d289c8ec78b1", "content_id": "66f6f935e51ab2761ce35ac2f35c889023eae486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15184, "license_type": "no_license", "max_line_length": 110, "num_lines": 216, "path": "/exc_loader/tst_raw_usd_exc.py", "repo_name": "cascad/cbr_exchange_rates", "src_encoding": "UTF-8", "text": "test_raw_cbr_usd_1 = [\n {'NumCode': '008', 'CharCode': 'ALL ', 'Name': 'Албанский лек', 'Convtype': 0, 'Value': '114,3050'},\n {'NumCode': '012', 'CharCode': 'DZD ', 'Name': 'Алжирский динар', 'Convtype': 0,\n 'Value': '122,8350'},\n {'NumCode': '973', 'CharCode': 'AOA ', 'Name': 'Ангольская кванза', 'Convtype': 0,\n 'Value': '518,3750'},\n {'NumCode': '032', 'CharCode': 'ARS ', 'Name': 'Аргентинское песо', 'Convtype': 0,\n 'Value': '63,4635'},\n {'NumCode': '533', 'CharCode': 'AWG ', 'Name': 'Арубанский флорин', 'Convtype': 0,\n 'Value': '1,8100'},\n {'NumCode': '971', 'CharCode': 'AFN ', 'Name': 'Афганский афгани', 'Convtype': 0,\n 'Value': '75,9500'},\n {'NumCode': '044', 'CharCode': 'BSD ', 'Name': 'Багамский доллар', 'Convtype': 0,\n 'Value': '1,0000'},\n {'NumCode': '050', 'CharCode': 'BDT ', 'Name': 'Бангладешская така', 'Convtype': 0,\n 'Value': '84,8300'},\n {'NumCode': '052', 'CharCode': 'BBD ', 'Name': 'Барбадосский доллар', 'Convtype': 0,\n 'Value': '2,0191'},\n {'NumCode': '048', 'CharCode': 'BHD ', 'Name': 'Бахрейнский динар', 'Convtype': 0,\n 'Value': '0,3776'},\n {'NumCode': '084', 'CharCode': 'BZD ', 'Name': 'Белизский доллар', 'Convtype': 0,\n 'Value': '2,0157'},\n {'NumCode': '060', 'CharCode': 'BMD ', 'Name': 'Бермудский доллар', 'Convtype': 0,\n 'Value': '1,0000'},\n {'NumCode': '068', 'CharCode': 'BOB ', 'Name': 'Боливийский боливиано', 'Convtype': 0,\n 'Value': '6,9100'},\n {'NumCode': '072', 'CharCode': 'BWP ', 'Name': 'Ботсванская пула', 'Convtype': 1,\n 'Value': '0,0850'},\n {'NumCode': '096', 'CharCode': 'BND ', 'Name': 'Брунейский доллар', 'Convtype': 0,\n 'Value': '1,4455'},\n {'NumCode': '108', 'CharCode': 'BIF ', 'Name': 'Бурундийский франк', 'Convtype': 0,\n 'Value': '1 888,4400'},\n {'NumCode': '064', 'CharCode': 'BTN ', 'Name': 'Бутанский нгултрум', 'Convtype': 0,\n 'Value': '74,9850'},\n {'NumCode': '548', 'CharCode': 'VUV ', 'Name': 'Вануатский вату', 'Convtype': 0,\n 'Value': '123,6600'},\n {'NumCode': '928', 'CharCode': 'VES ', 'Name': 'Венесуэльский боливар cоберано', 'Convtype': 0,\n 'Value': '72 
169,7429'},\n {'NumCode': '951', 'CharCode': 'XCD ', 'Name': 'Восточно - карибский доллар', 'Convtype': 0,\n 'Value': '2,7100'},\n {'NumCode': '704', 'CharCode': 'VND ', 'Name': 'Вьетнамский донг', 'Convtype': 0,\n 'Value': '23 460,0000'},\n {'NumCode': '332', 'CharCode': 'HTG ', 'Name': 'Гаитский гурд', 'Convtype': 0, 'Value': '95,2635'},\n {'NumCode': '328', 'CharCode': 'GYD ', 'Name': 'Гайанский доллар', 'Convtype': 0,\n 'Value': '208,8250'},\n {'NumCode': '270', 'CharCode': 'GMD ', 'Name': 'Гамбийский даласи', 'Convtype': 0,\n 'Value': '51,2500'},\n {'NumCode': '936', 'CharCode': 'GHS ', 'Name': 'Ганский седи', 'Convtype': 0, 'Value': '5,5450'},\n {'NumCode': '320', 'CharCode': 'GTQ ', 'Name': 'Гватемальский кетсаль', 'Convtype': 0,\n 'Value': '7,6300'},\n {'NumCode': '324', 'CharCode': 'GNF ', 'Name': 'Гвинейский франк', 'Convtype': 0,\n 'Value': '9 542,5000'},\n {'NumCode': '292', 'CharCode': 'GIP ', 'Name': 'Гибралтарский фунт', 'Convtype': 1,\n 'Value': '1,1712'},\n {'NumCode': '340', 'CharCode': 'HNL ', 'Name': 'Гондурасская лемпира', 'Convtype': 0,\n 'Value': '24,7410'},\n {'NumCode': '981', 'CharCode': 'GEL ', 'Name': 'Грузинский лари', 'Convtype': 0, 'Value': '3,0855'},\n {'NumCode': '807', 'CharCode': 'MKD ', 'Name': 'Денар Республики Македония', 'Convtype': 0,\n 'Value': '57,2050'},\n {'NumCode': '784', 'CharCode': 'AED ', 'Name': 'Дирхам ОАЭ', 'Convtype': 0, 'Value': '3,6730'},\n {'NumCode': '930', 'CharCode': 'STN ', 'Name': 'Добра Сан-Томе и Принсипи', 'Convtype': 0,\n 'Value': '22,8426'},\n {'NumCode': '516', 'CharCode': 'NAD ', 'Name': 'Доллар Намибии', 'Convtype': 0, 'Value': '17,1735'},\n {'NumCode': '090', 'CharCode': 'SBD ', 'Name': 'Доллар Соломоновых Островов', 'Convtype': 1,\n 'Value': '0,1206'},\n {'NumCode': '780', 'CharCode': 'TTD ', 'Name': 'Доллар Тринидада и Тобаго', 'Convtype': 0,\n 'Value': '6,7567'},\n {'NumCode': '242', 'CharCode': 'FJD ', 'Name': 'Доллар Фиджи', 'Convtype': 1, 'Value': '0,4235'},\n {'NumCode': '214', 'CharCode': 'DOP ', 'Name': 'Доминиканское песо', 'Convtype': 0,\n 'Value': '53,7200'},\n {'NumCode': '818', 'CharCode': 'EGP ', 'Name': 'Египетский фунт', 'Convtype': 0,\n 'Value': '15,7500'},\n {'NumCode': '967', 'CharCode': 'ZMW ', 'Name': 'Замбийская квача', 'Convtype': 0,\n 'Value': '16,9875'},\n {'NumCode': '360', 'CharCode': 'IDR ', 'Name': 'Индонезийская рупия', 'Convtype': 0,\n 'Value': '16 172,5150'},\n {'NumCode': '400', 'CharCode': 'JOD ', 'Name': 'Иорданский динар', 'Convtype': 0,\n 'Value': '0,7090'}, {'NumCode': '368', 'CharCode': 'IQD ', 'Name': 'Иракский динар', 'Convtype': 0,\n 'Value': '1 193,8100'},\n {'NumCode': '364', 'CharCode': 'IRR ', 'Name': 'Иранский риал', 'Convtype': 0,\n 'Value': '42 000,0000'},\n {'NumCode': '352', 'CharCode': 'ISK ', 'Name': 'Исландская крона', 'Convtype': 0,\n 'Value': '139,3500'},\n {'NumCode': '886', 'CharCode': 'YER ', 'Name': 'Йеменский риал', 'Convtype': 0,\n 'Value': '250,3000'},\n {'NumCode': '116', 'CharCode': 'KHR ', 'Name': 'Камбоджийский риель', 'Convtype': 0,\n 'Value': '4 080,0000'},\n {'NumCode': '634', 'CharCode': 'QAR ', 'Name': 'Катарский риал', 'Convtype': 0, 'Value': '3,6410'},\n {'NumCode': '404', 'CharCode': 'KES ', 'Name': 'Кенийский шиллинг', 'Convtype': 0,\n 'Value': '105,9000'},\n {'NumCode': '598', 'CharCode': 'PGK ', 'Name': 'Кина Папуа-Новой Гвинеи', 'Convtype': 1,\n 'Value': '0,2925'},\n {'NumCode': '170', 'CharCode': 'COP ', 'Name': 'Колумбийское песо', 'Convtype': 0,\n 'Value': '4 089,0000'},\n {'NumCode': '174', 'CharCode': 'KMF ', 'Name': 'Коморский франк', 
'Convtype': 0,\n 'Value': '443,5000'},\n {'NumCode': '977', 'CharCode': 'BAM ', 'Name': 'Конвертируемая марка', 'Convtype': 0,\n 'Value': '1,8254'},\n {'NumCode': '976', 'CharCode': 'CDF ', 'Name': 'Конголезский франк', 'Convtype': 0,\n 'Value': '1 707,5000'},\n {'NumCode': '188', 'CharCode': 'CRC ', 'Name': 'Костариканский колон', 'Convtype': 0,\n 'Value': '565,5000'},\n {'NumCode': '192', 'CharCode': 'CUP ', 'Name': 'Кубинское песо', 'Convtype': 0, 'Value': '1,0000'},\n {'NumCode': '414', 'CharCode': 'KWD ', 'Name': 'Кувейтский динар', 'Convtype': 0,\n 'Value': '0,3114'}, {'NumCode': '418', 'CharCode': 'LAK ', 'Name': 'Лаосский кип', 'Convtype': 0,\n 'Value': '8 928,0000'},\n {'NumCode': '430', 'CharCode': 'LRD ', 'Name': 'Либерийский доллар', 'Convtype': 0,\n 'Value': '199,5100'},\n {'NumCode': '422', 'CharCode': 'LBP ', 'Name': 'Ливанский фунт', 'Convtype': 0,\n 'Value': '1 512,0000'},\n {'NumCode': '434', 'CharCode': 'LYD ', 'Name': 'Ливийский динар', 'Convtype': 0, 'Value': '1,4233'},\n {'NumCode': '480', 'CharCode': 'MUR ', 'Name': 'Маврикийская рупия', 'Convtype': 0,\n 'Value': '39,3500'},\n {'NumCode': '929', 'CharCode': 'MRU ', 'Name': 'Мавританская угия', 'Convtype': 0,\n 'Value': '37,6000'},\n {'NumCode': '454', 'CharCode': 'MWK ', 'Name': 'Малавийская квача', 'Convtype': 0,\n 'Value': '736,5800'},\n {'NumCode': '969', 'CharCode': 'MGA ', 'Name': 'Малагасийский ариари', 'Convtype': 0,\n 'Value': '3 765,0000'},\n {'NumCode': '458', 'CharCode': 'MYR ', 'Name': 'Малайзийский ринггит', 'Convtype': 0,\n 'Value': '4,3865'},\n {'NumCode': '462', 'CharCode': 'MVR ', 'Name': 'Мальдивская руфия', 'Convtype': 0,\n 'Value': '15,4600'},\n {'NumCode': '504', 'CharCode': 'MAD ', 'Name': 'Марокканский дирхам', 'Convtype': 0,\n 'Value': '9,7913'},\n {'NumCode': '484', 'CharCode': 'MXN ', 'Name': 'Мексиканское песо', 'Convtype': 0,\n 'Value': '23,5190'},\n {'NumCode': '943', 'CharCode': 'MZN ', 'Name': 'Мозамбикский метикал', 'Convtype': 0,\n 'Value': '66,3250'},\n {'NumCode': '496', 'CharCode': 'MNT ', 'Name': 'Монгольский тугрик', 'Convtype': 0,\n 'Value': '2 775,0000'},\n {'NumCode': '524', 'CharCode': 'NPR ', 'Name': 'Непальская рупия', 'Convtype': 0,\n 'Value': '119,9400'},\n {'NumCode': '566', 'CharCode': 'NGN ', 'Name': 'Нигерийская найра', 'Convtype': 0,\n 'Value': '306,6500'},\n {'NumCode': '558', 'CharCode': 'NIO ', 'Name': 'Никарагуанская золотая кордоба', 'Convtype': 0,\n 'Value': '33,7350'},\n {'NumCode': '554', 'CharCode': 'NZD ', 'Name': 'Новозеландский доллар', 'Convtype': 1,\n 'Value': '0,5821'},\n {'NumCode': '376', 'CharCode': 'ILS ', 'Name': 'Новый израильский шекель', 'Convtype': 0,\n 'Value': '3,5975'},\n {'NumCode': '901', 'CharCode': 'TWD ', 'Name': 'Новый тайваньский доллар', 'Convtype': 0,\n 'Value': '30,2590'},\n {'NumCode': '512', 'CharCode': 'OMR ', 'Name': 'Оманский риал', 'Convtype': 0, 'Value': '0,3850'},\n {'NumCode': '776', 'CharCode': 'TOP ', 'Name': 'Паанга Королевства Тонга', 'Convtype': 1,\n 'Value': '0,4344'},\n {'NumCode': '586', 'CharCode': 'PKR ', 'Name': 'Пакистанская рупия', 'Convtype': 0,\n 'Value': '159,2000'},\n {'NumCode': '600', 'CharCode': 'PYG ', 'Name': 'Парагвайский гуарани', 'Convtype': 0,\n 'Value': '6 643,0000'},\n {'NumCode': '446', 'CharCode': 'MOP ', 'Name': 'Патака Макао', 'Convtype': 0, 'Value': '8,0030'},\n {'NumCode': '604', 'CharCode': 'PEN ', 'Name': 'Перуанский соль', 'Convtype': 0, 'Value': '3,5546'},\n {'NumCode': '222', 'CharCode': 'SVC ', 'Name': 'Сальвадорский колон', 'Convtype': 0,\n 'Value': '8,7545'},\n {'NumCode': '682', 
'CharCode': 'SAR ', 'Name': 'Саудовский риял', 'Convtype': 0, 'Value': '3,7608'},\n {'NumCode': '748', 'CharCode': 'SZL ', 'Name': 'Свазилендский лилангени', 'Convtype': 0,\n 'Value': '17,1735'},\n {'NumCode': '408', 'CharCode': 'KPW ', 'Name': 'Северокорейская вона', 'Convtype': 0,\n 'Value': '130,0000'},\n {'NumCode': '690', 'CharCode': 'SCR ', 'Name': 'Сейшельская рупия', 'Convtype': 0,\n 'Value': '13,7050'},\n {'NumCode': '941', 'CharCode': 'RSD ', 'Name': 'Сербский динар', 'Convtype': 0,\n 'Value': '109,0200'},\n {'NumCode': '760', 'CharCode': 'SYP ', 'Name': 'Сирийский фунт', 'Convtype': 0,\n 'Value': '436,5000'},\n {'NumCode': '706', 'CharCode': 'SOS ', 'Name': 'Сомалийский шиллинг', 'Convtype': 0,\n 'Value': '578,5000'},\n {'NumCode': '938', 'CharCode': 'SDG ', 'Name': 'Суданский фунт', 'Convtype': 0, 'Value': '55,1277'},\n {'NumCode': '968', 'CharCode': 'SRD ', 'Name': 'Суринамский доллар', 'Convtype': 0,\n 'Value': '7,4750'},\n {'NumCode': '694', 'CharCode': 'SLL ', 'Name': 'Сьерра-Леонский леоне', 'Convtype': 0,\n 'Value': '9 719,2100'},\n {'NumCode': '764', 'CharCode': 'THB ', 'Name': 'Таиландский бат', 'Convtype': 0,\n 'Value': '32,5450'},\n {'NumCode': '834', 'CharCode': 'TZS ', 'Name': 'Танзанийский шиллинг', 'Convtype': 0,\n 'Value': '2 306,0000'},\n {'NumCode': '788', 'CharCode': 'TND ', 'Name': 'Тунисский динар', 'Convtype': 0, 'Value': '2,9284'},\n {'NumCode': '800', 'CharCode': 'UGX ', 'Name': 'Угандийский шиллинг', 'Convtype': 0,\n 'Value': '3 832,5000'},\n {'NumCode': '858', 'CharCode': 'UYU ', 'Name': 'Уругвайское песо', 'Convtype': 0,\n 'Value': '45,9550'},\n {'NumCode': '608', 'CharCode': 'PHP ', 'Name': 'Филиппинское писо', 'Convtype': 0,\n 'Value': '51,1100'},\n {'NumCode': '262', 'CharCode': 'DJF ', 'Name': 'Франк Джибути', 'Convtype': 0, 'Value': '178,0250'},\n {'NumCode': '950', 'CharCode': 'XAF ', 'Name': 'Франк КФА ВЕАС', 'Convtype': 0,\n 'Value': '611,6200'},\n {'NumCode': '952', 'CharCode': 'XOF ', 'Name': 'Франк КФА ВСЕАО', 'Convtype': 0,\n 'Value': '583,7000'},\n {'NumCode': '646', 'CharCode': 'RWF ', 'Name': 'Франк Руанды', 'Convtype': 0, 'Value': '952,0285'},\n {'NumCode': '654', 'CharCode': 'SHP ', 'Name': 'Фунт Св. 
Елены', 'Convtype': 1, 'Value': '1,1654'},\n {'NumCode': '238', 'CharCode': 'FKP ', 'Name': 'Фунт Фолклендских островов', 'Convtype': 1,\n 'Value': '1,1712'},\n {'NumCode': '191', 'CharCode': 'HRK ', 'Name': 'Хорватская куна', 'Convtype': 0, 'Value': '7,0602'},\n {'NumCode': '152', 'CharCode': 'CLP ', 'Name': 'Чилийское песо', 'Convtype': 0,\n 'Value': '862,3000'},\n {'NumCode': '144', 'CharCode': 'LKR ', 'Name': 'Шри-Ланкийская рупия', 'Convtype': 0,\n 'Value': '187,0500'},\n {'NumCode': '232', 'CharCode': 'ERN ', 'Name': 'Эритрейская накфа', 'Convtype': 0,\n 'Value': '15,0750'},\n {'NumCode': '132', 'CharCode': 'CVE ', 'Name': 'Эскудо Кабо-Верде', 'Convtype': 0,\n 'Value': '103,0050'},\n {'NumCode': '230', 'CharCode': 'ETB ', 'Name': 'Эфиопский быр', 'Convtype': 0, 'Value': '32,6000'},\n {'NumCode': '388', 'CharCode': 'JMD ', 'Name': 'Ямайский доллар', 'Convtype': 0,\n 'Value': '135,5000'}]\n\ntest_raw_cbr_usd_2_1conv = [\n {'NumCode': '072', 'CharCode': 'BWP ', 'Name': 'Ботсванская пула', 'Convtype': 1, 'Value': '0,0850'},\n {'NumCode': '292', 'CharCode': 'GIP ', 'Name': 'Гибралтарский фунт', 'Convtype': 1, 'Value': '1,1712'},\n {'NumCode': '090', 'CharCode': 'SBD ', 'Name': 'Доллар Соломоновых Островов', 'Convtype': 1, 'Value': '0,1206'},\n {'NumCode': '242', 'CharCode': 'FJD ', 'Name': 'Доллар Фиджи', 'Convtype': 1, 'Value': '0,4235'},\n {'NumCode': '598', 'CharCode': 'PGK ', 'Name': 'Кина Папуа-Новой Гвинеи', 'Convtype': 1, 'Value': '0,2925'},\n {'NumCode': '554', 'CharCode': 'NZD ', 'Name': 'Новозеландский доллар', 'Convtype': 1, 'Value': '0,5821'},\n {'NumCode': '776', 'CharCode': 'TOP ', 'Name': 'Паанга Королевства Тонга', 'Convtype': 1, 'Value': '0,4344'},\n {'NumCode': '654', 'CharCode': 'SHP ', 'Name': 'Фунт Св. Елены', 'Convtype': 1, 'Value': '1,1654'},\n {'NumCode': '238', 'CharCode': 'FKP ', 'Name': 'Фунт Фолклендских островов', 'Convtype': 1, 'Value': '1,1712'},\n]\n" } ]
17
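Two details of the app.py record above are easy to miss: CBR quotes use comma decimal separators (`RawExchange` normalizes them with `replace(",", ".")`), and each quote is for `Nominal` units of the currency, so `load_exc` first divides `Value` by `Nominal` to get a per-unit RUB rate and then divides by the USD row's rate to get a USD rate. A minimal arithmetic sketch of that conversion, using the USD and JPY rows from tst_raw_rub_exc.py (sample data from that file, not live rates):

```
# Sample rows from tst_raw_rub_exc.py (RUB per <Nominal> units):
#   USD: Value '61,9057', Nominal 1
#   JPY: Value '56,9746', Nominal 100
usd_rub = float("61,9057".replace(",", ".")) / 1    # RUB per 1 USD
jpy_rub = float("56,9746".replace(",", ".")) / 100  # RUB per 1 JPY

# USD rate of JPY, as computed in load_exc(): rub_rate / u2r_rate.rate
jpy_usd = jpy_rub / usd_rub

print(round(jpy_rub, 6))  # 0.569746
print(round(jpy_usd, 6))  # 0.009203
```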
jpitcher2012/logs-analysis
https://github.com/jpitcher2012/logs-analysis
04481a785ac016312723caf554f65bced4edd975
f9e9a091625d5eb34d63638b9406502bad054c24
52a7853edb74c31d5f4afccca7c340ff29634436
refs/heads/master
2021-05-01T13:15:19.661758
2018-02-13T03:22:08
2018-02-13T03:22:08
121,074,370
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5337159037590027, "alphanum_fraction": 0.5485413670539856, "avg_line_length": 30.208955764770508, "blob_id": "cc8426ec1420f70f0534728758f5b9a8a3636302", "content_id": "6f139b78fc300a282f8b57ef3035c79c99995429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2091, "license_type": "no_license", "max_line_length": 78, "num_lines": 67, "path": "/vagrant/report.py", "repo_name": "jpitcher2012/logs-analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n\"\"\"Print reports on newspaper articles.\"\"\"\n\nimport psycopg2\n\n# Connect to the database and create a cursor for getting the results\nconn = psycopg2.connect(\"dbname=news\")\ncursor = conn.cursor()\n\n# First query - get the 3 most popular articles\nsql1 = \"\"\"\n select a.title, count(*) as views\n from articles as a, log as l\n where l.path = CONCAT('/article/', a.slug)\n group by a.title\n order by views desc\n limit 3;\n \"\"\"\ncursor.execute(sql1)\nresults = cursor.fetchall()\nprint \"-------------------------------------------\"\nprint \"The 3 most popular articles:\"\nfor row in results:\n print \" \", row[0], \"-\", \"{:,}\".format(row[1]), \"views\"\n\n# Second query - get the most popular authors\nsql2 = \"\"\"\n select au.name, count(l.id) as views\n from authors as au, articles as ar, log as l\n where au.id = ar.author\n and l.path = CONCAT('/article/', ar.slug)\n group by au.name\n order by views desc;\n \"\"\"\ncursor.execute(sql2)\nresults = cursor.fetchall()\nprint \"-------------------------------------------\"\nprint \"The most popular authors:\"\nfor row in results:\n print \" \", row[0], \"-\", \"{:,}\".format(row[1]), \"views\"\n\n# Third query - get the days where more than 1% of requests had errors\nsql3 = \"\"\"\n select date, ROUND(percent::numeric,1) || '%' as percent\n from (\n select date, error/(total*1.0)*100 as percent\n from (\n select\n trim(to_char(time,'Month')) || to_char(time,' DD, YYYY') as date,\n count(id) as total,\n count(case when status = '404 NOT FOUND' then id end) as error\n from log\n group by date\n ) as x\n ) as y\n where percent > 1;\n \"\"\"\ncursor.execute(sql3)\nresults = cursor.fetchall()\nprint \"-------------------------------------------\"\nprint \"Days where more than 1% of requests had errors:\"\nfor row in results:\n print \" \", row[0], \"-\", row[1], \"errors\"\nprint \"-------------------------------------------\"\n\n# Close the connection to the database\nconn.close()\n" }, { "alpha_fraction": 0.723100483417511, "alphanum_fraction": 0.7389087080955505, "avg_line_length": 39.020408630371094, "blob_id": "7824e898c2db36cc5df424e9173b0e4b9ea7bc2b", "content_id": "c2fd54531457e9790e20ed003276cc90b1dedeb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1961, "license_type": "no_license", "max_line_length": 189, "num_lines": 49, "path": "/README.md", "repo_name": "jpitcher2012/logs-analysis", "src_encoding": "UTF-8", "text": "# Logs Analysis\nThis program generates a report from a database on newspaper articles.\n\n&nbsp;\n## Prerequisites\n- Install [Python](https://www.python.org/)\n- Install [VirtualBox](https://www.virtualbox.org/)\n- Install [Vagrant](https://www.vagrantup.com/)\n- Install [Git](https://git-scm.com/) (optional - only needed if you want to clone the repository)\n\n&nbsp;\n## Installation\n- **Option 1:** Clone GitHub repository\n - Open a terminal and navigate to where you want to install the 
program\n - Run the following command to clone the repository:\n \n `git clone https://github.com/jpitcher2012/logs-analysis.git`\n\n&nbsp;\n- **Option 2:** Download ZIP\n - Go to the [repository](https://github.com/jpitcher2012/logs-analysis) in GitHub\n - Click on the \"Clone or download\" button\n - Click \"Download ZIP\"\n \n &nbsp;\n- After installing, [download the data](https://d17h27t6h515a5.cloudfront.net/topher/2016/August/57b5f748_newsdata/newsdata.zip) and put the file `newsdata.sql` in the `vagrant` directory. \n \n&nbsp;\n## Program design\n- This program makes use of a Linux-based virtual machine. It has a PostgreSQL database and necessary support software.\n- The database includes 3 tables: \n - The authors table includes information about the authors of articles\n - The articles table includes the articles themselves.\n - The log table includes one entry for each time a user has accessed the site.\n- There is one python file, which connects to the database, runs 3 queries, and prints the results. \n \n&nbsp;\n## Setting up the database\n- Using the terminal, navigate to where you installed the code (in the `logs-analysis` directory)\n- Open the `vagrant` directory\n- Run the following commands to start up the virtual machine and populate the database:\n - `vagrant up`\n - `vagrant ssh`\n - `cd /vagrant`\n - `psql -d news -f newsdata.sql`\n \n&nbsp;\n## Running the report\n- Run `report.py` while connected to the virtual machine, in the `vagrant` directory " } ]
2
partitude/VTiP
https://github.com/partitude/VTiP
f70318e318aa69bfb5ffea0dc90009c726c54d4d
7a046402fbe6e43092948a4aed89710ebf261c27
6dbb1a62c0a2b883548d03d21b4060cf3f691397
refs/heads/master
2023-01-02T15:24:26.306808
2020-10-28T19:30:39
2020-10-28T19:30:39
297,888,433
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5573770403862, "alphanum_fraction": 0.5655737519264221, "avg_line_length": 18.33333396911621, "blob_id": "f07bc94e76d624e33ce442056c3fa4d376c7ff8c", "content_id": "faf159a33b33fb7d22fdd9aa7b4efa8063a1c48f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/ВТиП 9.3-10.12/10.5.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "x=input(\"Введите число:\")\r\ns = 0\r\nfor i in range(len(x)):\r\n s=s+int(x[i])*int(x[i])\r\n print(s)\r\nprint(\"сумма чисел:\", s)\r\n" }, { "alpha_fraction": 0.4263959527015686, "alphanum_fraction": 0.4720812141895294, "avg_line_length": 12.071428298950195, "blob_id": "d82db99e3263155634271f96961c8403962387b1", "content_id": "aac9c3c8374273041255eb22d849793fd0e7c0b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/ВТиП 5.1-9.2/7.5.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def func_m(x):\r\n \"\"\"\r\n\r\n >>> func_m(2)\r\n 32\r\n\r\n >>> func_m(3)\r\n 145\r\n \r\n \"\"\"\r\n return round (pow(x,4)+pow(4,x))\r\nimport doctest\r\n#роверяет тесты в доках\r\ndoctest.testmod()\r\n" }, { "alpha_fraction": 0.540229856967926, "alphanum_fraction": 0.5977011322975159, "avg_line_length": 16.88888931274414, "blob_id": "03554ee14067bae486f3a9c63f281ad905f9d53a", "content_id": "ce5bd5f58593ba9212ed22d3fafbf7c437e2700d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/ВТиП 5.1-9.2/7.4.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import mod7_4\r\nprint('введите число от 1 до 10)')\r\nfrom random import randint\r\nx=randint(1,10)\r\ny=int(input())\r\nif x!=y:\r\n mod7_4.f(x)\r\nelse:\r\n print('Победа!!!')\r\n\r\n\r\n" }, { "alpha_fraction": 0.4390243887901306, "alphanum_fraction": 0.4682926833629608, "avg_line_length": 20.55555534362793, "blob_id": "456017ee3e8ac0f12ab7b7e0f8db114982df8e37", "content_id": "8f0ee6ff5c0853584c75aa68cd994ada0dfd8ee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 51, "num_lines": 9, "path": "/ВТиП 9.3-10.12/10.11.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "from random import randint\r\nN=4\r\nx=[]\r\nfor i in range (N):\r\n x.append([0]*N)\r\n for j in range (N):\r\n x[i][j]=randint(1,10)\r\n print ( \"{:4d}\".format(x[i][j]), end = \"\" )\r\n print()\r\n\r\n" }, { "alpha_fraction": 0.5620437860488892, "alphanum_fraction": 0.5766423344612122, "avg_line_length": 22.909090042114258, "blob_id": "c940fdc5304a7a5ca8761910037ca402b1bc43c1", "content_id": "06c3e53143f3776e73d8647a0b804270ab7192be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/ВТиП 5.1-9.2/mod7_3.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "from math import sqrt\r\nfrom math import sin\r\nfrom math import degrees\r\ndef f():\r\n print('введите x')\r\n x=float(input())\r\n y=sqrt(1-pow(degrees(sin(x)),2))\r\n return (y)\r\nif __name__ == \"__main__\":\r\n 
x=float(input())\r\n print(sqrt(1-pow(degrees(sin(x)),2)))\r\n" }, { "alpha_fraction": 0.35576921701431274, "alphanum_fraction": 0.45192307233810425, "avg_line_length": 11, "blob_id": "d6e4db16aef20316524814640927a494432e2ad2", "content_id": "cf45a8cdae1709d6f6f3e85f98bcb17c8fb43ff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 27, "num_lines": 8, "path": "/ВТиП 9.3-10.12/10.3.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "L = [-8, 8, 6.0, 5, -3.1,3]\r\nz=len(L)//2\r\na=L[0:z]\r\nprint(a)\r\nb=L[z:len(L)]\r\nprint(b)\r\nr=b+a\r\nprint(r)\r\n" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.46666666865348816, "avg_line_length": 18.799999237060547, "blob_id": "2b2fb587e79f266b058f17ee671bddcd212a2d73", "content_id": "d4e615f4c33b7dc0d20869d4efa86e44a95d97b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/ВТиП 9.3-10.12/10.10.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "from random import randint\r\nN=4\r\nM=5\r\nx=[]\r\nfor i in range (N):\r\n x.append([0]*M)\r\n for j in range (M):\r\n x[i][j]=randint(1,10)\r\n print ( \"{:4d}\".format(x[i][j]), end = \"\" )\r\n print()\r\n\r\n" }, { "alpha_fraction": 0.375, "alphanum_fraction": 0.4097222089767456, "avg_line_length": 13.777777671813965, "blob_id": "2b2bce0bf69fc9b319aa3f48fbc6b4e8c6da4aca", "content_id": "cee4e8f802d2bc69bd7ac8251c6873cd8e4e8bf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 30, "num_lines": 9, "path": "/ВТиП 5.1-9.2/5.1.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def f(x):\r\n if (x>=-2.4) and (x<=5.7):\r\n print (x*x)\r\n else :\r\n print('4')\r\n\r\nprint('введите x')\r\nx=float(input())\r\nf(x)\r\n\r\n" }, { "alpha_fraction": 0.4569190740585327, "alphanum_fraction": 0.4725848436355591, "avg_line_length": 19.16666603088379, "blob_id": "9f2f8a2b62fea04261a8dc98e61b55695d2bf354", "content_id": "6a723ea337aef81dd5aeceecdf80880d9b693966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 38, "num_lines": 18, "path": "/ВТиП 5.1-9.2/mod7_4.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def f(x):\r\n \"\"\"запускается при неудаче\"\"\"\r\n print('Повторите еще раз')\r\n v=int(input())\r\n if v!=x:\r\n f(x)\r\n else:\r\n print('Победа')\r\n return v\r\nif __name__ == \"__main__\":\r\n print('введите число от 1 до 10)')\r\n from random import randint\r\n x=randint(1,10)\r\n y=int(input())\r\n if x!=y:\r\n f(x)\r\n else:\r\n print('Победа')\r\n\r\n" }, { "alpha_fraction": 0.4482758641242981, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 11.5, "blob_id": "229f199aa33d6d95c9797a2de2fec744b831bf7e", "content_id": "4037790a57849c68c7ce57a0c8d0bfbf4bbc457d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/ВТиП 5.1-9.2/7.2.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import mod7_2\r\nmod7_2.s()\r\n\r\n" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 
0.5353773832321167, "avg_line_length": 20.3157901763916, "blob_id": "b0c331141a646c297eb10e9542db9e3c846b0ae0", "content_id": "602ecd8d81b09742c8febc3b9a04c0f379aa884d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/ВТиП 5.1-9.2/8.1.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "s = \"У лукоморья 123 дуб зеленый 456\"\r\ne=s.count('я')\r\nif e>0:\r\n print('буква \"я\" есть, ее индекс:')\r\n print(s.find ('я'))\r\nelse:\r\n print('буквы \"я\" нет')\r\n e1=s.count('у')\r\nif s.count('у')>0:\r\n print('буква \"у\" есть, количество:')\r\n print(s.count('у'))\r\nelse:\r\n print('буквы \"у\" нет')\r\nif s.isalpha()== False:\r\n print(s.upper())\r\nl=len(s)\r\nif l>4:\r\n print(s.lower())\r\nprint(s.replace(s[0],'O'))\r\n" }, { "alpha_fraction": 0.3478260934352875, "alphanum_fraction": 0.42391303181648254, "avg_line_length": 19.5, "blob_id": "85692246260edd9926ff1d977bd67c2047b9d854", "content_id": "adb959333f6d1ce3d437a348c06b08aa507a8841", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/ВТиП 9.3-10.12/10.1.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def y(x):\r\n return x**2+3\r\nfor i in range (10,30,2):\r\n print('y[',i,']=',y(i))\r\n \r\n" }, { "alpha_fraction": 0.4475524425506592, "alphanum_fraction": 0.4755244851112366, "avg_line_length": 17.571428298950195, "blob_id": "2bf7c1d9b15e7940c19431fec35dfa0e5fb0f6df", "content_id": "a2a5e09486a2098dc206d002c39a8b4524376359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/ВТиП 5.1-9.2/5.3.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def chet(a):\r\n if a%2==1:\r\n print('число нечётное')\r\n elif a%2==0:\r\n print('число чётное')\r\na=int(input())\r\nchet(a)\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.4618473947048187, "alphanum_fraction": 0.4658634662628174, "avg_line_length": 23.894737243652344, "blob_id": "5ed87024eed919bb1d0b3ccc47e9eab9b30e54c9", "content_id": "d67c13afef75aca2033d1d89413763770277769f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/ВТиП 9.3-10.12/10.4.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "from random import randint\r\nrand=randint(1,5)\r\nt=False\r\nx=int(input('введите число'))\r\nif x==rand:\r\n print('Победа')\r\nelse:\r\n while t==False:\r\n if x>rand:\r\n print('меньше,попробуйте еще)')\r\n x=int(input('введите число'))\r\n if x==rand:\r\n t=True \r\n else:\r\n print('больше,попробуйте еще)')\r\n x=int(input('введите число'))\r\n if x==rand:\r\n t=True \r\nprint('Победа')\r\n \r\n" }, { "alpha_fraction": 0.4479166567325592, "alphanum_fraction": 0.5173611044883728, "avg_line_length": 19.69230842590332, "blob_id": "f53ecbb7ad3d8cc90447628a513564d14f449b68", "content_id": "e214bc0f3013fc729f75cadfc04d0e4e21449774", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 49, "num_lines": 13, "path": "/ВТиП 5.1-9.2/5.5.py", "repo_name": "partitude/VTiP", 
"src_encoding": "UTF-8", "text": "def money(kod,t):\r\n if kod==343:\r\n print( t*15)\r\n if kod==381:\r\n print (t*18)\r\n if kod==473:\r\n print (t*13)\r\n if kod==485:\r\n print (t*11)\r\nprint('введите код города и длительность звонка')\r\nkod=float(input())\r\nt=float(input())\r\nmoney(kod,t)\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.49193549156188965, "alphanum_fraction": 0.5080645084381104, "avg_line_length": 22.799999237060547, "blob_id": "a7b06a6019a80f923700aa70a1f8e57db2e827bb", "content_id": "7747d635eb40f5ab8451ce09883b7110e67f2e56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 75, "num_lines": 10, "path": "/ВТиП 5.1-9.2/up8_2.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def ss(s,n):\r\n if len(s)>n:\r\n print(s.upper())\r\n else:\r\n print(s)\r\n \"\"\"Возвращает строку в верхнем регистре если длина строки s больше n\"\"\"\r\nif __name__==\"__main__\":\r\n s1 =str(input())\r\n n1=int(input())\r\n ss(s1,n1)\r\n" }, { "alpha_fraction": 0.6878980994224548, "alphanum_fraction": 0.7388535141944885, "avg_line_length": 29.399999618530273, "blob_id": "df231121639f2d53694e04fee44833bad25f93a7", "content_id": "54aed8ffdb08f3a35f7eb7c00e9eaa337944c6ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 57, "num_lines": 5, "path": "/ВТиП 9.3-10.12/10.9.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "s='hsrga843ta8gadskagwi2fewkgenh9reahksgnf2nfnsdgwkgmeo5'\r\na_list=[int(x) for x in s if x.isdigit()]\r\nprint(a_list)\r\nprint(sum(a_list))\r\nprint(max(a_list))\r\n" }, { "alpha_fraction": 0.569767415523529, "alphanum_fraction": 0.5930232405662537, "avg_line_length": 17.11111068725586, "blob_id": "a92eb4211219b0b592f0635d242beb720dc48d80", "content_id": "e393a3edfdd96cee0a6014aaa5dfab36f2240734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 30, "num_lines": 9, "path": "/ВТиП 9.3-10.12/9.5.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "L=['самовар', 'весна', 'лето']\r\nfrom random import randint\r\nr=L[randint(0,2)]\r\nb=r[randint(0,len(r)-1)]\r\ni=r.index(b)\r\nn=list(r)\r\nn[i]='?'\r\nstrok=''.join(n)\r\nprint(strok)\r\n" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 13.5, "blob_id": "4d0fba6647fb31e47988a27375e87ec2c6441cf7", "content_id": "02f674cb5c95ab661e7bf52395c097ab3c842ed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 15, "num_lines": 4, "path": "/ВТиП 5.1-9.2/8.2.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import up8_2\r\ns =str(input())\r\nn=int(input())\r\nup8_2.ss(s,n)\r\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 15, "blob_id": "1ea3aa4fc57f00d353fb66e50742276d858df5c0", "content_id": "fbd663b5dc0f60a42913e64baab48e5a3636ee25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/ВТиП 5.1-9.2/7.3.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import 
mod7_3\r\nprint(mod7_3.f())\r\n" }, { "alpha_fraction": 0.5364583134651184, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 17, "blob_id": "3febd5ddb8bf427ab1cb42adcb1d6b8f488fffde", "content_id": "b056c3aad67f6247883cf0943fa609945952b64c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/ВТиП 5.1-9.2/5.4.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def s(a,b,c):\r\n p=(a+b+c)/2\r\n return sqrt(p*(p-a)*(p-b)*(p-c))\r\n\r\nprint('введите a,b,c')\r\nfrom math import sqrt\r\na=float(input())\r\nb=float(input())\r\nc=float(input())\r\nprint(s(a,b,c))\r\n\r\n" }, { "alpha_fraction": 0.5177865624427795, "alphanum_fraction": 0.5691699385643005, "avg_line_length": 17.461538314819336, "blob_id": "3dee2c1c156928da8323cd51c3b458cf33ff841f", "content_id": "bd9e23630df792941360879db3ad130ec4245579", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 31, "num_lines": 13, "path": "/ВТиП 5.1-9.2/9.1.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "L = [3, 6, 7, 4, -5, 4, 3, -1]\r\ne=sum(L)\r\nprint('сумма=',e)\r\nif e>2:\r\n print(len(L))\r\nmaxl=max(L)\r\nminl=min(L)\r\nprint('минимальный =',minl)\r\nprint('максимальный=',maxl)\r\nif maxl-minl>10:\r\n print(sorted(L))\r\nelse:\r\n print('разность меньше 10')\r\n" }, { "alpha_fraction": 0.48569726943969727, "alphanum_fraction": 0.5476757884025574, "avg_line_length": 28.925926208496094, "blob_id": "645d5977600041dd745980d62f1e4a349dc7a914", "content_id": "be910c67f56de91c5cb16615f0c17de34a45ee28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2031, "license_type": "no_license", "max_line_length": 67, "num_lines": 54, "path": "/ВТиП 9.3-10.12/10.12.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "from random import randint\r\nN=3\r\nM=5\r\nx=[]\r\nk=1\r\nsov0=0\r\nsov1=0\r\nsov2=0\r\nfor i in range (N):\r\n x.append([0]*M)\r\n for j in range (M):\r\n x[i][j]=randint(1,10)\r\n print ( \"{:4d}\".format(x[i][j]), end = \"\" )\r\n print()\r\nfor i in range (N):\r\n for j in range (M):\r\n for k in range (M):\r\n if x[i][j]==x[i][k]:\r\n if (i==0)and(k!=j):\r\n sov0=sov0+1\r\n if (i==1)and(k!=j):\r\n sov1=sov1+1\r\n if (i==2)and(k!=j):\r\n sov2=sov2+1\r\nif (sov0%3==0)and(sov0%2==0)and(sov0/6==1):\r\n sov0=sov0/2\r\nif (sov1%3==0)and(sov1%2==0)and(sov0/6==1):\r\n print(\"chtoto\", sov1)\r\n sov1=sov1/2\r\nif (sov2%3==0)and(sov2%2==0)and(sov0/6==1):\r\n print(\"chtoto\", sov2)\r\n sov2=sov2/2\r\nprint(\"Одинаковых чисел в строке 1:\",sov0)\r\nprint(\"Одинаковых чисел в строке 2:\",sov1)\r\nprint(\"Одинаковых чисел в строке 3:\",sov2)\r\nif sov0>sov1:\r\n if sov0>sov2:\r\n print(\"В 1 строке больше всего одинаковых чисел\")\r\n else:\r\n print(\"В 3 строке больше всего одинаковых чисел\")\r\nelse:\r\n if sov1>sov2:\r\n print(\"В 2 строке больше всего одинаковых чисел\")\r\n elif sov2>sov0:\r\n print(\"В 3 строке больше всего одинаковых чисел\")\r\nif (sov0==sov1)and(sov2==sov0):\r\n print(\"В строках одинаковое кол-во одинаковых чисел\")\r\nelse:\r\n if (sov0==sov1)and(sov2<sov0):\r\n print(\"В строках 1 и 2 одинаковое кол-во одинаковых чисел\")\r\n if (sov1==sov2)and(sov0<sov2):\r\n print(\"В строках 2 и 3 одинаковое кол-во одинаковых чисел\")\r\n if 
(sov0==sov2)and(sov1<sov2):\r\n print(\"В строках 1 и 3 одинаковое кол-во одинаковых чисел\")\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5465116500854492, "alphanum_fraction": 0.6162790656089783, "avg_line_length": 26.66666603088379, "blob_id": "d7039e3cb4d8d115fda6a0222e4896cdb6e629d9", "content_id": "cd89dba1b268bd0e18f081dbd8e655dd82efb59b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/ВТиП 5.1-9.2/7.1.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import Ger\r\nx1, x2, x3 = map(float, input().split( ))\r\nprint(Ger.square(x1, x2, x3))\r\n" }, { "alpha_fraction": 0.6243094205856323, "alphanum_fraction": 0.6408839821815491, "avg_line_length": 30.909090042114258, "blob_id": "afad5487af5ba24e2efcfd0c19bcdfdf6770764f", "content_id": "959b486d5a02d840212f58683a3dbd12d1507710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 113, "num_lines": 11, "path": "/ВТиП 9.3-10.12/10.8.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "text = 'Мой дядя самых честных правил Когда не в шутку занемог Он уважать себя заставил И лучше выдумать не мог'\r\nprint(text)\r\nmaximum = 0\r\nx=0\r\nfor index,word in enumerate(text.split()):\r\n if len(word) > maximum:\r\n maximum = len(word)\r\n print (word)\r\n x1=x\r\n x=x+1\r\nprint(f'В самом длинном слове {maximum} букв и это {x1+1} слово')\r\n" }, { "alpha_fraction": 0.47894737124443054, "alphanum_fraction": 0.47894737124443054, "avg_line_length": 15.272727012634277, "blob_id": "89020c60fc659c6c6f74bff9eb36ddd9b65f5862", "content_id": "d7ff673b78eb321a19f4038b897985f99466d652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 34, "num_lines": 11, "path": "/ВТиП 5.1-9.2/5.2.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def max(a,b):\r\n if a>b:\r\n return a\r\n elif b>a:\r\n return b\r\n elif a==b:\r\n return ('числа одинаковы')\r\na=int(input())\r\nb=int(input())\r\nmax(a,b)\r\nprint(max(a,b))\r\n" }, { "alpha_fraction": 0.6067415475845337, "alphanum_fraction": 0.6217228174209595, "avg_line_length": 18.538461685180664, "blob_id": "e46e3927a435a37b54c118f4240712f0b5049a90", "content_id": "0c95b83754d7264fcb27127bf84494f66f44aa4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 33, "num_lines": 13, "path": "/ВТиП 5.1-9.2/7.8.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "import random\r\nprint('У первого выпало:')\r\na = random.randint(1, 6)\r\nprint(a)\r\nprint('У второго выпало:')\r\nb = random.randint(1, 6)\r\nprint(b)\r\nif a > b:\r\n print('Победил первый игрок')\r\nelif a==b:\r\n print('Ничья')\r\nelif a<b:\r\n print('Победил второй игрок')\r\n" }, { "alpha_fraction": 0.4065934121608734, "alphanum_fraction": 0.4670329689979553, "avg_line_length": 20.75, "blob_id": "a001f3db928e48654029c322f9a4e4a133199110", "content_id": "1687e1f978b16e68eb0e1cceba6e9646ec162b72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/ВТиП 9.3-10.12/10.2.py", "repo_name": 
"partitude/VTiP", "src_encoding": "UTF-8", "text": "L = [-8, 8, 6.0, 5, 'строка', -3.1]\r\nsum=0\r\ni=0\r\nfor i in range(0,len(L)-1):\r\n if (type(L[i])==int) or (type(L[i])==float):\r\n print(sum)\r\n sum=sum+L[i]\r\nprint(sum)\r\n" }, { "alpha_fraction": 0.5920000076293945, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 18.83333396911621, "blob_id": "b822571c02352bceebe7b20d436c89c1db71696b", "content_id": "51335d4ccd5efde6d1e699ebe13fda917c450791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 55, "num_lines": 6, "path": "/ВТиП 9.3-10.12/10.6.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "ch=input(\"Вводите числа, для остановки введите `стоп`\")\r\ns=0\r\nwhile ch !=\"стоп\":\r\n s=s+int(ch)\r\n ch=input()\r\nprint(s)\r\n" }, { "alpha_fraction": 0.5064935088157654, "alphanum_fraction": 0.5411255359649658, "avg_line_length": 19, "blob_id": "f79af4275f7174e98472fbaf5348420778a82203", "content_id": "f5f462a4009ff51acfee3db8eed8bf39361064ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/ВТиП 5.1-9.2/7.6.py", "repo_name": "partitude/VTiP", "src_encoding": "UTF-8", "text": "def a(x,y):\r\n return x+(2+y)/pow(x,2)\r\ndef b(x,y):\r\n from math import sqrt\r\n return y+1/sqrt(pow(x,2)+10)\r\nx=int(input())\r\ny=int(input())\r\nz=a(x,y)/b(x,y)\r\nfrom math import sin\r\nq=2.8*sin(x)+abs(y)\r\nprint('z=',z,' q=',q)\r\n" } ]
30
zhouxzh/doa
https://github.com/zhouxzh/doa
a6216a6e773e15e90793b595517fdb097b23e601
25a82c460d4df7cc403d70280ac2ddbb72ca148d
643148aca3288af6129b2c1c0775de7910d2dbcf
refs/heads/master
2020-05-15T09:50:57.651545
2019-05-15T10:42:11
2019-05-15T10:42:11
182,183,326
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5186606049537659, "alphanum_fraction": 0.5793512463569641, "avg_line_length": 23.427350997924805, "blob_id": "27f0bf275182aeed3588b69f9cc8603586c40884", "content_id": "a757e688a80f7e6abbad92232c9934e536b49b77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2867, "license_type": "no_license", "max_line_length": 79, "num_lines": 117, "path": "/matrix_doa.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom matrix_lite import led\nimport time\nimport wave\nimport numpy as np\nimport pyroomacoustics as pra\nfrom scipy.io import wavfile\nimport queue\nimport sounddevice as sd\n\n# algorithms parameters\nc = 343. # speed of sound\nfs = 16000 # sampling frequency\nCHANNELS = 8\nRECORD_SECONDS = 3\nFRAMES = 16\n\n#Position [x,y] of each mic in the array (mm)\n#Mic\tX\tY\n#M1\t00.00\t0.00\n#M2\t-38.13\t3.58\n#M3\t-20.98\t32.04\n#M4\t11.97\t36.38\n#M5\t35.91\t13.32\n#M6\t32.81\t-19.77\n#M7\t5.00\t-37.97\n#M8\t-26.57\t-27.58\nR = [[0, -38.13, -20.98, 11.97, 35.91, 32.81, 5.00, -26.57],\n [0, 3.58, 32.04, 36.38, 13.32, -19.77, -37.97, -27.58]]\nR = np.array(R)\nR[0] = -R[0]\nR = R/1000\nprint(R)\n\n\nq = queue.Queue(FRAMES)\n\ndef audio_callback(indata, frames, time, status):\n if status:\n print(status, file=sys.stderr)\n if q.full():\n q.get()\n else:\n q.put(indata)\n\ndef show_direction(angle):\n #print('The angle is {}'.format(angle))\n direction = int(angle // (360/18))\n image = ['blue']*led.length\n image[direction] = 'red'\n led.set(image)\n \ndef led_off():\n print('shut down the LED')\n led.set('black')\n\n\n\ndef update_singal():\n global source_signal\n while not q.empty():\n data = q.get_nowait()\n shift = len(data)\n source_signal = np.roll(source_signal, -shift, axis=0)\n source_signal[-shift:, :] = data\n\ndef matrix_doa():\n global source_signal\n #print(source_signal)\n #algo_names = ['SRP', 'MUSIC', 'TOPS', 'CSSM', 'WAVES']\n algo_name = 'SRP'\n #print('The algorithms {} will be used.'.format(algo_name))\n nfft = 256 # FFT size\n ################################\n # Compute the STFT frames needed\n X = np.array([ \n pra.stft(source_signal[:,i], nfft, nfft // 2, transform=np.fft.rfft).T \n for i in range(CHANNELS) ])\n\n ##############################################\n # Construct the new DOA object\n # the max_four parameter is necessary for FRIDA only\n doa = pra.doa.algorithms[algo_name](R, fs, nfft, c=c)\n\n # this call here perform localization on the frames in X\n doa.locate_sources(X, freq_range=[1000, 3000])\n\n # doa.azimuth_recon contains the reconstructed location of the source\n angle = doa.azimuth_recon / np.pi * 180\n print(' Recovered azimuth:', angle, 'degrees')\n return(angle)\n\n\n\n#######################\n\n\ntry:\n source_signal = np.random.random((256*FRAMES, CHANNELS))\n stream = sd.InputStream(\n device=3,\n channels=8,\n samplerate=fs,\n callback=audio_callback)\n with stream:\n print('DOA starting')\n for i in range(int(100)):\n update_singal()\n angle = matrix_doa()\n print(angle)\n show_direction(angle)\n led_off()\nexcept KeyboardInterrupt:\n led_off()\n print('DOA finished')\n exit(0)\n\n \n\n\n\n" }, { "alpha_fraction": 0.6205533742904663, "alphanum_fraction": 0.6379446387290955, "avg_line_length": 20.440677642822266, "blob_id": "55f4e3c0ddd2be8ad41689ec850a3c4fdbfa9c6b", "content_id": "d843b5526fd25c5240d206ca264c25d9be324b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1265, "license_type": "no_license", "max_line_length": 69, "num_lines": 59, "path": "/matrix_record.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom matrix_lite import led\nimport time\nimport pyaudio\nimport wave\n\n\ndef show_direction(angle):\n print('The angle is {}'.format(angle))\n direction = int(angle // (360/18))\n image = ['blue']*led.length\n image[direction] = 'red'\n led.set(image)\n\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 8\nRATE = 16000\nRECORD_SECONDS = 5\nWAVE_OUTPUT_FILENAME = \"output.wav\"\n\np = pyaudio.PyAudio()\nfor i in range(p.get_device_count()):\n dev = p.get_device_info_by_index(i)\n name = dev['name'].encode('utf-8')\n print(i, name, dev['maxInputChannels'], dev['maxOutputChannels'])\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK,\n input_device_index=3)\n\nprint(\"* recording\")\n\nframes = []\n\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\nprint(\"* done recording\")\nstream.stop_stream()\nstream.close()\np.terminate()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(CHANNELS)\nwf.setsampwidth(p.get_sample_size(FORMAT))\nwf.setframerate(RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n\n\n\n\n#if __name__ == '__main__':\n# find_audio_index()\n" }, { "alpha_fraction": 0.6056782603263855, "alphanum_fraction": 0.6182965040206909, "avg_line_length": 20.133333206176758, "blob_id": "3c6c690bb7d923d9b91aa0c9cefb595762c5ac13", "content_id": "a606c3fbb1e55b3cc756b63cbda31d6f836662d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 60, "num_lines": 15, "path": "/find_mic_read_wav.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom scipy.io import wavfile\nimport numpy as np\nfrom gcc_phat import gcc_phat\n\nfs, data = wavfile.read('song.wav')\nprint(data.shape)\ntau = np.zeros((4,4))\nfor i in range(4):\n for j in range(4):\n tau[i, j], _ = gcc_phat(data[:,i], data[:,j], fs=fs)\n print(tau[i,j])\n\nprint(tau)\n" }, { "alpha_fraction": 0.564039409160614, "alphanum_fraction": 0.5935960412025452, "avg_line_length": 19.299999237060547, "blob_id": "5eed753f6b103ea0ea9f7a43ffe993749f0f023e", "content_id": "a39ca2baf01000f7b8da799280a9fe12de3e0184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 42, "num_lines": 20, "path": "/matrix_led.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom matrix_lite import led\nimport time\n\n\ndef show_direction(angle):\n print('The angle is {}'.format(angle))\n direction = int(angle // (360/18))\n image = ['blue']*led.length\n image[direction] = 'red'\n led.set(image)\n\n\nif __name__ == '__main__':\n led.set('black')\n for angle in range(360):\n show_direction(angle)\n time.sleep(0.05)\n led.set('black')\n" }, { "alpha_fraction": 0.5939947962760925, "alphanum_fraction": 0.6174934506416321, "avg_line_length": 21.52941131591797, "blob_id": "668de1fcc635f3f2d22b682ef49185c2452538d0", "content_id": "70ddedb73b0b6196c3f25da8faa72964204c25e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 72, "num_lines": 34, "path": 
"/find_mic.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom scipy.io import wavfile\nimport numpy as np\nfrom gcc_phat import gcc_phat\nfrom mic_array import MicArray\n\nimport signal\nimport threading\n\nis_quit = threading.Event()\ndef signal_handler(sig, num):\n is_quit.set()\n print('Quit')\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nfs = 16000\n\nfrom pixels import Pixels, pixels\n\nwith MicArray(fs, 4, fs) as mic:\n for chunk in mic.read_chunks():\n direction = mic.get_direction(chunk)\n print(int(direction))\n pixels.wakeup(direction)\n tau = np.zeros((4,4))\n for i in range(4):\n for j in range(4):\n tau[i, j], _ = gcc_phat(chunk[i::4], chunk[j::4], fs=fs)\n if is_quit.is_set():\n break\n\n print(tau*343*100)\n" }, { "alpha_fraction": 0.5798376202583313, "alphanum_fraction": 0.6278755068778992, "avg_line_length": 24.465517044067383, "blob_id": "5248447f01072e075bf2981dd05d89c58689f8fa", "content_id": "1126a0c3da018f1590998d98812fa76568e45c23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1478, "license_type": "no_license", "max_line_length": 103, "num_lines": 58, "path": "/fire_doa.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport pyroomacoustics as pra\nfrom pyroomacoustics.doa import circ_dist\nfrom mic_array import MicArray\nimport signal\nimport time\nfrom pixels import Pixels, pixels\nimport threading\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, clear_output\nMIC_DISTANCE_4 = 0.08127\n\n\n#R = pra.circular_2D_array([0,0], 4, 0, MIC_RADIUS)\n#R = np.array([[-1, 0, 1, 0], [0, 1, 0, -1]]) * MIC_DISTANCE_4/2\nR = np.array([[-1, 1, 1, -1], [-1, -1, 1, 1]]) * MIC_DISTANCE_4 / 2 / 2**0.5\nprint(R)\n\n\nis_quit = threading.Event()\n\ndef signal_handler(sig, num):\n is_quit.set()\n print('Quit')\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nc = 343\nfs = 16000\nnfft = 512\n\n\n#Possible dos algorithms: SRP, MUSIC, TOPS, CSSM, WAVES\ndoa = pra.doa.algorithms['SRP'](R, fs, nfft, c=c)\n\n\n\nplt.figure()\nwith MicArray(fs, 4, fs/4) as mic:\n start = time.time()\n for chunk in mic.read_chunks():\n #print(chunk.shape)\n #pixels.wakeup(np.random.randint(0, 360, 1))\n\n X = np.array([pra.stft(chunk[i::4], nfft, nfft//2, transform=np.fft.rfft).T for i in range(4)])\n doa.locate_sources(X, freq_range=[500, 3000])\n direction = doa.azimuth_recon / np.pi * 180\n print('Time: ', time.time()-start, ' Recovered azimuth: ', direction)\n pixels.wakeup(direction)\n #plt.close()\n #doa.polar_plt_dirac()\n #plt.draw()\n #plt.pause(0.0001)\n\n if is_quit.is_set():\n break\n\n" }, { "alpha_fraction": 0.5268630981445312, "alphanum_fraction": 0.5424609780311584, "avg_line_length": 19.60714340209961, "blob_id": "97284143f98e4a2d560f430474e259ec23833957", "content_id": "21f14b123299fbaa66c564e4fbb07cf7d1028ae9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/pixels_direction.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "import time\nfrom pixels import Pixels, pixels\nfrom fire_led_pattern import FireLedPattern\n\nif __name__ == '__main__':\n\n pixels.pattern = FireLedPattern(show=pixels.show)\n\n for direction in range(360):\n pixels.wakeup(direction)\n time.sleep(0.1)\n\n while True:\n\n try:\n pixels.wakeup()\n time.sleep(3)\n 
pixels.think()\n time.sleep(3)\n pixels.speak()\n time.sleep(6)\n pixels.off()\n time.sleep(3)\n except KeyboardInterrupt:\n break\n\n\n pixels.off()\n" }, { "alpha_fraction": 0.5772104859352112, "alphanum_fraction": 0.6189290285110474, "avg_line_length": 26.672412872314453, "blob_id": "68fe03ad2947c5ba94cc9eac4b25a6bce7db745e", "content_id": "d9f2a8508bae311bdf065014f1d7e1c79375902f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 104, "num_lines": 58, "path": "/stft/stft_fire.py", "repo_name": "zhouxzh/doa", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport pyroomacoustics as pra\nfrom pyroomacoustics.doa import circ_dist\nfrom mic_array import MicArray\nimport signal\nimport time\nimport threading\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, clear_output\nMIC_DISTANCE_4 = 0.08127\n\n\n#R = pra.circular_2D_array([0,0], 4, 0, MIC_RADIUS)\n#R = np.array([[-1, 0, 1, 0], [0, 1, 0, -1]]) * MIC_DISTANCE_4/2\nR = np.array([[-1, 1, 1, -1], [-1, -1, 1, 1]]) * MIC_DISTANCE_4 / 2 / 2**0.5\nprint(R)\n\n\nis_quit = threading.Event()\n\ndef signal_handler(sig, num):\n is_quit.set()\n print('Quit')\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nc = 343\nfs = 16000\nnfft = 256\n\n\n#Possible dos algorithms: SRP, MUSIC, TOPS, CSSM, WAVES\ndoa = pra.doa.algorithms['SRP'](R, fs, nfft, c=c)\n\n\nimport scipy as sp\nplt.figure()\nwith MicArray(fs, 4, fs/4) as mic:\n start = time.time()\n for i, chunk in enumerate(mic.read_chunks()):\n x = chunk[0::4]\n f, t, Zxx = sp.signal.stft(x, fs, nperseg=1000)\n plt.pcolormesh(t, f, np.abs(Zxx))\n plt.title('STFT Magnitude')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n plt.savefig('fire_stft_%d.png'%i)\n print('This is %d image'%i)\n #X = np.array([pra.stft(chunk[i::4], nfft, nfft//2, transform=np.fft.rfft).T for i in range(4)])\n #doa.locate_sources(X, freq_range=[500, 3000])\n #direction = doa.azimuth_recon / np.pi * 180\n #print('Time: ', time.time()-start, ' Recovered azimuth: ', direction)\n #pixels.wakeup(direction)\n\n if is_quit.is_set():\n break\n\n" } ]
8
brunojhovany/bulkmailing
https://github.com/brunojhovany/bulkmailing
898fa75221086a3f1a710b9a149ff34f77bfd394
51053cf915f219215badc80d1053c3093ce01c01
26603459124b12ff889280bf929097f5b3d54ab6
refs/heads/master
2023-08-16T03:49:46.793056
2021-09-11T12:54:03
2021-09-11T12:54:03
405,075,882
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4420289993286133, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 14.333333015441895, "blob_id": "983f8337a48a68969ae4537decfc6b71dce56676", "content_id": "5ec4035ee922cec6c8eaf84d0991d270e7ec72dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 138, "license_type": "no_license", "max_line_length": 22, "num_lines": 9, "path": "/requirements.txt", "repo_name": "brunojhovany/bulkmailing", "src_encoding": "UTF-8", "text": "et-xmlfile==1.1.0\nloguru==0.5.3\nnumpy==1.21.2\nopenpyxl==3.0.7\npandas==1.3.2\npython-dateutil==2.8.2\npytz==2021.1\nPyYAML==5.4.1\nsix==1.16.0\n" }, { "alpha_fraction": 0.5821917653083801, "alphanum_fraction": 0.5958904027938843, "avg_line_length": 17.125, "blob_id": "ec0de1b04a07d2f9b36a7c820de5adccd6c804ac", "content_id": "2c27ca511841ec8ea19c60224cf5f52aeb9a9acd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 146, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/docker-compose.yaml", "repo_name": "brunojhovany/bulkmailing", "src_encoding": "UTF-8", "text": "version: '3.6'\n\nservices:\n bulk-mailing:\n build: .\n command: tail -F anything # only for development\n volumes:\n - ./:/usr/src/app\n\n" }, { "alpha_fraction": 0.6294070482254028, "alphanum_fraction": 0.635817289352417, "avg_line_length": 29.45121955871582, "blob_id": "659aa950845e3b37eec6489b580fd0e7bf927ed5", "content_id": "bfeaa04aecea7b5e7bc999bf7ba5b2941fe8178d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2496, "license_type": "no_license", "max_line_length": 102, "num_lines": 82, "path": "/app.py", "repo_name": "brunojhovany/bulkmailing", "src_encoding": "UTF-8", "text": "import time\nimport ssl\nimport smtplib\nfrom loguru import logger\nfrom yaml import safe_load\nfrom datetime import datetime\nfrom yaml.error import YAMLError\nfrom pandas import read_excel, DataFrame\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n\nconfig = None\ndate_now = datetime.now()\ndate_str = date_now.strftime('%Y-%m-%d')\nlogger.add(f'logs/file_{date_str}.log', rotation=\"12:00\", level='ERROR')\n\n\nwith open('config.yaml', 'r') as stream:\n try:\n config = safe_load(stream)\n config = config['config']\n except YAMLError as err:\n err_inf = f'Error al cargar el archivo de configuracion | {err}'\n logger.error(err_inf)\n raise Exception(err_inf)\n\n\n\ndef sendMail():\n email_config = config['email']\n email_sender = email_config['user_name']\n email_password = email_config['password']\n\n smtp_ssl_config = email_config['SMTP_SSL']\n context = ssl.create_default_context()\n server = smtplib.SMTP_SSL(smtp_ssl_config['host'], int(smtp_ssl_config['port']), context=context )\n server.ehlo()\n try:\n server.login(user=email_sender,password= email_password)\n logger.info('Inicio de sesion con la cuenta de correo exitoso.')\n except smtplib.SMTPAuthenticationError as err:\n err_inf = f'Error al iniciar sesion en la cuenta de correo | {err}'\n logger.error(err_inf)\n raise Exception(err_inf)\n\n data :DataFrame = read_excel('data/example-data.xlsx')\n \n with open('creative/preview.html', 'r', encoding=\"utf8\") as file:\n creative = file.read().replace('\\n', '')\n\n\n html = creative\n email_data = email_config['email_data']\n message = MIMEMultipart(\"alternative\")\n message[\"From\"] = email_sender\n message.attach(MIMEText(html, \"html\"))\n count 
= 0\n\n for index, element in data.iterrows():\n name_reader: str = element['First Name']\n # Reset the Subject header so repeated assignment does not stack duplicates\n del message[\"Subject\"]\n message[\"Subject\"] = f'{name_reader} ' + email_data['subject']\n count += 1\n logger.info(str(count) + \". Sent to \" + element['Email'])\n\n # Send to the current recipient only, not the whole 'Email' column\n server.sendmail(\n email_sender, element['Email'], message.as_string()\n )\n\n if(count%80 == 0):\n server.quit()\n print(\"Server cooldown for 100 seconds\")\n time.sleep(100)\n # quit() closes the socket, so reopen the SSL connection before logging in again\n server = smtplib.SMTP_SSL(smtp_ssl_config['host'], int(smtp_ssl_config['port']), context=context)\n server.ehlo()\n server.login(email_sender, email_password)\n server.quit()\n\n\nif __name__ == '__main__':\n logger.info('Iniciando aplicacion..')\n sendMail()" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.6788991093635559, "avg_line_length": 12.11111068725586, "blob_id": "6897ec32a7981a2c360557fa5c486dbc2c729dd9", "content_id": "8c4a832d019e123f0bae5d6c44da0a98b750e181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 109, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/Dockerfile", "repo_name": "brunojhovany/bulkmailing", "src_encoding": "UTF-8", "text": "FROM python:3.8\n\nWORKDIR /usr/src/app\n\nCOPY . .\n\nRUN pip install -r requirements.txt\n\nCMD [\"python\",\"app.py\"]\n" } ]
4
majagarbulinska/pyro-cov
https://github.com/majagarbulinska/pyro-cov
98ef7fc9716692fccb4d9028c81c1e4e47f49e8e
fdbd37843618a3269b24430b8e66536583773046
b6c7ff1b2f048d3523b591ae56227be88f701b2c
refs/heads/master
2023-08-23T05:02:17.030999
2021-11-02T14:40:33
2021-11-02T14:40:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6140190362930298, "alphanum_fraction": 0.6186003684997559, "avg_line_length": 37.293861389160156, "blob_id": "f6533f04a8dbde8ab3df8f18609b34a5a202ee0e", "content_id": "adf0cc72236d8fb0abccc2db3c6ce503f49e2406", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8737, "license_type": "permissive", "max_line_length": 87, "num_lines": 228, "path": "/pyrocov/geo.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport logging\nimport os\nimport typing\nfrom collections import OrderedDict, defaultdict\nfrom typing import List\n\nimport pandas as pd\nimport torch\n\nlogger = logging.getLogger(__name__)\n\nJHU_DIRNAME = os.path.expanduser(\n \"~/github/CSSEGISandData/COVID-19/csse_covid_19_data/csse_covid_19_time_series\"\n)\n\nGISAID_DEDUP = {\n \"South America / Brazil / São Paulo\": \"South America / Brazil / Sao Paulo\",\n}\n\n# To update see explore-jhu-time-series.ipynb\nGISAID_TO_JHU = {\n \"a\": (\"us\",), # typo?\n \"anguilla\": (\"united kingdom\", \"anguilla\"),\n \"antigua\": (\"antigua and barbuda\",),\n \"aruba\": (\"netherlands\", \"aruba\"),\n \"belgique\": (\"belgium\",),\n \"bermuda\": (\"united kingdom\", \"bermuda\"),\n \"british virgin islands\": (\"united kingdom\", \"british virgin islands\"),\n \"bonaire\": (\"netherlands\", \"bonaire, sint eustatius and saba\"),\n \"bosni and herzegovina\": (\"bosni and herzegovina\",),\n \"burkinafaso\": (\"burkina faso\",),\n \"caribbean\": (\"dominican republic\",), # most populous island\n \"cayman islands\": (\"united kingdom\", \"cayman islands\"),\n \"canary islands\": (\"spain\",),\n \"cote divoire\": (\"cote d'ivoire\",),\n \"crimea\": (\"ukraine\",), # or \"russia\"?\n \"curacao\": (\"netherlands\", \"curacao\"),\n \"czech repubic\": (\"czechia\",),\n \"congo\": (\"congo (kinshasa)\",),\n \"cotedivoire\": (\"cote d'ivoire\",),\n \"czech republic\": (\"czechia\",),\n \"côte d'ivoire\": (\"cote d'ivoire\",),\n \"democratic republic of the congo\": (\"congo (kinshasa)\",),\n \"england\": (\"united kingdom\",),\n \"faroe islands\": (\"denmark\", \"faroe islands\"),\n \"french guiana\": (\"france\", \"french guiana\"),\n \"french polynesia\": (\"france\", \"french polynesia\"),\n \"gaborone\": (\"botswana\",),\n \"gibraltar\": (\"united kingdom\", \"gibraltar\"),\n \"guadeloupe\": (\"france\", \"guadeloupe\"),\n \"guinea bissau\": (\"guinea-bissau\",),\n \"guam\": (\"us\", \"guam\"),\n \"guyane\": (\"france\", \"french guiana\"),\n \"guyane francaise\": (\"france\", \"french guiana\"),\n \"hong kong\": (\"china\", \"hong kong\"),\n \"kavadarci\": (\"north macedonia\",),\n \"kazkahstan\": (\"kazakhstan\",),\n \"kochani\": (\"north macedonia\",),\n \"la reunion\": (\"france\", \"reunion\"),\n \"martinique\": (\"france\", \"martinique\"),\n \"mayotte\": (\"france\", \"mayotte\"),\n \"méxico\": (\"mexico\",),\n \"montserrat\": (\"united kingdom\", \"montserrat\"),\n \"myanmar\": (\"burma\",),\n \"netherlans\": (\"netherlands\",),\n \"niogeria\": (\"nigeria\",),\n \"northern mariana islands\": (\"us\", \"northern mariana islands\"),\n \"palestine\": (\"israel\",), # ?\n \"polynesia\": (\"france\", \"french polynesia\"),\n \"puerto rico\": (\"us\", \"puerto rico\"),\n \"republic of congo\": (\"congo (brazzaville)\",),\n \"republic of the congo\": (\"congo (brazzaville)\",),\n \"reunion\": (\"france\", 
\"reunion\"),\n \"réunion\": (\"france\", \"reunion\"),\n \"romaina\": (\"romania\",),\n \"saint barthelemy\": (\"france\", \"saint barthelemy\"),\n \"saint barthélemy\": (\"france\", \"saint barthelemy\"),\n \"saint martin\": (\"france\", \"st martin\"),\n \"sint eustatius\": (\"netherlands\", \"bonaire, sint eustatius and saba\"),\n \"sint maarten\": (\"france\", \"st martin\"),\n \"slovaia\": (\"slovakia\",),\n \"south korea\": (\"korea, south\",),\n \"st eustatius\": (\"netherlands\", \"bonaire, sint eustatius and saba\"),\n \"st. lucia\": (\"saint lucia\",),\n \"swizterland\": (\"switzerland\",),\n \"taiwan\": (\"china\",), # ?\n \"trinidad\": (\"trinidad and tobago\",),\n \"turks and caicos islands\": (\"united kingdom\", \"turks and caicos islands\"),\n \"united states\": (\"us\",),\n \"usa\": (\"us\",),\n \"usa? ohio\": (\"us\", \"ohio\"),\n \"union of the comoros\": (\"comoros\",),\n \"viet nam\": (\"vietnam\",),\n \"wallis and futuna\": (\"france\", \"wallis and futuna islands\"),\n \"slovak republic\": (\"slovakia\",),\n \"the bahamas\": (\"bahamas\",),\n \"rio de janeiro\": (\"brazil\",),\n \"parana\": (\"brazil\",),\n \"u.s. virgin islands\": (\"us\", \"us virgin islands\"),\n \"wallis and futuna islands\": (\"france\", \"wallis and futuna islands\"),\n}\n\n# To update see explore-jhu-time-series.ipynb\nJHU_TO_UN = {\n \"bolivia\": \"bolivia (plurinational state of)\",\n \"brunei\": \"brunei darussalam\",\n \"burma\": \"myanmar\",\n \"congo (brazzaville)\": \"congo\",\n \"congo (kinshasa)\": \"democratic republic of the congo\",\n \"cote d'ivoire\": \"côte d'ivoire\",\n \"diamond princess\": None, # cruise ship\n \"iran\": \"iran (islamic republic of)\",\n \"korea, south\": \"republic of korea\",\n \"kosovo\": \"serbia\",\n \"laos\": \"lao people's democratic republic\",\n \"moldova\": \"republic of moldova\",\n \"ms zaandam\": None, # cruise ship\n \"russia\": \"russian federation\",\n \"summer olympics 2020\": None, # event\n \"syria\": \"syrian arab republic\",\n \"taiwan*\": \"china, taiwan province of china\",\n \"tanzania\": \"united republic of tanzania\",\n \"us\": \"united states of america\",\n \"venezuela\": \"venezuela (bolivarian republic of)\",\n \"vietnam\": \"viet nam\",\n \"west bank and gaza\": \"israel\",\n \"slovak republic\": \"slovakia\",\n \"the bahamas\": \"bahamas\",\n \"parana\": \"brazil\",\n}\n\n\ndef read_csv(basename):\n return pd.read_csv(os.path.join(JHU_DIRNAME, basename), header=0)\n\n\ndef pd_to_torch(df, *, columns):\n if isinstance(columns, slice):\n columns = df.columns[columns]\n df = df[columns]\n return torch.from_numpy(df.to_numpy()).type_as(torch.tensor(()))\n\n\ndef parse_date(string):\n month, day, year_since_2000 = map(int, string.split(\"/\"))\n return datetime.datetime(day=day, month=month, year=2000 + year_since_2000)\n\n\ndef gisaid_normalize(gisaid_location):\n return GISAID_DEDUP.get(gisaid_location, gisaid_location)\n\n\ndef gisaid_to_jhu_location(\n gisaid_locations: typing.List[str],\n jhu_us_df: pd.DataFrame,\n jhu_global_df: pd.DataFrame,\n):\n \"\"\"\n Fuzzily match GISAID locations with Johns Hopkins locations.\n\n :param list gisaid_locations: A list of (unique) GISAID location names.\n :param pandas.DataFrame jhu_us_df: Johns Hopkins daily cases dataframe,\n ``time_series_covid19_confirmed_US.csv``.\n :param pandas.DataFrame jhu_global_df: Johns Hopkins daily cases dataframe,\n ``time_series_covid19_confirmed_global.csv``.\n :returns: A nonnegative weight matrix of shape\n ``(len(gisaid_locations), len(jhu_us_df) + 
len(jhu_global_df))``\n assuming GISAID locations are non-overlapping.\n \"\"\"\n assert isinstance(gisaid_locations, list)\n logger.info(\"Joining GISAID and JHU region codes\")\n\n # Extract location tuples from JHU data.\n jhu_locations: List[tuple] = []\n for i, row in jhu_us_df[[\"Country_Region\", \"Province_State\", \"Admin2\"]].iterrows():\n a, b, c = row\n if isinstance(c, str):\n jhu_locations.append((a.lower(), b.lower(), c.lower()))\n else:\n jhu_locations.append((a.lower(), b.lower()))\n for i, row in jhu_global_df[[\"Country/Region\", \"Province/State\"]].iterrows():\n a, b = row\n if isinstance(b, str):\n jhu_locations.append((a.lower(), b.lower()))\n else:\n jhu_locations.append((a.lower(),))\n assert len(jhu_locations) == len(jhu_us_df) + len(jhu_global_df)\n logger.info(\n f\"Matching {len(gisaid_locations)} GISAID regions \"\n f\"to {len(jhu_locations)} JHU fuzzy regions\"\n )\n jhu_location_ids = {k: i for i, k in enumerate(jhu_locations)}\n\n # Extract location tuples from GISAID data.\n gisaid_to_jhu = OrderedDict(\n (key, tuple(p.strip() for p in key.lower().split(\"/\")[1:]))\n for key in gisaid_locations\n )\n\n # Ensure each GISAID location maps at least approximately to some JHU tuple.\n jhu_prefixes = defaultdict(list) # maps prefixes to full JHU locations\n for value in jhu_locations:\n for i in range(1 + len(value)):\n jhu_prefixes[value[:i]].append(value)\n for key, value in list(gisaid_to_jhu.items()):\n if value and value[0] in GISAID_TO_JHU:\n value = GISAID_TO_JHU[value[0]] + value[1:]\n while value not in jhu_prefixes:\n value = value[:-1]\n if not value:\n raise ValueError(f\"Failed to find GISAID loctaion '{key}' in JHU data\")\n gisaid_to_jhu[key] = value\n\n # Construct a matrix many-to-many matching GISAID locations to JHU locations.\n matrix = torch.zeros(len(gisaid_locations), len(jhu_locations))\n for i, (gisaid_tuple, jhu_prefix) in enumerate(gisaid_to_jhu.items()):\n for jhu_location in jhu_prefixes[jhu_prefix]:\n j = jhu_location_ids[jhu_location]\n matrix[i, j] = 1\n # Distribute JHU cases evenly among GISAID locations.\n matrix /= matrix.sum(-1, True)\n matrix[~(matrix > 0)] = 0 # remove NANs\n\n return matrix\n" }, { "alpha_fraction": 0.5907859206199646, "alphanum_fraction": 0.622349739074707, "avg_line_length": 41.378379821777344, "blob_id": "6e9ab68ae6d735204e9b3bb3c5abd12feaf552dc", "content_id": "174c481a1b750e9ea585f7454c983962f46636c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6273, "license_type": "permissive", "max_line_length": 122, "num_lines": 148, "path": "/pyrocov/plot_additional_figs.R", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "### Create additional figures for pyro-cov paper\n# April 13 2021\n# [email protected]\n\nlibrary(tidyverse)\nlibrary(cowplot)\nlibrary(gt)\nlibrary(ggpubr)\n\ntop_strains <- read_tsv(\"~/Dropbox/COVID/pyro-cov/paper/strains.tsv\")#[,1:3]\nnames(top_strains) <- c(\"rank\" ,\"strain\", \"R_eff\", \"lower_ci\",\"upper_ci\",\"Cases_per_day\", \"Cases_total\", \"birthday\")\ntop_strains <- top_strains %>% mutate(log_samples = log10(Cases_per_day)) \np1 <- ggplot(top_strains, aes(x = R_eff, y = Cases_per_day, label = strain)) + geom_text(size = 3) + \n #geom_errorbarh(aes(xmin = lower_ci, xmax = upper_ci)) +\n theme_bw() + \n #geom_smooth(method = \"lm\") + \n labs(x = \"Fold Increase in Reproductive Number\", y = \"Log10(Cases per Day)\") + \n scale_y_log10()\np2 <- ggplot(top_strains, aes(x = R_eff)) 
+ geom_histogram(binwidth = 0.05) + theme_bw() + \n labs(x = \"Fold Increase in Reproductive Number\", y = \"Count\")\n\np3 <- ggplot(top_strains, aes(y = R_eff, x = birthday, label = strain)) +\n geom_text(size = 3) + theme_bw() + \n labs( x= \"Date of Lineage Emergence\", y = \"Fold Increase in Reproductive Number\") + \n geom_smooth(method = \"lm\")\n\np3\nm1 <- lm(data = top_strains, Cases_per_day ~ R_eff)\nsummary(m1)\nm2 <- lm(data = top_strains, R_eff ~ birthday)\nsummary(m2)\n\nplot_grid(p2, p1,p3, nrow = 1, labels = c(\"A\", \"B\", \"C\"))\nggsave(\"~/Dropbox/COVID/pyro-cov/paper/spectrum_transmissibility.jpg\", height = 4, width = 12)\n\n\n\n# look at mutation occurance summaries\n\n\nhigh_transmit_muts <- top_muts %>% filter(rank < 101)\nlow_transmit_muts <- top_muts %>% filter(rank > 100)\nquantile(low_transmit_muts$num_emergences, c(0.05, 0.5, 0.95))\nquantile(high_transmit_muts$num_emergences, c(0.05, 0.5, 0.95))\nsummary(low_transmit_muts$num_emergences)\nsummary(high_transmit_muts$num_emergences)\nks.test(low_transmit_muts$num_emergences, high_transmit_muts$num_emergences)\n\np0.1 <- ggplot(top_muts, aes(x = num_emergences)) + \n geom_histogram(bins = 10) + \n scale_x_log10() + \n theme_bw() + \n labs(x = \"Number of Emergences\", title = \"Bottom 2132 Most Transmissible Lineages\")\np0.1\n\np0.2 <- ggplot(high_transmit_muts, aes(x = num_emergences)) + \n geom_histogram(bins = 10) + \n scale_x_log10() + \n theme_bw() + \n labs(x = \"Number of Emergences\", title = \"100 Most Transmissible Mutations\")\np0.2\nemergence_dist <- rbind(data.frame(num_emergences = high_transmit_muts[c(\"num_emergences\")], Rank = \"Top 100\"), \n data.frame(num_emergences = low_transmit_muts[c(\"num_emergences\")], Rank = \"Bottom 2132\"))\n\np0.3 <- ggplot(emergence_dist, aes(x = num_emergences, fill = Rank)) +\n geom_histogram(aes(y = stat(count) / sum(count)),bins = 10) + \n scale_x_log10() + \n theme_bw() + \n labs(x = \"Number of Emergences\")\np0.3\nplot_grid(p0.1, p0.2, labels = c(\"A\", \"B\"))\n\nggsave(\"~/Dropbox/COVID/pyro-cov/paper/convergent_evolution.jpg\", height = 4, width = 12)\n\n# construct table 1 \nstrains_table <- top_strains[,c(\"rank\", \"strain\", \"R_eff\", \"lower_ci\", \"upper_ci\", \"Cases_per_day\")]\nstrains_table <- strains_table %>% mutate(CI = paste(round(lower_ci, 2), round(upper_ci, 2), sep=\"-\")) %>% \n mutate(R_eff = round(R_eff, 2)) %>% \n mutate(Cases_per_day = round(Cases_per_day, 0))\nstrains_table <- strains_table[,c(\"rank\", \"strain\", \"R_eff\", \"CI\", \"Cases_per_day\")]\nnames(strains_table) <- c(\"Rank\", \"Pango Lineage\", \"Fold Increase in R\", \"Delta R_eff CI\", \"Cases per day\")\nt1 <- strains_table[1:10,] %>% gt() %>% \n cols_align(align = \"center\") %>% tab_style(style = cell_text(weight = \"bold\"), \n locations = cells_column_labels(columns = everything())\n )\nt1\n\n# construct table 2\n\ntop_muts <- read_tsv(\"~/Dropbox/COVID/pyro-cov/paper/mutations.tsv\")\nnames(top_muts) <- c(\"rank\" ,\"mutation\", \"mean_sd\",\"mean\", \"lower_ci\",\"upper_ci\",\"R_eff\", \"emerged_in\")\ntop_muts <- top_muts %>% mutate(num_emergences = \n sapply(emerged_in, function(x) length(str_split(x, \",\")[[1]]))) %>% \n mutate(CI = paste(round(lower_ci, 2), round(upper_ci, 2), sep=\"-\")) %>% \n mutate(R_eff = round(R_eff, 2)) %>% \n mutate(mean = round(mean, 2)) %>% \n mutate(mean_sd = round(mean_sd, 2))\n\nmuts_table <- top_muts[,c(\"rank\", \"mutation\",\"mean\", \"CI\", \"R_eff\", \"num_emergences\")]\nnames(muts_table) <- c(\"Rank\",\"AA 
Substitution\", \"mean\", \"95% CI\", \"Fold Increase in R\", \"Number of Lineages\")\nt2 <- muts_table[1:20,] %>% gt() %>% \n  cols_align(align = \"center\") %>% tab_style(style = cell_text(weight = \"bold\"), \n                                             locations = cells_column_labels(columns = everything())\n  )\nt2\n\n# plot \n\ntop_muts <- top_muts %>% \n  mutate(gene = sapply(top_muts$mutation, function(x) str_split(x, \":\")[[1]][1])) %>% \n  mutate(Effect = ifelse(top_muts$R_eff > 1, \"Increase Transmissibility\", \"Decrease Transmissibility\")) %>%\n  mutate(Rank = ifelse(top_muts$rank < 101, \"Top 100\", \"Bottom 2132\"))\n\n\np4 <- ggplot(data = subset(top_muts, R_eff > 0 & abs(mean_sd) > quantile(abs(mean_sd), 0.9)), aes(x = gene, y = R_eff)) + \n  geom_violin(draw_quantiles = c(0.5), adjust = 2) + \n  geom_jitter(aes(color = Effect), width = 0.3, height = 0) + \n  theme_bw() + \n  theme(axis.text.x = element_text(angle = 90)) + \n  labs( x = \"Gene\", y = \"Fold Change in Reproductive Number\") + \n  theme(legend.position = c(0.75, 0.9), legend.background = element_blank())\np4\n\np5 <- ggplot(top_muts, aes(x = num_emergences, y = R_eff, label = mutation)) + \n  geom_jitter(width = 0.01, aes()) + \n  geom_text(data = subset(top_muts, rank < 10), size = 4, color = \"black\", position = position_jitter(w=0.05,h=0.05)) + \n  theme_bw() + \n  geom_smooth(method = \"lm\") + \n  labs( x = \"Number of Emergences\", y = \"Fold Change in Reproductive Number\") + \n  scale_x_log10() + \n  theme(legend.position = c(0.85, 0.85), legend.background = element_blank())\np5\n\nm5 <- lm(top_muts$R_eff ~ top_muts$num_emergences)\n\np6 <- ggplot(top_muts, aes(x = num_emergences)) + \n  geom_histogram(bins = 10) + \n  theme_bw() + \n  scale_x_log10() + \n  labs(x = \"Number of Emergences\")\np6\n\nplot_grid(p6, p5, p4, labels = c(\"A\", \"B\", \"C\"), nrow = 1)\nggsave(\"~/Dropbox/COVID/pyro-cov/paper/mutation_summaries.jpg\", height = 5, width = 12)\n\n# print specific effects for individual mutations\n\nD614G <- top_muts %>% filter(mutation == \"S:D614G\")\n\n" }, { "alpha_fraction": 0.7332490682601929, "alphanum_fraction": 0.7420986294746399, "avg_line_length": 17.600000381469727, "blob_id": "ea26969a481a2b09c188e68e066fcc37fbd3436e", "content_id": "8c4a832d019e123f0bae5d6c44da0a98b750e181", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 791, "license_type": "permissive", "max_line_length": 61, "num_lines": 45, "path": "/Tutorial.md", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Pyro models for SARS-CoV-2 analysis -- Reproducing analysis\n\n\n# Installation\n\n## Clone this repository\nClone this repository to ~/pyro-cov\n\n## Install nextclade\n\nDepending on your platform do:\n\n```sh\nmake install-nextalign-linux\nmake install-nextclade-linux\n```\nor \n```sh\nmake install-nextalign\nmake install-nextclade\n```\n## Install the package\n```sh\npip install -e .[test]\n```\n\n## Getting GISAID data\n1. Work with GISAID to get a data agreement.\n2. Create a directory ~/data/gisaid/\n3. Update pull_gisaid.sh file with your credentials and feed\n\n## Install dependencies\n1. conda install nodejs\n2. npm install --global @nextstrain/nextclade\n3. 
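The GISAID step above depends on credentials; `pull_gisaid.sh` elsewhere in this repo reads `GISAID_USERNAME`, `GISAID_PASSWORD`, and `GISAID_FEED` from the environment, so a hypothetical pre-flight check (run from the repo root) might look like:

```python
import os
import subprocess

# Variable names match pull_gisaid.sh shipped in this repo; the check
# itself is an illustrative addition, not part of the pipeline.
required = ["GISAID_USERNAME", "GISAID_PASSWORD", "GISAID_FEED"]
missing = [k for k in required if not os.environ.get(k)]
if missing:
    raise SystemExit(f"missing environment variables: {missing}")
subprocess.run(["./pull_gisaid.sh"], check=True)
```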
make update \n\nRun vary-holdout experiments\n```sh\npython mutrans.py --vary-holdout \n```\n\nRun backtesting experiments\n```sh\n./run_backtesting.py\n```" }, { "alpha_fraction": 0.556228518486023, "alphanum_fraction": 0.5627762079238892, "avg_line_length": 33.710227966308594, "blob_id": "f6c6230347466870894fb9206db7fd599a5d2682", "content_id": "3dbbb333c3d161af36cdb60201e01e16fdd9fcb4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6109, "license_type": "permissive", "max_line_length": 85, "num_lines": 176, "path": "/pyrocov/fasta.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport hashlib\nimport logging\nimport os\nimport shutil\nfrom collections import defaultdict\nfrom subprocess import check_call\n\nlogger = logging.getLogger(__name__)\nNEXTSTRAIN_DATA = os.path.expanduser(\"~/github/nextstrain/nextclade/data/sars-cov-2\")\n\n\ndef hash_sequence(seq):\n    hasher = hashlib.sha1()\n    hasher.update(seq.replace(\"\\n\", \"\").encode(\"utf-8\"))\n    return hasher.hexdigest()\n\n\nclass NextcladeDB:\n    \"\"\"\n    Database to store nextclade results through time, so that only new samples\n    need to be sequenced.\n    \"\"\"\n\n    def __init__(self, fileprefix=\"results/nextcladedb\", max_fasta_count=4000):\n        fileprefix = os.path.realpath(fileprefix)\n        self.header_filename = fileprefix + \".header.tsv\"\n        self.rows_filename = fileprefix + \".rows.tsv\"\n        self.rows_temp_filename = fileprefix + \"rows.temp.tsv\"\n        self.fasta_filename = fileprefix + \".temp.fasta\"\n        self.tsv_filename = fileprefix + \".temp.tsv\"\n        self.output_dir = os.path.dirname(self.tsv_filename)\n\n        # Load hashes of already-aligned sequences.\n        self._already_aligned = set()\n        if os.path.exists(self.rows_filename):\n            with open(self.rows_filename) as f:\n                for line in f:\n                    key = line.split(\"\\t\", 1)[0]\n                    self._already_aligned.add(key)\n\n        self.max_fasta_count = max_fasta_count\n        self._fasta_file = open(self.fasta_filename, \"wt\")\n        self._pending = set()\n\n        self._tasks = defaultdict(list)\n\n    def schedule(self, sequence, *fn_args):\n        \"\"\"\n        Schedule a task for a given input ``sequence``.\n        \"\"\"\n        key = hash_sequence(sequence)\n        if key not in self._already_aligned:\n            self._schedule_alignment(key, sequence)\n        self._tasks[key].append(fn_args)\n\n    def maybe_schedule(self, sequence, *fn_args):\n        \"\"\"\n        Schedule a task iff no new alignment work is required.\n        Tasks requiring new alignment work will be silently dropped.\n        \"\"\"\n        key = hash_sequence(sequence)\n        if key in self._already_aligned:\n            self._tasks[key].append(fn_args)\n\n    def wait(self, log_every=1000):\n        \"\"\"\n        Wait for all scheduled or maybe_scheduled tasks to complete.\n        \"\"\"\n        self._flush()\n        with open(self.header_filename) as f:\n            header = f.read().strip().split(\"\\t\")\n        with open(self.rows_filename) as f:\n            for i, row in enumerate(f):\n                row = row.strip().split(\"\\t\")\n                key = row[0]\n                row = dict(zip(header, row))\n                for fn_args in self._tasks.pop(key, []):\n                    fn, args = fn_args[0], fn_args[1:]\n                    fn(*args, row)\n                if log_every and i % log_every == 0:\n                    print(\".\", end=\"\", flush=True)\n\n    def _schedule_alignment(self, key, sequence):\n        self._fasta_file.write(\">\")\n        self._fasta_file.write(key)\n        self._fasta_file.write(\"\\n\")\n        self._fasta_file.write(sequence)\n        self._fasta_file.write(\"\\n\")\n        self._pending.add(key)\n        if len(self._pending) >= 
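`hash_sequence` above is the cache key for `NextcladeDB`; a quick self-contained check (copying the function body verbatim) that newline wrapping does not change the key:

```python
import hashlib

def hash_sequence(seq):  # copied from pyrocov/fasta.py above
    hasher = hashlib.sha1()
    hasher.update(seq.replace("\n", "").encode("utf-8"))
    return hasher.hexdigest()

# FASTA-style line wrapping is invisible to the cache.
assert hash_sequence("ACGT\nACGT") == hash_sequence("ACGTACGT")
```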
self.max_fasta_count:\n self._flush()\n\n def _flush(self):\n if not self._pending:\n return\n self._fasta_file.close()\n cmd = [\n \"./nextclade\",\n f\"--input-root-seq={NEXTSTRAIN_DATA}/reference.fasta\",\n \"--genes=E,M,N,ORF1a,ORF1b,ORF3a,ORF6,ORF7a,ORF7b,ORF8,ORF9b,S\",\n f\"--input-gene-map={NEXTSTRAIN_DATA}/genemap.gff\",\n f\"--input-tree={NEXTSTRAIN_DATA}/tree.json\",\n f\"--input-qc-config={NEXTSTRAIN_DATA}/qc.json\",\n f\"--input-pcr-primers={NEXTSTRAIN_DATA}/primers.csv\",\n f\"--input-fasta={self.fasta_filename}\",\n f\"--output-tsv={self.tsv_filename}\",\n f\"--output-dir={self.output_dir}\",\n ]\n logger.info(\" \".join(cmd))\n check_call(cmd)\n\n # Append to a copy to ensure atomicity.\n if os.path.exists(self.rows_filename):\n shutil.copyfile(self.rows_filename, self.rows_temp_filename)\n with open(self.tsv_filename) as f:\n with open(self.rows_temp_filename, \"a\") as frows:\n for i, line in enumerate(f):\n if i:\n frows.write(line)\n else:\n with open(self.header_filename, \"w\") as fheader:\n fheader.write(line)\n os.rename(self.rows_temp_filename, self.rows_filename)\n os.remove(self.fasta_filename)\n os.remove(self.tsv_filename)\n self._fasta_file = open(self.fasta_filename, \"w\")\n self._already_aligned.update(self._pending)\n self._pending.clear()\n\n\nclass ShardedFastaWriter:\n \"\"\"\n Writer that splits into multiple fasta files to avoid nextclade file size\n limit.\n \"\"\"\n\n def __init__(self, filepattern, max_count=5000):\n assert filepattern.count(\"*\") == 1\n self.filepattern = filepattern\n self.max_count = max_count\n self._file_count = 0\n self._line_count = 0\n self._file = None\n\n def _open(self):\n filename = self.filepattern.replace(\"*\", str(self._file_count))\n print(f\"writing to {filename}\")\n return open(filename, \"wt\")\n\n def __enter__(self):\n assert self._file is None\n self._file = self._open()\n self._file_count += 1\n return self\n\n def __exit__(self, *args, **kwargs):\n self._file.close()\n self._file = None\n self._file_count = 0\n self._line_count = 0\n\n def write(self, name, sequence):\n if self._line_count == self.max_count:\n self._file.close()\n self._file = self._open()\n self._file_count += 1\n self._line_count = 0\n self._file.write(\">\")\n self._file.write(name)\n self._file.write(\"\\n\")\n self._file.write(sequence)\n self._file.write(\"\\n\")\n self._line_count += 1\n" }, { "alpha_fraction": 0.6279579401016235, "alphanum_fraction": 0.6358457207679749, "avg_line_length": 32.55882263183594, "blob_id": "9cd1deb287df74542920f381a71d5fac93243fa3", "content_id": "decba1c5c4bbb277bb30a7aa57cc65e5654e4062", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2282, "license_type": "permissive", "max_line_length": 84, "num_lines": 68, "path": "/run_nextclade.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport json\nimport logging\nimport os\nimport pickle\nfrom collections import Counter\n\nfrom pyrocov.fasta import NextcladeDB\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format=\"%(relativeCreated) 9d %(message)s\", level=logging.INFO)\n\n\ndef count_mutations(mutation_counts, row):\n # Check whether row is valid\n if row[\"qc.overallStatus\"] != \"good\":\n return\n for col in [\"aaSubstitutions\", \"aaDeletions\"]:\n ms = row[col]\n if ms:\n mutation_counts.update(ms.split(\",\"))\n\n\ndef 
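A usage sketch for the `ShardedFastaWriter` defined above, assuming `pyrocov` is importable and a temp directory is writable; with `max_count=2`, the third record rolls over to a second shard file:

```python
from pyrocov.fasta import ShardedFastaWriter

# The "*" in the pattern is replaced by the shard index.
with ShardedFastaWriter("/tmp/seqs.*.fasta", max_count=2) as writer:
    writer.write("sample1", "ACGT")
    writer.write("sample2", "ACGA")
    writer.write("sample3", "ACGG")  # opens /tmp/seqs.1.fasta
```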
main(args):\n logger.info(f\"Filtering {args.gisaid_file_in}\")\n if not os.path.exists(args.gisaid_file_in):\n raise OSError(\n \"Each user must independently request a data feed from gisaid.org\"\n )\n os.makedirs(\"results\", exist_ok=True)\n\n db = NextcladeDB()\n schedule = db.maybe_schedule if args.no_new else db.schedule\n mutation_counts = Counter()\n with open(args.gisaid_file_in, \"rt\") as f:\n for i, line in enumerate(f):\n seq = json.loads(line)[\"sequence\"]\n\n # Filter by length.\n nchars = sum(seq.count(b) for b in \"ACGT\")\n if args.min_nchars <= nchars <= args.max_nchars:\n seq = seq.replace(\"\\n\", \"\")\n schedule(seq, count_mutations, mutation_counts)\n\n if i % args.log_every == 0:\n print(\".\", end=\"\", flush=True)\n db.wait(log_every=args.log_every)\n\n logger.info(f\"saving {args.counts_file_out}\")\n with open(args.counts_file_out, \"wb\") as f:\n pickle.dump(dict(mutation_counts), f)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Run NextClade on all sequences\")\n parser.add_argument(\n \"--gisaid-file-in\", default=os.path.expanduser(\"results/gisaid.json\")\n )\n parser.add_argument(\"--counts-file-out\", default=\"results/nextclade.counts.pkl\")\n parser.add_argument(\"--min-nchars\", default=29000, type=int)\n parser.add_argument(\"--max-nchars\", default=31000, type=int)\n parser.add_argument(\"--no-new\", action=\"store_true\")\n parser.add_argument(\"-l\", \"--log-every\", default=1000, type=int)\n args = parser.parse_args()\n main(args)\n" }, { "alpha_fraction": 0.6104076504707336, "alphanum_fraction": 0.6156578063964844, "avg_line_length": 37.7784423828125, "blob_id": "d17a691f1906e0ecf98c8808cd16ab606648c735", "content_id": "0c85e6a2d7f69556d344b735d2845f8db521b94f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6476, "license_type": "permissive", "max_line_length": 87, "num_lines": 167, "path": "/featurize_nextclade.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport json\nimport logging\nimport pickle\nimport re\nfrom collections import Counter, defaultdict\n\nimport torch\n\nfrom pyrocov.fasta import NextcladeDB\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format=\"%(relativeCreated) 9d %(message)s\", level=logging.INFO)\n\n\ndef count_mutations(mutation_counts, status_counts, row):\n # Check whether row is valid\n status = row[\"qc.overallStatus\"]\n status_counts[status] += 1\n if status != \"good\":\n return\n mutation_counts[None] += 1 # hack to count number of lineages\n for col in [\"aaSubstitutions\", \"aaDeletions\"]:\n ms = row[col]\n if not ms:\n continue\n ms = ms.split(\",\")\n mutation_counts.update(ms)\n # Add within-gene pairs of mutations.\n by_gene = defaultdict(list)\n for m in ms:\n g, m = m.split(\":\")\n by_gene[g].append(m)\n for g, ms in by_gene.items():\n # Sort by position, then alphabetical.\n ms.sort(key=lambda m: (int(re.search(r\"\\d+\", m).group(0)), m))\n for i, m1 in enumerate(ms):\n for m2 in ms[i + 1 :]:\n mutation_counts[f\"{g}:{m1},{m2}\"] += 1\n\n\ndef main(args):\n # Load the filtered accession ids.\n logger.info(f\"Loading {args.columns_file_in}\")\n with open(args.columns_file_in, \"rb\") as f:\n columns = pickle.load(f)\n id_to_lineage = dict(zip(columns[\"accession_id\"], columns[\"lineage\"]))\n del columns\n\n # Count mutations via 
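The within-gene pairing in `count_mutations` above can be isolated on a toy mutation list; the gene name is hardcoded here purely for illustration:

```python
import re

ms = ["Q57H", "D614G", "A222V"]
# Sort by position, then alphabetical, as count_mutations does above.
ms.sort(key=lambda m: (int(re.search(r"\d+", m).group(0)), m))
pairs = [f"S:{m1},{m2}" for i, m1 in enumerate(ms) for m2 in ms[i + 1:]]
# pairs == ['S:Q57H,A222V', 'S:Q57H,D614G', 'S:A222V,D614G']
```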
nextclade.\n # This is batched and cached under the hood.\n logger.info(f\"Loading {args.gisaid_file_in}\")\n lineage_mutation_counts = defaultdict(Counter)\n lineage_status_counts = defaultdict(Counter)\n db = NextcladeDB()\n with open(args.gisaid_file_in, \"rt\") as f:\n for i, line in enumerate(f):\n datum = json.loads(line)\n\n # Filter to sequences with sufficient data.\n lineage = id_to_lineage.get(datum[\"covv_accession_id\"])\n if lineage is None:\n continue\n nchars = sum(datum[\"sequence\"].count(b) for b in \"ACGT\")\n if not (args.min_nchars <= nchars <= args.max_nchars):\n continue\n\n # Schedule sequence for alignment.\n seq = datum[\"sequence\"].replace(\"\\n\", \"\")\n mutation_counts = lineage_mutation_counts[lineage]\n status_counts = lineage_status_counts[lineage]\n db.schedule(seq, count_mutations, mutation_counts, status_counts)\n\n if i % args.log_every == 0:\n print(\".\", end=\"\", flush=True)\n db.wait(log_every=args.log_every)\n\n message = [\"Total quality:\"]\n status_counts = Counter()\n for c in lineage_status_counts.values():\n status_counts.update(c)\n for s, c in status_counts.most_common():\n message.append(f\"{s}: {c}\")\n logger.info(\"\\n\\t\".join(message))\n\n message = [\"Lineages with fewest good samples:\"]\n for c, l in sorted((c[\"good\"], l) for l, c in lineage_status_counts.items())[:20]:\n message.append(f\"{l}: {c}\")\n logger.info(\"\\n\\t\".join(message))\n\n # Collect a set of all single mutations observed in this subsample.\n agg_counts = Counter()\n for ms in lineage_mutation_counts.values():\n for m, count in ms.items():\n if m is not None and \",\" not in m:\n agg_counts[m] += count\n all_mutations = sorted(agg_counts)\n logger.info(f\"saving {args.counts_file_out}\")\n with open(args.counts_file_out, \"wb\") as f:\n pickle.dump(dict(agg_counts), f)\n\n # Filter to lineages with at least a few good samples.\n for lineage, status_counts in list(lineage_status_counts.items()):\n if status_counts[\"good\"] < args.min_good_samples:\n logger.info(f\"Dropping {lineage} with {status_counts}\")\n del lineage_mutation_counts[lineage]\n del lineage_status_counts[lineage]\n\n # Filter to features that occur in the majority of at least one lineage.\n lineage_counts = {\n k: v.pop(None) for k, v in lineage_mutation_counts.items() if None in v\n }\n mutations = set()\n for lineage, mutation_counts in list(lineage_mutation_counts.items()):\n if not mutation_counts:\n lineage_mutation_counts.pop(lineage)\n continue\n denominator = lineage_counts[lineage]\n for m, count in mutation_counts.items():\n if count / denominator >= 0.5:\n mutations.add(m)\n by_num = Counter(m.count(\",\") for m in mutations)\n logger.info(\n \"Keeping only ({} single + {} double) = {} of {} mutations\".format(\n by_num[0], by_num[1], len(mutations), len(all_mutations)\n )\n )\n\n # Convert to dense features.\n lineages = sorted(lineage_counts)\n mutations = sorted(mutations, key=lambda m: (m.count(\",\"), m))\n lineage_ids = {k: i for i, k in enumerate(lineages)}\n mutation_ids = {k: i for i, k in enumerate(mutations)}\n features = torch.zeros(len(lineage_ids), len(mutation_ids))\n for lineage, counts in lineage_mutation_counts.items():\n i = lineage_ids[lineage]\n denominator = lineage_counts[lineage]\n for mutation, count in counts.items():\n j = mutation_ids.get(mutation, None)\n if j is not None:\n features[i, j] = count / denominator\n\n result = {\n \"lineages\": lineages,\n \"mutations\": mutations,\n \"features\": features,\n \"all_mutations\": all_mutations,\n }\n 
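A toy version of the majority filter above: a mutation survives only if it appears in at least half of some lineage's good samples.

```python
from collections import Counter

# Invented counts: 100 good samples of one lineage.
lineage_counts = {"B.1.1.7": 100}
mutation_counts = {"B.1.1.7": Counter({"S:N501Y": 98, "S:A222V": 3})}

kept = {
    m
    for lineage, counts in mutation_counts.items()
    for m, count in counts.items()
    if count / lineage_counts[lineage] >= 0.5
}
assert kept == {"S:N501Y"}  # only majority mutations survive
```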
logger.info(f\"saving {tuple(features.shape)}-features to {args.features_file_out}\")\n torch.save(result, args.features_file_out)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Featurize nextclade mutations\")\n parser.add_argument(\"--gisaid-file-in\", default=\"results/gisaid.json\")\n parser.add_argument(\"--columns-file-in\", default=\"results/gisaid.columns.pkl\")\n parser.add_argument(\"--features-file-out\", default=\"results/nextclade.features.pt\")\n parser.add_argument(\"--counts-file-out\", default=\"results/nextclade.counts.pkl\")\n parser.add_argument(\"--min-nchars\", default=29000, type=int)\n parser.add_argument(\"--max-nchars\", default=31000, type=int)\n parser.add_argument(\"--min-good-samples\", default=5, type=float)\n parser.add_argument(\"-l\", \"--log-every\", default=1000, type=int)\n args = parser.parse_args()\n main(args)\n" }, { "alpha_fraction": 0.6909722089767456, "alphanum_fraction": 0.7048611044883728, "avg_line_length": 31, "blob_id": "1b1bd421dca474d39dbefd1c186b254bfedf10c9", "content_id": "779cc16ea6341689b53c6c44f209cd9ef79fe58f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 288, "license_type": "permissive", "max_line_length": 72, "num_lines": 9, "path": "/pull_gisaid.sh", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "#!/bin/sh -ex\n\n# Ensure data directory (or a link) exists.\ntest -e results || mkdir results\n\n# Download and decompress data.\ncurl -u $GISAID_USERNAME:$GISAID_PASSWORD --retry 4 \\\n https://www.epicov.org/epi3/3p/$GISAID_FEED/export/provision.json.xz \\\n | xz -d -T8 > results/gisaid.json\n" }, { "alpha_fraction": 0.5507421493530273, "alphanum_fraction": 0.5625498294830322, "avg_line_length": 35.8464469909668, "blob_id": "912805867517ef494e391bb607b145f3219ef810", "content_id": "66dc7ee3211a3e67c62942c5e00c3c2ac964173f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38885, "license_type": "permissive", "max_line_length": 88, "num_lines": 1055, "path": "/pyrocov/mutrans.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport functools\nimport logging\nimport math\nimport pickle\nimport re\nimport warnings\nfrom collections import Counter, OrderedDict, defaultdict\nfrom timeit import default_timer\nfrom typing import List\n\nimport numpy as np\nimport pyro\nimport pyro.distributions as dist\nimport torch\nfrom pyro import poutine\nfrom pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO\nfrom pyro.infer.autoguide import (\n AutoDelta,\n AutoGuideList,\n AutoLowRankMultivariateNormal,\n AutoNormal,\n AutoStructured,\n)\nfrom pyro.infer.reparam import LocScaleReparam\nfrom pyro.nn.module import PyroModule, PyroParam\nfrom pyro.ops.streaming import CountMeanVarianceStats, StatsOfDict\nfrom pyro.optim import ClippedAdam\nfrom pyro.poutine.util import site_is_subsample\nfrom torch.distributions import constraints\n\nimport pyrocov.geo\n\nfrom . 
import pangolin, sarscov2\nfrom .util import pearson_correlation\n\n# Requires https://github.com/pyro-ppl/pyro/pull/2953\ntry:\n from pyro.infer.autoguide.effect import AutoRegressiveMessenger\nexcept ImportError:\n AutoRegressiveMessenger = object\n\nlogger = logging.getLogger(__name__)\n\n# Reasonable values might be week (7), fortnight (14), or month (28)\nTIMESTEP = 14 # in days\nGENERATION_TIME = 5.5 # in days\nSTART_DATE = \"2019-12-01\"\n\n\ndef date_range(stop):\n start = datetime.datetime.strptime(START_DATE, \"%Y-%m-%d\")\n step = datetime.timedelta(days=TIMESTEP)\n return np.array([start + step * t for t in range(stop)])\n\n\ndef get_fine_regions(columns, min_samples=50):\n \"\"\"\n Select regions that have at least ``min_samples`` samples.\n Remaining regions will be coarsely aggregated up to country level.\n \"\"\"\n # Count number of samples in each subregion.\n counts = Counter()\n for location in columns[\"location\"]:\n parts = location.split(\"/\")\n if len(parts) < 2:\n continue\n parts = tuple(p.strip() for p in parts[:3])\n counts[parts] += 1\n\n # Select fine countries.\n return frozenset(parts for parts, count in counts.items() if count >= min_samples)\n\n\ndef rank_loo_lineages(\n full_dataset: dict,\n full_result: dict,\n min_samples: int = 100,\n) -> List[str]:\n \"\"\"\n Compute a list of lineages ranked in descending order of how much their\n growth rate differs from their parents' growth rate. This is used in growth\n rate leave-one-out prediction experiments.\n \"\"\"\n # Decompress lineage names before computing parents.\n lineage_id_inv = [\n pangolin.decompress(name) for name in full_dataset[\"lineage_id_inv\"]\n ]\n lineage_id = {name: i for i, name in enumerate(lineage_id_inv)}\n ancestors = set(lineage_id)\n\n # Filter to often-observed lineages.\n weekly_strains = full_dataset[\"weekly_strains\"] # [T, P, S]\n lineage_counts = weekly_strains.sum([0, 1]) # [S]\n lineages = []\n for c, child in enumerate(lineage_id_inv):\n if child in (\"A\", \"B\", \"B.1\"):\n continue # ignore very early lineages\n if lineage_counts[c] < min_samples:\n continue # ignore rare lineages\n lineages.append(child)\n\n # Sort leaf nodes by their distance from parent.\n rate_loc = full_result[\"median\"][\"rate_loc\"]\n ranked_lineages = []\n for child in lineages:\n # Allow grandparent to adopt orphan, since e.g. 
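`date_range` above just lays out `TIMESTEP`-day bins starting from `START_DATE`; copying it verbatim with the module constants:

```python
import datetime
import numpy as np

TIMESTEP = 14
START_DATE = "2019-12-01"

def date_range(stop):  # copied from mutrans.py above
    start = datetime.datetime.strptime(START_DATE, "%Y-%m-%d")
    step = datetime.timedelta(days=TIMESTEP)
    return np.array([start + step * t for t in range(stop)])

print(date_range(3))  # 2019-12-01, 2019-12-15, 2019-12-29
```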
B.1.617 is missing from\n        # lineage_id, but B.1.617.2 is very important.\n        parent = pangolin.get_most_recent_ancestor(child, ancestors)\n        assert parent is not None\n        c = lineage_id[child]\n        p = lineage_id[parent]\n        gap = (rate_loc[c] - rate_loc[p]).abs().item()\n        ranked_lineages.append((gap, child))\n    ranked_lineages.sort(reverse=True)\n\n    # Compress lineage names before returning.\n    return [pangolin.compress(name) for gap, name in ranked_lineages]\n\n\ndef load_gisaid_data(\n    *,\n    device=\"cpu\",\n    include={},\n    exclude={},\n    end_day=None,\n    gisaid_columns_filename=\"results/gisaid.columns.pkl\",\n    nextclade_features_filename=\"results/nextclade.features.pt\",\n) -> dict:\n    \"\"\"\n    Loads the two files gisaid_columns_filename and nextclade_features_filename,\n    converts the input to PyTorch tensors and truncates the data according to\n    ``include`` and ``exclude``.\n\n    Keyword arguments:\n    device -- torch device to use\n    include -- dict of regex filters; the special keys \"gene\" and \"region\"\n        filter mutation features, and remaining keys are matched against row\n        fields (e.g. \"location\", \"lineage\"); only rows matching all are kept\n    exclude -- dict of regex filters over the same keys; matching rows and\n        features are dropped\n    end_day -- last day to include\n    gisaid_columns_filename -- pickled GISAID metadata columns from preprocessing\n    nextclade_features_filename -- mutation feature matrix from featurize_nextclade.py\n    \"\"\"\n    logger.info(\"Loading data\")\n    include = include.copy()\n    exclude = exclude.copy()\n\n    if end_day:\n        logger.info(f\"Load gisaid data end_day: {end_day}\")\n\n    # Load ``gisaid_columns_filename``\n    with open(gisaid_columns_filename, \"rb\") as f:\n        columns = pickle.load(f)\n\n    logger.info(\"Training on {} rows with columns:\".format(len(columns[\"day\"])))\n    logger.info(\", \".join(columns.keys()))\n\n    # Filter regions to at least 50 samples and aggregate rest to country level\n    fine_regions = get_fine_regions(columns)\n\n    # Filter features into numbers of mutations and possibly genes.\n    aa_features = torch.load(nextclade_features_filename)\n    mutations = aa_features[\"mutations\"]\n    features = aa_features[\"features\"].to(\n        device=device, dtype=torch.get_default_dtype()\n    )\n    keep = [m.count(\",\") == 0 for m in mutations]  # restrict to single mutations\n    if include.get(\"gene\"):\n        re_gene = re.compile(include.pop(\"gene\"))\n        keep = [k and bool(re_gene.search(m)) for k, m in zip(keep, mutations)]\n    if exclude.get(\"gene\"):\n        re_gene = re.compile(exclude.pop(\"gene\"))\n        keep = [k and not re_gene.search(m) for k, m in zip(keep, mutations)]\n    if include.get(\"region\"):\n        gene, region = include.pop(\"region\")\n        lb, ub = sarscov2.GENE_STRUCTURE[gene][region]\n        for i, m in enumerate(mutations):\n            g, m = m.split(\":\")\n            if g != gene:\n                keep[i] = False\n                continue\n            match = re.search(\"[0-9]+\", m)\n            assert match is not None\n            pos = int(match.group())\n            if not (lb < pos <= ub):\n                keep[i] = False\n    mutations = [m for k, m in zip(keep, mutations) if k]\n    if mutations:\n        features = features[:, keep]\n    else:\n        warnings.warn(\"No mutations selected; using empty features\")\n        mutations = [\"S:D614G\"]  # bogus\n        features = features[:, :1] * 0\n    logger.info(\"Loaded {} feature matrix\".format(\" x \".join(map(str, features.shape))))\n\n    # Aggregate regions\n\n    # Get lineages\n    lineages = list(map(pangolin.compress, columns[\"lineage\"]))\n    lineage_id_inv = list(map(pangolin.compress, aa_features[\"lineages\"]))\n    lineage_id = {k: i for i, k in enumerate(lineage_id_inv)}\n\n    sparse_data: dict = Counter()\n    location_id: dict = OrderedDict()\n\n    # Set of lineages that are skipped\n    skipped = set()\n\n    # Generate sparse_data\n    for virus_name, day, location, lineage in zip(\n        columns[\"virus_name\"], columns[\"day\"], columns[\"location\"], lineages\n    ):\n        if lineage not in lineage_id:\n            if lineage not in skipped:\n                skipped.add(lineage)\n                
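A usage sketch for `load_gisaid_data` (its body continues below); the paths assume the preprocessing scripts in this repo have already written their default outputs, and the regexes are invented examples:

```python
from pyrocov.mutrans import load_gisaid_data

dataset = load_gisaid_data(
    include={"location": r"^Europe"},  # keep only European rows
    exclude={"lineage": r"^XA"},       # drop a hypothetical lineage family
)
print(dataset["weekly_strains"].shape)  # [T, P, S]
```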
logger.warning(f\"WARNING skipping unsampled lineage {lineage}\")\n continue\n\n # Filter by include/exclude\n row = {\n \"virus_name\": virus_name,\n \"location\": location,\n \"day\": day,\n \"lineage\": pangolin.compress(lineage),\n }\n if not all(re.search(v, row[k]) for k, v in include.items()):\n continue\n if any(re.search(v, row[k]) for k, v in exclude.items()):\n continue\n\n # Filter by day\n if end_day is not None:\n if day > end_day:\n continue\n\n # preprocess parts\n parts = location.split(\"/\")\n if len(parts) < 2:\n continue\n parts = tuple(p.strip() for p in parts[:3])\n if len(parts) == 3 and parts not in fine_regions:\n parts = parts[:2]\n location = \" / \".join(parts)\n\n p = location_id.setdefault(location, len(location_id))\n s = lineage_id[lineage]\n t = day // TIMESTEP\n sparse_data[t, p, s] += 1\n\n # Generate weekly_strains tensor from sparse_data\n if end_day is not None:\n T = 1 + end_day // TIMESTEP\n else:\n T = 1 + max(columns[\"day\"]) // TIMESTEP\n\n P = len(location_id)\n S = len(lineage_id)\n weekly_strains = torch.zeros(T, P, S)\n for (t, p, s), n in sparse_data.items():\n weekly_strains[t, p, s] = n\n\n logger.info(f\"Dataset size [T x P x S] {T} x {P} x {S}\")\n\n logger.info(\n f\"Keeping {int(weekly_strains.sum())}/{len(lineages)} rows \"\n f\"(dropped {len(lineages) - int(weekly_strains.sum())})\"\n )\n\n # Filter regions.\n num_times_observed = (weekly_strains > 0).max(2).values.sum(0)\n ok_regions = (num_times_observed >= 2).nonzero(as_tuple=True)[0]\n ok_region_set = set(ok_regions.tolist())\n logger.info(f\"Keeping {len(ok_regions)}/{weekly_strains.size(1)} regions\")\n weekly_strains = weekly_strains.index_select(1, ok_regions)\n locations = [k for k, v in location_id.items() if v in ok_region_set]\n location_id = OrderedDict(zip(locations, range(len(locations))))\n\n # Construct region-local time scales centered around observations.\n num_obs = weekly_strains.sum(-1)\n local_time = torch.arange(float(len(num_obs))) * TIMESTEP / GENERATION_TIME\n local_time = local_time[:, None]\n local_time = local_time - (local_time * num_obs).sum(0) / num_obs.sum(0)\n\n return {\n \"location_id\": location_id,\n \"mutations\": mutations,\n \"weekly_strains\": weekly_strains,\n \"features\": features,\n \"lineage_id\": lineage_id,\n \"lineage_id_inv\": lineage_id_inv,\n \"local_time\": local_time,\n }\n\n\ndef subset_gisaid_data(\n gisaid_dataset: dict,\n location_queries=None,\n max_strains=math.inf,\n) -> dict:\n \"\"\"\n Selects a small subset of data for exploratory fitting of a small model.\n This is not used in the final published results.\n \"\"\"\n old = gisaid_dataset\n new = old.copy()\n\n # Select locations.\n if location_queries is not None:\n locations = sorted(\n {\n location\n for location in new[\"location_id\"]\n if any(q in location for q in location_queries)\n }\n )\n ids = torch.tensor([old[\"location_id\"][location] for location in locations])\n new[\"location_id\"] = {name: i for i, name in enumerate(locations)}\n new[\"weekly_strains\"] = new[\"weekly_strains\"].index_select(1, ids)\n new[\"local_time\"] = new[\"local_time\"].index_select(1, ids)\n\n # Select strains.\n if new[\"weekly_strains\"].size(-1) > max_strains:\n ids = (\n new[\"weekly_strains\"]\n .sum([0, 1])\n .sort(0, descending=True)\n .indices[:max_strains]\n )\n new[\"weekly_strains\"] = new[\"weekly_strains\"].index_select(-1, ids)\n new[\"features\"] = new[\"features\"].index_select(0, ids)\n new[\"lineage_id_inv\"] = [new[\"lineage_id_inv\"][i] for i in 
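The region-local time scale constructed above centers each place's clock at its observation-weighted midpoint; a toy check with invented counts:

```python
import torch

TIMESTEP, GENERATION_TIME = 14, 5.5
num_obs = torch.tensor([[0.0], [10.0], [30.0]])  # [T=3, P=1]
local_time = torch.arange(3.0)[:, None] * TIMESTEP / GENERATION_TIME
local_time = local_time - (local_time * num_obs).sum(0) / num_obs.sum(0)
# The observation-weighted mean of the result is zero in each place.
assert torch.allclose((local_time * num_obs).sum(0), torch.tensor([0.0]), atol=1e-5)
```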
ids.tolist()]\n new[\"lineage_id\"] = {name: i for i, name in enumerate(new[\"lineage_id_inv\"])}\n\n # Select mutations.\n gaps = new[\"features\"].max(0).values - new[\"features\"].min(0).values\n ids = (gaps >= 0.5).nonzero(as_tuple=True)[0]\n new[\"mutations\"] = [new[\"mutations\"][i] for i in ids.tolist()]\n new[\"features\"] = new[\"features\"].index_select(-1, ids)\n\n logger.info(\n \"Selected {}/{} places, {}/{} strains, {}/{} mutations, {}/{} samples\".format(\n len(new[\"location_id\"]),\n len(old[\"location_id\"]),\n len(new[\"lineage_id\"]),\n len(old[\"lineage_id\"]),\n len(new[\"mutations\"]),\n len(old[\"mutations\"]),\n int(new[\"weekly_strains\"].sum()),\n int(old[\"weekly_strains\"].sum()),\n )\n )\n\n return new\n\n\ndef load_jhu_data(gisaid_data: dict) -> dict:\n \"\"\"\n Load case count time series.\n\n This is used for plotting but is not used for fitting a model.\n \"\"\"\n # Load raw JHU case count data.\n us_cases_df = pyrocov.geo.read_csv(\"time_series_covid19_confirmed_US.csv\")\n global_cases_df = pyrocov.geo.read_csv(\"time_series_covid19_confirmed_global.csv\")\n daily_cases = torch.cat(\n [\n pyrocov.geo.pd_to_torch(us_cases_df, columns=slice(11, None)),\n pyrocov.geo.pd_to_torch(global_cases_df, columns=slice(4, None)),\n ]\n ).T\n logger.info(\n \"Loaded {} x {} daily case data, totaling {}\".format(\n *daily_cases.shape, daily_cases[-1].sum().item()\n )\n )\n\n # Convert JHU locations to GISAID locations.\n locations = list(gisaid_data[\"location_id\"])\n matrix = pyrocov.geo.gisaid_to_jhu_location(locations, us_cases_df, global_cases_df)\n assert matrix.shape == (len(locations), daily_cases.shape[-1])\n daily_cases = daily_cases @ matrix.T\n daily_cases[1:] -= daily_cases[:-1].clone() # cumulative -> density\n daily_cases.clamp_(min=0)\n assert daily_cases.shape[1] == len(gisaid_data[\"location_id\"])\n\n # Convert daily counts to TIMESTEP counts (e.g. 
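The cumulative-to-density conversion above, traced on a toy series; the `clone()` avoids in-place aliasing during the shifted subtraction:

```python
import torch

cumulative = torch.tensor([[0.0], [3.0], [4.0], [10.0]])  # [days, places]
daily = cumulative.clone()
daily[1:] -= daily[:-1].clone()  # cumulative -> per-day counts
daily.clamp_(min=0)
# daily is now [[0.], [3.], [1.], [6.]]
assert daily.sum() == cumulative[-1].sum()
```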
weekly).\n    start_date = datetime.datetime.strptime(START_DATE, \"%Y-%m-%d\")\n    jhu_start_date = pyrocov.geo.parse_date(us_cases_df.columns[11])\n    assert start_date < jhu_start_date\n    dt = (jhu_start_date - start_date).days\n    T = len(gisaid_data[\"weekly_strains\"])\n    weekly_cases = daily_cases.new_zeros(T, len(locations))\n    for w in range(TIMESTEP):\n        t0 = (w + dt) // TIMESTEP\n        source = daily_cases[w::TIMESTEP]\n        destin = weekly_cases[t0 : t0 + len(source)]\n        destin[:] += source[: len(destin)]\n    assert weekly_cases.sum() > 0\n\n    return {\n        \"daily_cases\": daily_cases.clamp(min=0),\n        \"weekly_cases\": weekly_cases.clamp(min=0),\n    }\n\n\ndef model(dataset, model_type, *, forecast_steps=None):\n    \"\"\"\n    Bayesian regression model of lineage portions as a function of mutation features.\n\n    This function can be run in two different modes:\n    - During training, ``forecast_steps=None`` and the model is conditioned on\n      observed data.\n    - During prediction (after training), the likelihood statement is omitted\n      and instead a ``probs`` tensor is recorded; this is the predicted lineage\n      portions in each (time, region) bin.\n    \"\"\"\n    # Tensor shapes are commented at the end of some lines.\n    features = dataset[\"features\"]\n    local_time = dataset[\"local_time\"][..., None]  # [T, P, 1]\n    T, P, _ = local_time.shape\n    S, F = features.shape\n    if forecast_steps is None:  # During inference.\n        weekly_strains = dataset[\"weekly_strains\"]\n        assert weekly_strains.shape == (T, P, S)\n    else:  # During prediction.\n        T = T + forecast_steps\n        t0 = local_time[0]\n        dt = local_time[1] - local_time[0]\n        local_time = t0 + dt * torch.arange(float(T))[:, None, None]\n        assert local_time.shape == (T, P, 1)\n    strain_plate = pyro.plate(\"strain\", S, dim=-1)\n    place_plate = pyro.plate(\"place\", P, dim=-2)\n    time_plate = pyro.plate(\"time\", T, dim=-3)\n\n    # Configure reparametrization (which does not affect model density).\n    reparam = {}\n    if \"reparam\" in model_type:\n        local_time = local_time + pyro.param(\n            \"local_time\", lambda: torch.zeros(P, S)\n        )  # [T, P, S]\n        reparam[\"coef\"] = LocScaleReparam()\n        if \"skip\" not in model_type:\n            reparam[\"rate_loc\"] = LocScaleReparam()\n        reparam[\"init_loc\"] = LocScaleReparam()\n        reparam[\"rate\"] = LocScaleReparam()\n        reparam[\"init\"] = LocScaleReparam()\n    with poutine.reparam(config=reparam):\n\n        # Sample global random variables.\n        coef_scale = pyro.sample(\"coef_scale\", dist.LogNormal(-4, 2))\n        if \"skip\" not in model_type:\n            rate_loc_scale = pyro.sample(\"rate_loc_scale\", dist.LogNormal(-4, 2))\n        init_loc_scale = pyro.sample(\"init_loc_scale\", dist.LogNormal(0, 2))\n        rate_scale = pyro.sample(\"rate_scale\", dist.LogNormal(-4, 2))\n        init_scale = pyro.sample(\"init_scale\", dist.LogNormal(0, 2))\n        if \"poisson\" in model_type:\n            pois_loc = pyro.sample(\"pois_loc\", dist.Normal(0, 4))\n            pois_scale = pyro.sample(\"pois_scale\", dist.LogNormal(0, 4))\n\n        # Assume relative growth rate depends strongly on mutations and weakly\n        # on strain and place. 
Assume initial infections depend strongly on\n # strain and place.\n Dist = dist.Logistic if \"sparse\" in model_type else dist.Normal\n coef = pyro.sample(\"coef\", Dist(torch.zeros(F), coef_scale).to_event(1)) # [F]\n with strain_plate:\n rate_loc_loc = 0.01 * coef @ features.T\n if \"skip\" in model_type:\n rate_loc = pyro.deterministic(\"rate_loc\", rate_loc_loc) # [S]\n else:\n rate_loc = pyro.sample(\n \"rate_loc\", dist.Normal(rate_loc_loc, rate_loc_scale)\n ) # [S]\n init_loc = pyro.sample(\"init_loc\", dist.Normal(0, init_loc_scale)) # [S]\n with place_plate, strain_plate:\n rate = pyro.sample(\"rate\", dist.Normal(rate_loc, rate_scale)) # [P, S]\n init = pyro.sample(\"init\", dist.Normal(init_loc, init_scale)) # [P, S]\n\n # Finally observe counts.\n logits = init + rate * local_time # [T, P, S]\n if forecast_steps is None: # During inference.\n if \"poisson\" in model_type:\n with time_plate, place_plate:\n pois = pyro.sample(\"pois\", dist.LogNormal(pois_loc, pois_scale))\n # This softmax() breaks the strain_plate, but is more\n # numerically stable than exp(). AutoGaussian inference will be\n # approximate with softmax(), but would be intractable with\n # exp() and a second_strain_plate.\n lambda_ = (pois * logits.softmax(-1)).clamp_(min=1e-6)\n with time_plate, place_plate, strain_plate:\n pyro.sample(\n \"obs\",\n dist.Poisson(lambda_, is_sparse=True),\n obs=weekly_strains,\n ) # [T, P, S]\n else:\n with time_plate, place_plate:\n pyro.sample(\n \"obs\",\n dist.Multinomial(\n logits=logits[..., None, :], validate_args=False\n ),\n obs=weekly_strains[..., None, :],\n ) # [T, P, 1, S]\n else: # During prediction.\n with time_plate, place_plate, strain_plate:\n pyro.deterministic(\"probs\", logits.softmax(-1))\n\n\nclass InitLocFn:\n \"\"\"\n Initializer for latent variables.\n\n This is passed as the ``init_loc_fn`` to guides.\n \"\"\"\n\n def __init__(self, dataset):\n # Initialize init.\n init = dataset[\"weekly_strains\"].sum(0) # [P, S]\n init.add_(1 / init.size(-1)).div_(init.sum(-1, True))\n init.log_().sub_(init.median(-1, True).values)\n self.init = init # [P, S]\n self.init_decentered = init / 2\n self.init_loc = init.mean(0) # [S]\n self.init_loc_decentered = self.init_loc / 2\n assert not torch.isnan(self.init).any()\n self.pois = dataset[\"weekly_strains\"].sum(-1, True).clamp(min=0.1) # [T, P, 1]\n logger.info(f\"init stddev = {self.init.std():0.3g}\")\n\n def __call__(self, site):\n name = site[\"name\"]\n shape = site[\"fn\"].shape()\n if hasattr(self, name):\n result = getattr(self, name)\n assert result.shape == shape\n return result\n if name in (\"coef_scale\", \"init_scale\", \"init_loc_scale\"):\n return torch.ones(shape)\n if name == \"logits_scale\":\n return torch.full(shape, 0.002)\n if name in (\"rate_loc_scale\", \"rate_scale\", \"place_scale\", \"strain_scale\"):\n return torch.full(shape, 0.01)\n if name in (\n \"rate_loc\",\n \"rate_loc_decentered\",\n \"coef\",\n \"coef_decentered\",\n \"rate\",\n \"rate_decentered\",\n ):\n return torch.rand(shape).sub_(0.5).mul_(0.01)\n if name == \"coef_loc\":\n return torch.rand(shape).sub_(0.5).mul_(0.01).add_(1.0)\n if name == \"pois_loc\":\n return self.pois.log().mean()\n if name == \"pois_scale\":\n return self.pois.log().std()\n if name == \"pois\":\n return self.pois\n raise ValueError(f\"InitLocFn found unhandled site {repr(name)}; please update.\")\n\n\nclass Guide(AutoGuideList):\n \"\"\"\n Custom guide for large-scale inference.\n\n This combines a low-rank multivariate normal guide over small 
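`InitLocFn.__init__` above seeds `init` with median-shifted log frequencies; the same arithmetic traced on a toy count tensor:

```python
import torch

weekly_strains = torch.tensor([[[8.0, 2.0, 0.0]]])  # [T=1, P=1, S=3]
init = weekly_strains.sum(0)  # [P, S]
init.add_(1 / init.size(-1)).div_(init.sum(-1, True))  # smoothed frequencies
init.log_().sub_(init.median(-1, True).values)
print(init)  # log-frequencies, shifted so the per-place median is zero
```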
variables\n with a mean field guide over remaining latent variables.\n \"\"\"\n\n def __init__(self, model, init_loc_fn, init_scale, rank):\n super().__init__(model)\n\n # Jointly estimate globals, mutation coefficients, and strain coefficients.\n mvn = [\n \"coef_scale\",\n \"rate_loc_scale\",\n \"init_loc_scale\",\n \"rate_scale\",\n \"init_scale\",\n \"coef\",\n \"coef_decentered\",\n \"rate_loc\",\n \"rate_loc_decentered\",\n \"init_loc\",\n \"init_loc_decentered\",\n ]\n self.append(\n AutoLowRankMultivariateNormal(\n poutine.block(model, expose=mvn),\n init_loc_fn=init_loc_fn,\n init_scale=init_scale,\n rank=rank,\n )\n )\n model = poutine.block(model, hide=mvn)\n\n # Mean-field estimate all remaining latent variables.\n self.append(AutoNormal(model, init_loc_fn=init_loc_fn, init_scale=init_scale))\n\n\nclass GaussianGuide(AutoGuideList):\n def __init__(self, model, init_loc_fn, init_scale):\n super().__init__(model)\n from pyro.infer.autoguide import AutoGaussian\n\n self.append(\n AutoGaussian(\n poutine.block(model, hide_fn=self.hide_fn_1),\n init_loc_fn=init_loc_fn,\n init_scale=0.01,\n backend=\"funsor\",\n )\n )\n self.append(\n AutoNormal(\n poutine.block(model, hide_fn=self.hide_fn_2),\n init_loc_fn=init_loc_fn,\n init_scale=0.01,\n )\n )\n\n @staticmethod\n def hide_fn_1(msg):\n return msg[\"type\"] == \"sample\" and \"pois\" in msg[\"name\"]\n\n @staticmethod\n def hide_fn_2(msg):\n return msg[\"type\"] == \"sample\" and \"pois\" not in msg[\"name\"]\n\n\nclass RegressiveGuide(AutoRegressiveMessenger):\n def get_posterior(self, name, prior):\n if name == \"coef\":\n if not hasattr(self, \"coef\"):\n # Initialize.\n self.coef = PyroModule()\n n = prior.shape()[-1]\n rank = 100\n assert n > 1\n init_loc = self.init_loc_fn({\"name\": name, \"fn\": prior})\n self.coef.loc = PyroParam(init_loc, event_dim=1)\n self.coef.scale = PyroParam(\n torch.full((n,), self._init_scale),\n event_dim=1,\n constraint=constraints.positive,\n )\n self.coef.cov_factor = PyroParam(\n torch.empty(n, rank).normal_(0, 1 / rank ** 0.5),\n event_dim=2,\n )\n scale = self.coef.scale\n cov_factor = self.coef.cov_factor * scale.unsqueeze(-1)\n cov_diag = scale * scale\n return dist.LowRankMultivariateNormal(self.coef.loc, cov_factor, cov_diag)\n\n return super().get_posterior(name, prior)\n\n\[email protected]_grad()\[email protected](mask=False)\ndef predict(\n model,\n guide,\n dataset,\n model_type,\n *,\n num_samples=1000,\n vectorize=None,\n save_params=(\"rate\", \"init\", \"probs\"),\n forecast_steps=0,\n) -> dict:\n def get_conditionals(data):\n trace = poutine.trace(poutine.condition(model, data)).get_trace(\n dataset, model_type, forecast_steps=forecast_steps\n )\n return {\n name: site[\"value\"].detach()\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\" and not site_is_subsample(site)\n if name != \"obs\"\n }\n\n # Compute median point estimate.\n result: dict = defaultdict(dict)\n for name, value in get_conditionals(guide.median(dataset)).items():\n if value.numel() < 1e5 or name in save_params:\n result[\"median\"][name] = value\n\n # Compute moments.\n save_params = {\n k for k, v in result[\"median\"].items() if v.numel() < 1e5 or k in save_params\n }\n if vectorize is None:\n vectorize = result[\"median\"][\"probs\"].numel() < 1e6\n if vectorize:\n with pyro.plate(\"particles\", num_samples, dim=-4):\n samples = get_conditionals(guide())\n for k, v in samples.items():\n if k in save_params:\n result[\"mean\"][k] = v.mean(0).squeeze()\n result[\"std\"][k] = 
v.std(0).squeeze()\n else:\n stats = StatsOfDict({k: CountMeanVarianceStats for k in save_params})\n for _ in range(num_samples):\n stats.update(get_conditionals(guide()))\n print(\".\", end=\"\", flush=True)\n for name, stats_ in stats.get().items():\n if \"mean\" in stats_:\n result[\"mean\"][name] = stats_[\"mean\"]\n if \"variance\" in stats_:\n result[\"std\"][name] = stats_[\"variance\"].sqrt()\n return dict(result)\n\n\ndef fit_svi(\n dataset: dict,\n *,\n model_type: str,\n guide_type: str,\n cond_data={},\n forecast_steps=0,\n learning_rate=0.05,\n learning_rate_decay=0.1,\n num_steps=3001,\n num_samples=1000,\n clip_norm=10.0,\n rank=200,\n jit=True,\n log_every=50,\n seed=20210319,\n check_loss=False,\n) -> dict:\n \"\"\"\n Fits a variational posterior using stochastic variational inference (SVI).\n \"\"\"\n start_time = default_timer()\n\n logger.info(f\"Fitting {guide_type} guide via SVI\")\n pyro.set_rng_seed(seed)\n pyro.clear_param_store()\n param_store = pyro.get_param_store()\n\n # Initialize guide so we can count parameters and register hooks.\n cond_data = {k: torch.as_tensor(v) for k, v in cond_data.items()}\n model_ = poutine.condition(model, cond_data)\n init_loc_fn = InitLocFn(dataset)\n Elbo = JitTrace_ELBO if jit else Trace_ELBO\n if guide_type == \"map\":\n guide = AutoDelta(model_, init_loc_fn=init_loc_fn)\n elif guide_type == \"normal\":\n guide = AutoNormal(model_, init_loc_fn=init_loc_fn, init_scale=0.01)\n elif guide_type == \"full\":\n guide = AutoLowRankMultivariateNormal(\n model_, init_loc_fn=init_loc_fn, init_scale=0.01, rank=rank\n )\n elif guide_type == \"structured\":\n guide = AutoStructured(\n model_,\n init_loc_fn=init_loc_fn,\n init_scale=0.01,\n conditionals=defaultdict(\n lambda: \"normal\",\n rate_scale=\"delta\",\n init_loc_scale=\"delta\",\n init_scale=\"delta\",\n coef=\"mvn\",\n coef_decentered=\"mvn\",\n ),\n )\n elif guide_type == \"gaussian\":\n guide = GaussianGuide(model_, init_loc_fn=init_loc_fn, init_scale=0.01)\n elif guide_type == \"regressive\":\n guide = RegressiveGuide(model_, init_loc_fn=init_loc_fn, init_scale=0.01)\n else:\n guide = Guide(model_, init_loc_fn=init_loc_fn, init_scale=0.01, rank=rank)\n # This initializes the guide:\n latent_shapes = {k: v.shape for k, v in guide(dataset, model_type).items()}\n latent_numel = {k: v.numel() for k, v in latent_shapes.items()}\n logger.info(\n \"\\n\".join(\n [f\"Model has {sum(latent_numel.values())} latent variables of shapes:\"]\n + [f\" {k} {tuple(v)}\" for k, v in latent_shapes.items()]\n )\n )\n param_shapes = {k: v.shape for k, v in pyro.get_param_store().named_parameters()}\n param_numel = {k: v.numel() for k, v in param_shapes.items()}\n logger.info(\n \"\\n\".join(\n [f\"Guide has {sum(param_numel.values())} parameters of shapes:\"]\n + [f\" {k} {tuple(v)}\" for k, v in param_shapes.items()]\n )\n )\n\n # Log gradient norms during inference.\n series: dict = defaultdict(list)\n\n def hook(g, series):\n series.append(torch.linalg.norm(g.reshape(-1), math.inf).item())\n\n for name, value in pyro.get_param_store().named_parameters():\n value.register_hook(functools.partial(hook, series=series[name]))\n\n def optim_config(param_name):\n config: dict = {\n \"lr\": learning_rate,\n \"lrd\": learning_rate_decay ** (1 / num_steps),\n \"clip_norm\": clip_norm,\n }\n scalars = [k for k, v in latent_numel.items() if v == 1]\n if any(\"locs.\" + s in name for s in scalars):\n config[\"lr\"] *= 0.2\n elif \"scales\" in param_name:\n config[\"lr\"] *= 0.1\n elif \"scale_tril\" in 
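The `lrd` exponent in `optim_config` above is chosen so that the per-step decay compounds to exactly `learning_rate_decay` over the full run; a quick check:

```python
learning_rate, learning_rate_decay, num_steps = 0.05, 0.1, 3001
lrd = learning_rate_decay ** (1 / num_steps)      # per-step multiplier
final_lr = learning_rate * lrd ** num_steps       # lr after all steps
assert abs(final_lr - learning_rate * learning_rate_decay) < 1e-12
```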
param_name:\n config[\"lr\"] *= 0.05\n elif \"factors\" in param_name or \"prec_sqrts\" in param_name:\n config[\"lr\"] *= 0.05\n elif \"weight_\" in param_name:\n config[\"lr\"] *= 0.01\n elif \"weight\" in param_name:\n config[\"lr\"] *= 0.03\n elif \"_centered\" in param_name:\n config[\"lr\"] *= 0.1\n return config\n\n optim = ClippedAdam(optim_config)\n elbo = Elbo(max_plate_nesting=3, ignore_jit_warnings=True)\n svi = SVI(model_, guide, optim, elbo)\n losses = []\n num_obs = dataset[\"weekly_strains\"].count_nonzero()\n for step in range(num_steps):\n loss = svi.step(dataset=dataset, model_type=model_type)\n assert not math.isnan(loss)\n losses.append(loss)\n median = guide.median()\n for name, value in median.items():\n if value.numel() == 1:\n series[name].append(float(value))\n if log_every and step % log_every == 0:\n logger.info(\n \" \".join(\n [f\"step {step: >4d} L={loss / num_obs:0.6g}\"]\n + [\n \"{}={:0.3g}\".format(\n \"\".join(p[0] for p in k.split(\"_\")).upper(), v.item()\n )\n for k, v in median.items()\n if v.numel() == 1\n ]\n )\n )\n if check_loss and step >= 50:\n prev = torch.tensor(losses[-50:-25], device=\"cpu\").median().item()\n curr = torch.tensor(losses[-25:], device=\"cpu\").median().item()\n assert (curr - prev) < num_obs, \"loss is increasing\"\n\n result = predict(\n model_,\n guide,\n dataset,\n model_type,\n num_samples=num_samples,\n forecast_steps=forecast_steps,\n )\n result[\"losses\"] = losses\n series[\"loss\"] = losses\n result[\"series\"] = dict(series)\n result[\"params\"] = {\n k: v.detach().float().cpu().clone()\n for k, v in param_store.items()\n if v.numel() < 1e7\n }\n result[\"walltime\"] = default_timer() - start_time\n return result\n\n\[email protected]_grad()\ndef log_stats(dataset: dict, result: dict) -> dict:\n \"\"\"\n Logs statistics of predictions and model fit in the ``result`` of\n ``fit_svi()``.\n\n :param dict dataset: The dataset dictionary.\n :param dict result: The output of :func:`fit_svi`.\n :returns: A dictionary of statistics.\n \"\"\"\n stats = {k: float(v) for k, v in result[\"median\"].items() if v.numel() == 1}\n stats[\"loss\"] = float(np.median(result[\"losses\"][-100:]))\n mutations = dataset[\"mutations\"]\n mean = result[\"mean\"][\"coef\"].cpu()\n if not mean.shape:\n return stats # Work around error in map estimation.\n\n # Statistical significance.\n std = result[\"std\"][\"coef\"].cpu()\n sig = mean.abs() / std\n logger.info(f\"|μ|/σ [median,max] = [{sig.median():0.3g},{sig.max():0.3g}]\")\n stats[\"|μ|/σ median\"] = sig.median()\n stats[\"|μ|/σ max\"] = sig.max()\n\n # Effects of individual mutations.\n for name in [\"S:D614G\", \"S:N501Y\", \"S:E484K\", \"S:L452R\"]:\n if name not in mutations:\n continue\n i = mutations.index(name)\n m = mean[i] * 0.01\n s = std[i] * 0.01\n logger.info(f\"ΔlogR({name}) = {m:0.3g} ± {s:0.2f}\")\n stats[f\"ΔlogR({name}) mean\"] = m\n stats[f\"ΔlogR({name}) std\"] = s\n\n # Growth rates of individual lineages.\n try:\n i = dataset[\"lineage_id\"][\"A\"]\n rate_A = result[\"mean\"][\"rate\"][..., i].mean(0)\n except KeyError:\n rate_A = result[\"mean\"][\"rate\"].median()\n for s in [\"B.1.1.7\", \"B.1.617.2\"]:\n i = dataset[\"lineage_id\"][s]\n rate = result[\"median\"][\"rate\"][..., i].mean()\n R_RA = (rate - rate_A).exp()\n logger.info(f\"R({s})/R(A) = {R_RA:0.3g}\")\n stats[f\"R({s})/R(A)\"] = R_RA\n\n # Accuracy of mutation-only model, ie without region-local effects.\n true = dataset[\"weekly_strains\"] + 1e-20 # avoid nans\n counts = true.sum(-1, True)\n 
true_probs = true / counts\n local_time = dataset[\"local_time\"][..., None]\n if \"local_time\" in result[\"params\"]:\n local_time = local_time + result[\"params\"][\"local_time\"].to(local_time.device)\n rate = 0.01 * result[\"median\"][\"coef\"] @ dataset[\"features\"].T\n pred = result[\"median\"][\"init\"] + rate * local_time\n pred -= pred.logsumexp(-1, True) # apply log sigmoid function\n kl = true.mul(true_probs.log() - pred).sum(-1)\n kl = stats[\"naive KL\"] = kl.sum() / counts.sum() # in units of nats / observation\n error = (pred.exp() - true_probs) * counts ** 0.5 # scaled by Poisson stddev\n mae = stats[\"naive MAE\"] = error.abs().sum(-1).mean()\n rmse = stats[\"naive RMSE\"] = error.square().sum(-1).mean().sqrt()\n logger.info(f\"naive KL = {kl:0.4g}, MAE = {mae:0.4g}, RMSE = {rmse:0.4g}\")\n\n # Posterior predictive error.\n pred = result[\"median\"][\"probs\"][: len(true)] + 1e-20 # truncate, avoid nans\n kl = true.mul(true_probs.log() - pred.log()).sum([0, -1])\n error = (pred - true_probs) * counts ** 0.5 # scaled by Poisson stddev\n mae = error.abs().mean(0) # average over time\n mse = error.square().mean(0) # average over time\n stats[\"MAE\"] = mae.sum(-1).mean() # average over region\n stats[\"RMSE\"] = mse.sum(-1).mean().sqrt() # root average over region\n stats[\"KL\"] = kl.sum() / counts.sum() # in units of nats / observation\n logger.info(\"KL = {KL:0.4g}, MAE = {MAE:0.4g}, RMSE = {RMSE:0.4g}\".format(**stats))\n\n # Examine the MSE and RMSE over a few regions of interest.\n queries = {\n \"England\": [\"B.1.1.7\"],\n # \"England\": [\"B.1.1.7\", \"B.1.177\", \"B.1.1\", \"B.1\"],\n # \"USA / California\": [\"B.1.1.7\", \"B.1.429\", \"B.1.427\", \"B.1.2\", \"B.1\", \"P.1\"],\n }\n for place, strains in queries.items():\n matches = [p for name, p in dataset[\"location_id\"].items() if place in name]\n if not matches:\n continue\n assert len(matches) == 1, matches\n p = matches[0]\n stats[f\"{place} KL\"] = kl[p].sum() / true[:, p].sum()\n stats[f\"{place} MAE\"] = mae[p].sum()\n stats[f\"{place} RMSE\"] = mse[p].sum().sqrt()\n logger.info(\n \"{}\\tKL = {:0.3g}, MAE = {:0.3g}, RMSE = {:0.3g}\".format(\n place,\n stats[f\"{place} KL\"],\n stats[f\"{place} MAE\"],\n stats[f\"{place} RMSE\"],\n )\n )\n\n for strain in strains:\n s = dataset[\"lineage_id\"][strain]\n stats[f\"{place} {strain} MAE\"] = mae[p, s]\n stats[f\"{place} {strain} RMSE\"] = mse[p, s].sqrt()\n logger.info(\n \"{} {}\\tMAE = {:0.3g}, RMSE = {:0.3g}\".format(\n place,\n strain,\n stats[f\"{place} {strain} MAE\"],\n stats[f\"{place} {strain} RMSE\"],\n )\n )\n\n return {k: float(v) for k, v in stats.items()}\n\n\[email protected]_grad()\ndef log_holdout_stats(fits: dict) -> dict:\n \"\"\"\n Logs statistics comparing multiple results from ``fit_svi``.\n \"\"\"\n assert len(fits) > 1\n fits = list(fits.items())\n stats = {}\n for i, (name1, fit1) in enumerate(fits[:-1]):\n for name2, fit2 in fits[i + 1 :]:\n # Compute mutation similarity.\n mutations = sorted(set(fit1[\"mutations\"]) & set(fit2[\"mutations\"]))\n medians = []\n for fit in (fit1, fit2):\n mutation_id = {m: i for i, m in enumerate(fit[\"mutations\"])}\n idx = torch.tensor([mutation_id[m] for m in mutations])\n medians.append(fit[\"median\"][\"coef\"][idx] * 0.01)\n error = medians[0] - medians[1]\n mutation_std = torch.cat(medians).std().item()\n mutation_rmse = error.square().mean().sqrt().item()\n mutation_mae = error.abs().mean().item()\n mutation_correlation = pearson_correlation(medians[0], medians[1]).item()\n\n # Compute 
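The Poisson-scaled error used above, on a toy two-strain example; multiplying by `counts ** 0.5` expresses errors in units of one Poisson standard deviation:

```python
import torch

true = torch.tensor([[6.0, 4.0]]) + 1e-20  # observed counts, [P=1, S=2]
counts = true.sum(-1, True)
true_probs = true / counts
pred_probs = torch.tensor([[0.5, 0.5]])    # hypothetical model prediction
error = (pred_probs - true_probs) * counts ** 0.5
mae = error.abs().sum(-1).mean()
print(float(mae))  # ~0.632 for this toy example
```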
lineage similarity.\n means = []\n for fit in (fit1, fit2):\n rate = fit[\"mean\"][\"rate\"]\n if rate.dim() == 2:\n rate = rate.mean(0)\n means.append(rate)\n error = means[0] - means[1]\n lineage_std = torch.cat(means).std().item()\n lineage_rmse = error.square().mean().sqrt().item()\n lineage_mae = error.abs().mean().item()\n lineage_correlation = pearson_correlation(means[0], means[1]).item()\n\n # Print stats.\n logger.info(\n f\"{name1} vs {name2} mutations: \"\n f\"ρ = {mutation_correlation:0.3g}, \"\n f\"RMSE = {mutation_rmse:0.3g}, \"\n f\"MAE = {mutation_mae:0.3g}\"\n )\n logger.info(\n f\"{name1} vs {name2} lineages: \"\n f\"ρ = {lineage_correlation:0.3g}, \"\n f\"RMSE = {lineage_rmse:0.3g}, \"\n f\"MAE = {lineage_mae:0.3g}\"\n )\n\n # Save stats.\n stats[\"mutation_corr\"] = mutation_correlation\n stats[\"mutation_rmse\"] = mutation_rmse\n stats[\"mutation_mae\"] = mutation_mae\n stats[\"mutation_stddev\"] = mutation_std\n stats[\"lineage_corr\"] = lineage_correlation\n stats[\"lineage_rmse\"] = lineage_rmse\n stats[\"lineage_mae\"] = lineage_mae\n stats[\"lineage_stdev\"] = lineage_std\n\n return {k: float(v) for k, v in stats.items()}\n" }, { "alpha_fraction": 0.7523409724235535, "alphanum_fraction": 0.7720374464988708, "avg_line_length": 41.42465591430664, "blob_id": "634fa4378ea78d17e56065b7a00f2ee2d21db819", "content_id": "19e51a69f753bf1416b752847b4a5e81f43896e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3097, "license_type": "permissive", "max_line_length": 475, "num_lines": 73, "path": "/paper/README.md", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Images and data for publication\n\nThis directory contains figures and tables output by the [PyR<sub>0</sub>\nmodel](https://www.medrxiv.org/content/10.1101/2021.09.07.21263228v1). 
These\noutputs are aggregated to weeks, PANGO lineages, and amino acid changes.\n\nFigures and tables are generated by first running preprocessing and inference,\nthen postprocessing with the following Jupyter notebooks:\n[ `mutrans.ipynb` ](../mutrans.ipynb),\n[ `mutrans_gene.ipynb` ](../mutrans_gene.ipynb),\n[ `mutrans_prediction.ipynb` ](../mutrans_prediction.ipynb),\n[ `mutrans_backtesting.ipynb` ](../mutrans_backtesting.ipynb).\n\n## Data tables\n\n- [Mutation table](mutations.tsv) is ranked by statistical significance.\n The \"mean\" field denotes the estimated effect on log growth rate of each mutation.\n- [Lineage table](strains.tsv) is ranked by growth rate.\n\n## Manhattan plots\n\n![Manhattan plot of entire genome](manhattan.png)\n![Manhattan plot of N gene](manhattan_N.png)\n![Manhattan plot of S gene](manhattan_S.png)\n![Manhattan plot of ORF1a gene](manhattan_ORF1a.png)\n![Manhattan plot of ORF1b gene](manhattan_ORF1b.png)\n\n## Information density plots\n\n![ELBO of various genes](vary_gene_elbo.png)\n![ELBO of various NSPs](vary_nsp_elbo.png)\n\n## Volcano plot\n\n![Volcano plot of mutations](volcano.png)\n\n## Strain characterization plots\n\n![Growth rate versus emergence date](strain_emergence.png)\n![Growth rate versus case count](strain_prevalence.png)\n![Forecast](forecast.png)\n![Deep scanning](deep_scanning.png)\n\n## Cross validation plots\n\nThe following plots assess robustness via 2-fold crossvalidation, splitting data into Europe versus (World w/o Europe).\n\n![Lineage correlation](lineage_agreement.png)\n![Mutation correlation](mutation_agreement.png)\n![Lineage box plots](strain_europe_boxplot.png)\n![Mutation box plots](mutation_europe_boxplot_rankby_s.png)\n![Mutation box plots](mutation_europe_boxplot_rankby_t.png)\n![Lineage prediction](lineage_prediction.png)\n\n## Misc plots\n\n![Logistic distribution](logistic_distribution.png)\n\n## Acknowledgements\n\nThe aggregated model outputs in this directory were generated from data inputs\nincluding GISAID records (https://gisaid.org), PANGO lineage classifications\n(https://cov-lineages.org), and case count time series from Johns-Hopkins\nUniversity (https://github.com/CSSEGISandData/COVID-19).\n\nWe gratefully acknowledge all data contributors, i.e. the Authors and their Originating laboratories responsible for obtaining the specimens, and their Submitting laboratories for generating the genetic sequence and metadata and sharing via the GISAID initiative [1,2] on which this research is based. A total of 2,160,748 submissions are included in this study. A complete list of 2.1million accession numbers are available in [accession_ids.txt.zip](accession_ids.txt.zip).\n\n1. GISAID Initiative and global contributors,\n EpiCoV(TM) human coronavirus 2019 database.\n GISAID (2020), (available at https://gisaid.org).\n2. S. Elbe, G. Buckland-Merrett,\n Data, disease and diplomacy: GISAID's innovative contribution to global health.\n Glob Chall. 
1, 33-46 (2017).\n" }, { "alpha_fraction": 0.4466620683670044, "alphanum_fraction": 0.45216792821884155, "avg_line_length": 30.586956024169922, "blob_id": "840971a53cae414d6f95f415ee72e8ba9fb5ba13", "content_id": "e2a68f97737f21bc8ebc0e4d2195b303ff8967c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1453, "license_type": "permissive", "max_line_length": 86, "num_lines": 46, "path": "/pyrocov/align.py", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "# Copyright Contributors to the Pyro-Cov project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nimport mappy\n\n# Source: https://samtools.github.io/hts-specs/SAMv1.pdf\nCIGAR_CODES = \"MIDNSHP=X\" # Note minimap2 uses only \"MIDNSH\"\n\n\nclass Differ:\n def __init__(self, ref, lb=0, ub=math.inf, **kwargs):\n self.ref = ref\n self.lb = lb\n self.ub = ub\n self.aligner = mappy.Aligner(seq=ref, **kwargs)\n\n def diff(self, seq):\n ref = self.ref\n lb = self.lb\n ub = self.ub\n diff = []\n for hit in self.aligner.map(seq):\n ref_pos = hit.r_st\n if ref_pos < lb:\n continue\n if ref_pos >= ub:\n break\n\n seq_pos = hit.q_st\n for size, code in hit.cigar:\n if code == 0: # M\n if seq[seq_pos : seq_pos + size] != ref[ref_pos : ref_pos + size]:\n for i in range(min(size, ub - ref_pos)):\n s = seq[seq_pos + i]\n if s != \"N\" and s != ref[ref_pos + i]:\n diff.append((ref_pos + i, \"X\", s))\n elif code == 1: # I\n diff.append((ref_pos, \"I\", seq[seq_pos : seq_pos + size]))\n elif code == 2: # D\n diff.append((ref_pos, \"D\", size))\n ref_pos += size\n seq_pos += size\n\n return diff\n" }, { "alpha_fraction": 0.6127527356147766, "alphanum_fraction": 0.6889579892158508, "avg_line_length": 34.07272720336914, "blob_id": "d3b470ee999937ff6af69a15f95abddcb07d5c46", "content_id": "cb67b8e8f666072e979b8c5185070c250ce05f59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1929, "license_type": "permissive", "max_line_length": 291, "num_lines": 55, "path": "/README.md", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "[![Build Status](https://github.com/broadinstitute/pyro-cov/workflows/CI/badge.svg)](https://github.com/broadinstitute/pyro-cov/actions)\n\n# Pyro models for SARS-CoV-2 analysis\n\nThis repository is described in the paper [\"Analysis of 2.1 million SARS-CoV-2 genomes identifies mutations associated with transmissibility\"](https://www.medrxiv.org/content/10.1101/2021.09.07.21263228v1). Figures and supplementary data for that paper are in the [paper/](paper/) directory.\n\n## Reproducing\n\n1. Clone this repo into say `~/pyro-cov`\n2. `cd ~/pyro-cov`\n3. `make install` # installs dependencies\n4. `conda install nodejs`\n5. `npm install --global @nextstrain/nextclade`\n6. Work with GISAID to get a data agreement.\n7. Define environment variables `GISAID_USERNAME`, `GISAID_PASSWORD`, and `GISAID_FEED`\n8. `make update` # clones other data sources\n9. `python mutrans.py --vary-holdout`\n10. generate plots by running various jupyter notebooks, e.g. [mutrans.ipynb](mutrans.ipynb)\n\n## Installing\n\n```sh\nmake install\n```\nor literally\n```sh\npip install -e .\n```\n\n## Citing\n\nIf you use this software, please consider citing:\n\n```\n@article {Obermeyer2021.09.07.21263228,\n author = {Obermeyer, Fritz and\n Schaffner, Stephen F. and\n Jankowiak, Martin and\n Barkas, Nikolaos and\n Pyle, Jesse D. and\n Park, Daniel J. 
and\n MacInnis, Bronwyn L. and\n Luban, Jeremy and\n Sabeti, Pardis C. and\n Lemieux, Jacob E.},\n title = {Analysis of 2.1 million SARS-CoV-2 genomes identifies mutations associated with transmissibility},\n elocation-id = {2021.09.07.21263228},\n year = {2021},\n doi = {10.1101/2021.09.07.21263228},\n publisher = {Cold Spring Harbor Laboratory Press},\n URL = {https://www.medrxiv.org/content/early/2021/09/13/2021.09.07.21263228},\n eprint = {https://www.medrxiv.org/content/early/2021/09/13/2021.09.07.21263228.full.pdf},\n journal = {medRxiv}\n}\n```\n" }, { "alpha_fraction": 0.6928080916404724, "alphanum_fraction": 0.7228044867515564, "avg_line_length": 30.44318199157715, "blob_id": "d613b067d95426e125c4c38fe5e03fc4e5fb6ac6", "content_id": "8b90f329acd11a10abdc1f36ef8170b9791b255e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2767, "license_type": "permissive", "max_line_length": 138, "num_lines": 88, "path": "/Makefile", "repo_name": "majagarbulinska/pyro-cov", "src_encoding": "UTF-8", "text": "SHELL := /bin/bash\n\ndata:\n\tln -sf ~/Google\\ Drive\\ File\\ Stream/Shared\\ drives/Pyro\\ CoV data\n\ninstall: install-nextalign FORCE\n\tpip install -e .[test]\n\ninstall-nextalign:\n\tcurl -fsSL \"https://github.com/nextstrain/nextclade/releases/latest/download/nextalign-MacOS-x86_64\" -o \"nextalign\" && chmod +x nextalign\n\ninstall-nextclade:\n\tcurl -fsSL \"https://github.com/nextstrain/nextclade/releases/latest/download/nextclade-MacOS-x86_64\" -o \"nextclade\" && chmod +x nextclade\n \ninstall-nextalign-linux:\n\tcurl -fsSL \"https://github.com/nextstrain/nextclade/releases/download/1.2.0/nextalign-Linux-x86_64\" -o nextalign && chmod +x nextalign\n\ninstall-nextclade-linux:\n\tcurl -fsSL \"https://github.com/nextstrain/nextclade/releases/download/1.2.0/nextclade-Linux-x86_64\" -o nextclade && chmod +x nextclade\n\nlint: FORCE\n\tflake8\n\tblack --extend-exclude=\\.ipynb --check .\n\tisort --check .\n\tpython scripts/update_headers.py --check\n\tmypy .\n\nformat: FORCE\n\tblack --extend-exclude=\\.ipynb .\n\tisort .\n\tpython scripts/update_headers.py\n\ntest: lint data FORCE\n\tpytest -v -n auto test\n\tpython mutrans.py --test -n 2 -s 4\n\nupdate: FORCE\n\t./pull_gisaid.sh\n\tpython git_pull.py cov-lineages/pango-designation\n\tpython git_pull.py CSSEGISandData/COVID-19\n\tpython git_pull.py nextstrain/nextclade\n\ttime nice python preprocess_gisaid.py\n\ttime python featurize_nextclade.py\n\nssh:\n\tgcloud compute ssh --project pyro-284215 --zone us-central1-c \\\n\t pyro-cov-fritzo-vm -- -AX\n\npush:\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t results/gisaid.columns.pkl \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t results/nextclade.features.pt \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/\n\npull:\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/mutrans.pt \\\n\t results/\n\n# This data is needed for mutrans.ipynb\npull-data:\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/\\{gisaid.columns.pkl,gisaid.stats.pkl,nextclade.features.pt,nextclade.counts.pkl\\} \\\n\t results/\n\npull-grid:\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n --recurse --compress 
\\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/grid_search.tsv \\\n\t results/\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/mutrans.grid.pt \\\n\t results/\n\npull-leaves:\n\tgcloud compute scp --project pyro-284215 --zone us-central1-c \\\n\t --recurse --compress \\\n\t pyro-cov-fritzo-vm:~/pyro-cov/results/mutrans.vary_leaves.pt \\\n\t results/\n\nFORCE:\n" } ]
12
QUZF/python
https://github.com/QUZF/python
09389de3c5f1eb485a619f148fe6a585f9b3605f
48467f42aeee5f0b956776479c2332b046ce354a
a1e52d963485f9e9d534c8d4f65b0d173c3b30b1
refs/heads/master
2020-04-09T19:41:32.730633
2019-03-19T06:01:40
2019-03-19T06:01:40
160550808
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5952615737915039, "alphanum_fraction": 0.6120434403419495, "avg_line_length": 23.707317352294922, "blob_id": "cea7d732920c584c5ac0fd178e81614402961551", "content_id": "fd8a5c228e32896a7213add879cb12e76d998e86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/test_blog/test_case/blog_home/test_home.py", "repo_name": "QUZF/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# Author: QUZF\n# Datetime: 2019/1/25 15:47\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nimport unittest\nimport time\n\n\nclass BlogHome(unittest.TestCase):\n \"\"\"博客首页\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Firefox()\n cls.driver.get(\"http://www.cnblogs.com/JenniferQZF/\")\n cls.driver.implicitly_wait(5)\n\n @classmethod\n def tearDownClass(cls):\n time.sleep(3)\n cls.driver.quit()\n\n def test_01(self):\n \"\"\"验证元素存在:博客园\"\"\"\n locator = (\"id\", \"blog_nav_sitehome\")\n text = \"博客园\"\n result = EC.text_to_be_present_in_element(locator, text)(self.driver)\n self.assertTrue(result)\n\n def test_02(self):\n \"\"\"验证元素存在:首页\"\"\"\n locator = (\"id\", \"blog_nav_myhome\")\n text = \"首页\"\n result = EC.text_to_be_present_in_element(locator, text)(self.driver)\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6101794838905334, "alphanum_fraction": 0.6201823949813843, "avg_line_length": 29.044248580932617, "blob_id": "50b16c99126c820613eb524b1779abb6dcba53eb", "content_id": "4e824ecadb151b671a197c9374c0bbd545026bd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4035, "license_type": "no_license", "max_line_length": 95, "num_lines": 113, "path": "/test_blog/run_main.py", "repo_name": "QUZF/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# Author: QUZF\n# Datetime: 2019/1/25 15:46\n\nimport unittest\nimport time\nimport HTMLTestRunner\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\nimport os\n\n# 这个是优化版:执行所有用例并发送报告,分四个步骤\n# 1.加载用例\n# 2.执行用例\n# 3.获取最新测试报告\n# 4.发送邮箱(这一步不想执行的话,注释掉最后面的函数即可)\n\n\ndef add_case(case_path, rule):\n \"\"\"加载所有的测试用例\"\"\"\n testsuit = unittest.TestSuite()\n # 定义discover方法的参数\n discover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\n # discover方法筛选出来的用例,循环添加到测试套件中\n # for test_suite in discover:\n # for test_case in test_suite:\n # testsuit.addTests(test_case)\n # print(testsuit)\n testsuit.addTests(discover) # 直接加载discover\n return testsuit\n\n\ndef run_case(all_case, report_path):\n \"\"\"执行所有的用例,并把结果写入测试报告\"\"\"\n now = time.strftime(\"%Y_%m_%d %H_%M_%S\")\n report_abspath = os.path.join(report_path, now+\"report.html\")\n # report_abspath = \"C:\\\\pycharm_workspace\\\\test_blog\\\\test_report\\\\\"+now+\"report.html\"\n fp = open(report_abspath, \"wb\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp,\n title=\"自动化测试报告\",\n description=\"用例执行情况:\")\n # 调用add_case函数返回值\n runner.run(all_case)\n time.sleep(2)\n fp.close()\n\n\ndef get_report_file(report_path):\n \"\"\"获取最新的测试报告\"\"\"\n lists = os.listdir(report_path)\n lists.sort(key=lambda fn: os.path.getmtime(os.path.join(report_path, fn)))\n print(\"最新测试生成的报告:\"+lists[-1])\n # 找到最新生成的报告文件\n report_file = os.path.join(report_path, lists[-1])\n return 
report_file\n\n\ndef send_mail(sender, psw, receiver, smtp_server, report_file):\n \"\"\"发送最新的测试报告内容\"\"\"\n # 读取测试报告的内容\n with open(report_file, \"rb\") as f:\n mail_body = f.read()\n # 定义邮件内容\n msg = MIMEMultipart()\n body = MIMEText(mail_body, _subtype='html', _charset='utf-8')\n msg['Subject'] = \"自动化测试报告\"\n msg[\"from\"] = sender\n msg[\"to\"] = receiver\n # 加上时间戳\n # msg[\"date\"] = time.strftime('%a,%d %b %Y %H_%M_%S %z')\n msg.attach(body)\n # 添加附件\n att = MIMEText(open(report_file, \"rb\").read(), \"base64\", \"utf-8\")\n att[\"Content-Type\"] = \"application/octet-stream\"\n att[\"Content-Disposition\"] = 'attachment; filename=\"report.html\"'\n msg.attach(att)\n # 登录邮箱\n smtp = smtplib.SMTP()\n # 连接邮箱服务器\n smtp.connect(smtp_server)\n time.sleep(2)\n # 用户名密码\n smtp.login(sender, psw)\n time.sleep(2)\n smtp.sendmail(sender, receiver, msg.as_string())\n smtp.quit()\n print('Test report email has been sent out!')\n\n\nif __name__ == '__main__':\n \"\"\"测试用例的路径、匹配规则\"\"\"\n # case_path = \"C:\\\\pycharm_workspace\\\\test_blog\\\\test_case\"\n case_path = os.path.join(os.getcwd(), \"test_case\")\n rule = \"test*.py\"\n all_case = add_case(case_path, rule) # 1.加载用例\n # 生成测试报告的路径\n time.sleep(2)\n # report_path = \"C:\\\\pycharm_workspace\\\\test_blog\\\\test_report\"\n report_path = os.path.join(os.getcwd(), \"test_report\")\n run_case(all_case, report_path) # 2.执行用例\n # 获取最新的测试报告文件\n time.sleep(2)\n report_file = get_report_file(report_path) # 3.获取最新的测试报告\n # 邮箱配置\n sender = \"[email protected]\"\n psw = \"fengyongyunming\"\n # 收件人多个时,中间用逗号隔开\n receiver = \"[email protected]\"\n smtp_server = \"smtp.163.com\"\n # 4.最后一步发送报告,如果不需要就注释掉\n time.sleep(3)\n send_mail(sender, psw, receiver, smtp_server, report_file)\n\n\n\n\n" }, { "alpha_fraction": 0.5466130375862122, "alphanum_fraction": 0.5761234164237976, "avg_line_length": 25.64285659790039, "blob_id": "c680f27533636a027cc35eaf99188d6810594790", "content_id": "e2193745f7fc66e09d38820cf4ce651c8d76e2bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1651, "license_type": "no_license", "max_line_length": 74, "num_lines": 56, "path": "/test_blog/test_case/blog_login/test_login.py", "repo_name": "QUZF/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# Author: QUZF\n# Datetime: 2019/1/25 15:47\n\nfrom selenium import webdriver\nimport unittest\nimport time\n\n\nclass Blog(unittest.TestCase):\n \"\"\"登录博客\"\"\"\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.get(\"https://passport.cnblogs.com/user/signin\")\n self.driver.implicitly_wait(5)\n\n def login(self, username, psw):\n \"\"\"写了一个登录的方法,帐号和密码参数化\"\"\"\n self.driver.find_element_by_id(\"input1\").send_keys(username)\n self.driver.find_element_by_id(\"input2\").send_keys(psw)\n self.driver.find_element_by_id(\"signin\").click()\n time.sleep(3)\n\n def is_login_sucess(self):\n \"\"\"判断是否获取到登录帐号名称\"\"\"\n try:\n text = self.driver.find_element_by_id(\"lnk_current_user\").text\n print(text)\n return True\n except:\n return False\n\n def test_01(self):\n \"\"\"登录用例1\"\"\"\n self.login(\"JenniferQZF\", \"4731210377qu!\") # 调用登录方法\n time.sleep(2)\n # 判断结果\n result = self.is_login_sucess()\n self.assertTrue(result)\n\n def test_02(self):\n \"\"\"登录用例2:测试时用不同的账号密码登录\"\"\"\n self.login(\"JenniferQZF\", \"4731210377qu!\") # 调用登录方法\n time.sleep(2)\n # 判断结果\n text = self.driver.find_element_by_id(\"lnk_current_user\").text\n print(text)\n self.assertEqual(text, \"风中花\")\n\n def tearDown(self):\n 
time.sleep(3)\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5268816947937012, "alphanum_fraction": 0.5685483813285828, "avg_line_length": 21.545454025268555, "blob_id": "66b5902a50db8d81ae49f39d118f60edb38666bd", "content_id": "e1f94796aa63c9234ff032b2370ac99748764530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 69, "num_lines": 33, "path": "/test_blog/test_case/blog_set/test_set.py", "repo_name": "QUZF/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# Author: QUZF\n# Datetime: 2019/1/25 15:48\n\nimport unittest\n\n\nclass MyTest01(unittest.TestCase):\n\n def setUp(self): # 此类中,每个方法用例执行前先执行一次setUp\n print('setUp')\n\n def test_case01(self): # 根据用例的名称来顺序执行的。\n print('test_case01')\n\n def test_case03(self): # 根据用例的名称来顺序执行的。\n print('test_case03')\n\n def test_case02(self): # 根据用例的名称来顺序执行的。\n print('test_case02')\n\n def tearDown(self): # 此类中,每个方法用例执行完均执行一次tearDown\n print('tearDown')\n\n\nclass MyTest02(unittest.TestCase):\n\n def test_case04(self): # 这个类没有前置后置方法,按用例名称顺序正常执行即可;\n print('test_case04')\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.6612903475761414, "avg_line_length": 19.33333396911621, "blob_id": "a85e8b6d20f42702770af1e45cd0cb641a316da0", "content_id": "f5c9fa015a5622592f92c458e57544e4d48e4b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/test_blog/test_case/__init__.py", "repo_name": "QUZF/python", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# Author: QUZF\n# Datetime: 2019/1/25 15:45\n\n" } ]
5
therk987/PyTorch-Soft-Actor-Critic-SAC
https://github.com/therk987/PyTorch-Soft-Actor-Critic-SAC
0525ecd4f1e4380c1287aba52f86105e80709b56
9cbbeec3f3231f2235bfad85937bcfb4b53ba5f6
10c8d2e4695fe53e97f47ac827a4992f06de2ddd
refs/heads/main
2023-08-17T10:55:29.917070
2021-09-27T20:48:41
2021-09-27T20:48:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5394841432571411, "alphanum_fraction": 0.554960310459137, "avg_line_length": 32.054054260253906, "blob_id": "7edcc704f70850712986c5b62ae1509593bc4a4b", "content_id": "1c1ca5ea65f4670fef58fd4f92eecd950e310e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5040, "license_type": "no_license", "max_line_length": 99, "num_lines": 148, "path": "/SAC.py", "repo_name": "therk987/PyTorch-Soft-Actor-Critic-SAC", "src_encoding": "UTF-8", "text": "from env_GoTogether import EnvGoTogether\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.distributions import Categorical\r\nimport numpy as np\r\nimport random\r\n\r\nclass ReplayBuffer:\r\n def __init__(self, capacity):\r\n self.capacity = capacity\r\n self.buffer = []\r\n self.position = 0\r\n\r\n def push(self, state, action, reward, next_state, done):\r\n if len(self.buffer) < self.capacity:\r\n self.buffer.append(None)\r\n self.buffer[self.position] = (state, action, reward, next_state, done)\r\n self.position = (self.position + 1) % self.capacity\r\n\r\n def sample(self, batch_size):\r\n batch = random.sample(self.buffer, batch_size)\r\n state, action, reward, next_state, done = map(np.stack, zip(*batch))\r\n return state, action, reward, next_state, done\r\n\r\n def __len__(self):\r\n return len(self.buffer)\r\n\r\nclass P_net(nn.Module):\r\n def __init__(self, state_dim, action_dim):\r\n super(P_net, self).__init__()\r\n self.fc1 = nn.Linear(state_dim, 256)\r\n self.fc2 = nn.Linear(256, 128)\r\n self.fc3 = nn.Linear(128, action_dim)\r\n\r\n def forward(self, x):\r\n x = torch.sigmoid(self.fc1(x))\r\n x = torch.sigmoid(self.fc2(x))\r\n action_score = self.fc3(x)\r\n return F.softmax(action_score, dim=-1)\r\n\r\nclass Q_net(nn.Module):\r\n def __init__(self, state_dim, action_dim):\r\n super(Q_net, self).__init__()\r\n self.fc1 = nn.Linear(state_dim, 256)\r\n self.fc2 = nn.Linear(256, 128)\r\n self.fc3 = nn.Linear(128, action_dim)\r\n\r\n def forward(self, x):\r\n h = torch.sigmoid(self.fc1(x))\r\n h = torch.sigmoid(self.fc2(h))\r\n q = self.fc3(h)\r\n return q\r\n\r\nclass SAC():\r\n def __init__(self, state_dim, action_dim):\r\n super(SAC, self).__init__()\r\n self.state_dim = state_dim\r\n self.action_dim = action_dim\r\n self.p_net = P_net(state_dim, action_dim)\r\n self.q_net = Q_net(state_dim, action_dim)\r\n self.gamma = 0.99\r\n self.alpha = 1\r\n self.loss_fn = torch.nn.MSELoss()\r\n self.q_optimizer = torch.optim.Adam(self.q_net.parameters(), lr=1e-3)\r\n self.p_optimizer = torch.optim.Adam(self.p_net.parameters(), lr=1e-4)\r\n\r\n def get_action(self, state):\r\n state = torch.from_numpy(state).float()\r\n action_prob = self.p_net.forward(state)\r\n c = Categorical(action_prob)\r\n action = c.sample()\r\n return action.item()\r\n\r\n def train(self, batch):\r\n state = batch[0]\r\n action = batch[1]\r\n reward = batch[2]\r\n next_state = batch[3]\r\n state = torch.from_numpy(state).float().squeeze(1)\r\n next_state = torch.from_numpy(next_state).float().squeeze(1)\r\n T = state.size()[0]\r\n\r\n # calculate V\r\n next_q = self.q_net.forward(next_state)\r\n next_a_prob = self.p_net.forward(next_state)\r\n next_v = next_a_prob*(next_q-self.alpha*torch.log(next_a_prob))\r\n next_v = torch.sum(next_v, 1)\r\n\r\n # train Q\r\n q = self.q_net.forward(state)\r\n expect_q = q.clone()\r\n for i in range(T):\r\n expect_q[i, action[i]] = reward[i] + self.gamma * next_v[i]\r\n loss = 
self.loss_fn(q, expect_q.detach())\r\n self.q_optimizer.zero_grad()\r\n loss.backward()\r\n self.q_optimizer.step()\r\n\r\n # train Actor\r\n q = self.q_net.forward(state)\r\n a_prob = self.p_net.forward(state)\r\n ploss = a_prob*(self.alpha*torch.log(a_prob)-q)\r\n ploss = torch.sum(ploss)\r\n ploss = ploss / T\r\n self.p_optimizer.zero_grad()\r\n ploss.backward()\r\n self.p_optimizer.step()\r\n\r\n def load_model(self):\r\n self.q_net = torch.load('SAC_q_net.pkl')\r\n self.p_net = torch.load('SAC_p_net.pkl')\r\n\r\n def save_model(self):\r\n torch.save(self.q_net, 'SAC_q_net.pkl')\r\n torch.save(self.p_net, 'SAC_p_net.pkl')\r\n\r\nif __name__ == '__main__':\r\n state_dim = 4\r\n action_dim = 4\r\n max_epi = 500\r\n max_mc = 1000\r\n epi_iter = 0\r\n mc_iter = 0\r\n acc_reward = 0\r\n reward_curve = []\r\n batch_size = 32\r\n replay_buffer = ReplayBuffer(50000)\r\n env = EnvGoTogether(13)\r\n agent = SAC(state_dim, action_dim)\r\n for epi_iter in range(max_epi):\r\n for mc_iter in range(max_mc):\r\n # env.render()\r\n state = env.get_state()\r\n action = agent.get_action(state)\r\n group_list = [action, 2]\r\n reward, done = env.step(group_list)\r\n next_state = env.get_state()\r\n acc_reward += reward\r\n replay_buffer.push(state, action, reward, next_state, done)\r\n if len(replay_buffer) > batch_size:\r\n agent.train(replay_buffer.sample(batch_size))\r\n if done:\r\n break\r\n print('epi', epi_iter, 'reward', acc_reward / mc_iter, 'MC', mc_iter, 'alpha', agent.alpha)\r\n env.reset()\r\n acc_reward = 0\r\n" }, { "alpha_fraction": 0.6623376607894897, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 14.399999618530273, "blob_id": "0f136084806f456a6804e664e3a133759a1f7d12", "content_id": "a7d4deda21be4e26454f564132f98b0dad65d78f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 154, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/README.md", "repo_name": "therk987/PyTorch-Soft-Actor-Critic-SAC", "src_encoding": "UTF-8", "text": "# PyTorch-Soft-Actor-Critic-SAC\n\n\nhttps://arxiv.org/abs/1910.07207v1\n\n\nSoft actor critic algorithm for discrete action.\n\n\nUse Sigmoid, never ReLU!!!!!!!!\n" } ]
2
vathes/pam-oauth2
https://github.com/vathes/pam-oauth2
3e9f829f7c84bd7e74d8f6f91408ccbbe5e5b9ac
c10d2b23c5f0d60f9d9df546ba92f1d865a179cb
62416784ea258239ea13fca791b8e48c4dff6d43
refs/heads/master
2023-08-31T16:40:13.501020
2022-09-19T20:35:11
2022-09-19T20:35:11
311409189
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6527414917945862, "alphanum_fraction": 0.6566579341888428, "avg_line_length": 21.52941131591797, "blob_id": "d4aa3124be876a69707428a533119d83c6779d00", "content_id": "ce1f5cb55db7c80d653fc1ad4a6cdc6136911fdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 77, "num_lines": 34, "path": "/tests/test.py", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "# docker exec -it pam-oauth2_app_1 python3 test.py\nimport os\nimport pam\n\n\n## simple test for user:password\np = pam.pam()\nprint(f\"reason: {p.reason}\")\nresponse = p.authenticate(\n os.getenv(\"DJ_AUTH_USER\"), os.getenv(\"DJ_AUTH_PASSWORD\"), service=\"oidc\"\n)\nprint(f\"Authenticated? {response}\")\nprint(f\"reason: {p.reason}\")\n\nresponse = p.authenticate(\n os.getenv(\"DJ_AUTH_USER\"), os.getenv(\"DJ_AUTH_PASSWORD\"), service=\"oidc\"\n)\nprint(f\"Authenticated? {response}\")\nprint(f\"reason: {p.reason}\")\n\n\n## simple test for user:token\np = pam.pam()\nprint(\n p.authenticate(\n os.getenv(\"DJ_AUTH_USER\"), os.getenv(\"DJ_AUTH_TOKEN\"), service=\"oidc\"\n )\n)\n\nprint(\n p.authenticate(\n os.getenv(\"DJ_AUTH_USER\"), os.getenv(\"DJ_AUTH_TOKEN\"), service=\"oidc\"\n )\n)\n" }, { "alpha_fraction": 0.6436517238616943, "alphanum_fraction": 0.668968141078949, "avg_line_length": 56.911109924316406, "blob_id": "978e32defd3d0163edd2f0d93b7d535f8a48fbaa", "content_id": "8057e37489cf6ef3863b286ff376d7f5816e3f59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2607, "license_type": "no_license", "max_line_length": 188, "num_lines": 45, "path": "/tests/test.sh", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# set -a && . .env && ./tests/test.sh mariadb && set +a\n# set -a && . 
.env && ./tests/test.sh percona && set +a\n\nmariadb() {\n set -e\n ROOT_PASSWORD=simple\n docker rm -f database\n docker run --name database -de MYSQL_ROOT_PASSWORD=${ROOT_PASSWORD} mariadb:10.7 # does not work with latest and non-v1\n until docker exec -it database mysql -h 127.0.0.1 -uroot -p${ROOT_PASSWORD} -e \"SELECT 1;\" 1>/dev/null\n do\n echo waiting...\n sleep 5\n done\n docker exec -it database mysql -uroot -p${ROOT_PASSWORD} -e \"INSTALL SONAME 'auth_pam_v1';\"\n docker cp ./config/service_example database:/etc/pam.d/oidc\n docker cp ./pam-oidc/target/debug/libpam_oidc.so database:/lib/x86_64-linux-gnu/security/libpam_oidc.so\n docker exec -it database mkdir /etc/datajoint\n docker cp ./config/libpam_oidc.yaml database:/etc/datajoint/\n docker exec -it database mysql -uroot -p${ROOT_PASSWORD} -e \"CREATE USER '${DJ_AUTH_USER}'@'%' IDENTIFIED VIA pam USING 'oidc';\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -p${DJ_AUTH_PASSWORD} -e \"SELECT 'delegated to oidc' as login;\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -p${DJ_AUTH_PASSWORD} -e \"SELECT 'delegated to oidc' as login;\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -pdeny -e \"SELECT 'delegated to oidc' as login;\"\n}\n\npercona() {\n set -e\n ROOT_PASSWORD=simple\n docker rm -f database\n docker run --name database -de MYSQL_ROOT_PASSWORD=${ROOT_PASSWORD} --entrypoint bash percona:8 -c \"echo 'plugin_load_add = auth_pam.so' >> /etc/my.cnf && /docker-entrypoint.sh mysqld\"\n until docker exec -it database mysql -h 127.0.0.1 -uroot -p${ROOT_PASSWORD} -e \"SELECT 1;\" 1>/dev/null\n do\n echo waiting...\n sleep 5\n done\n docker cp ./config/service_example database:/etc/pam.d/oidc\n docker cp ./pam-oidc/target/debug/libpam_oidc.so database:/usr/lib64/security/libpam_oidc.so\n docker exec -itu root database mkdir /etc/datajoint\n docker cp ./config/libpam_oidc.yaml database:/etc/datajoint/\n docker exec -it database mysql -uroot -p${ROOT_PASSWORD} -e \"CREATE USER '${DJ_AUTH_USER}'@'%' IDENTIFIED WITH auth_pam AS 'oidc';\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -p${DJ_AUTH_PASSWORD} -e \"SELECT 'delegated to oidc' as login;\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -p${DJ_AUTH_PASSWORD} -e \"SELECT 'delegated to oidc' as login;\"\n docker exec -it database mysql -h 127.0.0.1 -u${DJ_AUTH_USER} -pdeny -e \"SELECT 'delegated to oidc' as login;\"\n}\n\n" }, { "alpha_fraction": 0.46620047092437744, "alphanum_fraction": 0.5804196000099182, "avg_line_length": 19.380952835083008, "blob_id": "cd887f378b3a1d79c13c270313eb10e54c0134ef", "content_id": "fc796e25de7366d58e12e52115663eb23ad3eebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 429, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/pam-oidc/Cargo.toml", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "[package]\nname = \"pam-oidc\"\nversion = \"0.1.4\"\nauthors = [\"guzman-raphael <[email protected]>\"]\nedition = \"2021\"\n\n[dependencies]\nyaml-rust = \"0.4.5\"\nbase64 = \"0.13.0\"\noauth2 = \"4.2.3\"\nrand = \"0.8.5\"\nreqwest = { version = \"0.11.11\", features = [\"blocking\"] }\nserde_json = \"1.0.83\"\nlog = \"0.4.17\"\nlog4rs = \"1.1.1\"\nlog-panics = \"2.1.0\"\npamsm = { version = \"0.5.2\", features = [\"libpam\"] }\npkg-version = \"1.0.0\"\n\n[lib]\ncrate-type = [\"cdylib\"]\n\n" }, { "alpha_fraction": 0.629522442817688, "alphanum_fraction": 
0.639652669429779, "avg_line_length": 35.3684196472168, "blob_id": "a33f1e1c89b3a21f35a53f574d2f36aea7c5be69", "content_id": "6a8df2471b9b98c9747bd74ad2446c475d6fefd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 691, "license_type": "no_license", "max_line_length": 96, "num_lines": 19, "path": "/docker-compose.yml", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "# docker compose up --build\n# docker buildx bake --set \"*.platform=linux/amd64\" --load\nversion: \"2.4\"\nservices:\n app:\n build: .\n image: pam_oidc:v0.1.4\n environment:\n - DJ_AUTH_USER\n - DJ_AUTH_PASSWORD\n - DJ_AUTH_TOKEN\n # - RUSTFLAGS=-C link-arg=-undefined\n # - RUSTFLAGS=-C target-feature=-crt-static\n command: tail -f /dev/null\n volumes:\n - ./config/service_example:/etc/pam.d/oidc # add a 'oidc' config that utilizes pam_oidc\n - ./config/libpam_oidc.yaml:/etc/datajoint/libpam_oidc.yaml # add pam_oidc-specific config\n - ./tests/test.py:/workspace/test.py # python test\n - ./pam-oidc:/workspace/pam-oidc # mount source for dev\n" }, { "alpha_fraction": 0.7185741066932678, "alphanum_fraction": 0.7335835099220276, "avg_line_length": 29.632183074951172, "blob_id": "b916391f8f307c1669000346e39a8fddf692e1e1", "content_id": "77cb47c3cfb8e5efd8dba9f03c575c41576e7de5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 315, "num_lines": 87, "path": "/README.md", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "# Rust Demo\n\n## Deploy Instructions\n\n1. Acquire (see the [releases](https://github.com/datajoint-company/pam-oauth2/releases) page) or build (see below) the appropriate `libpam_oidc.so` dynamic clib binary for your platform that provides the PAM interface to authenticate via an OIDC provider.\n1. Copy `libpam_oidc.so` into the appropriate directory that your system expects new modules to be loaded e.g. on Debian, it is located in `/lib/x86_64-linux-gnu/security/`.\n1. Create a service config file within the directory that your system expects for PAM e.g. on Debian, it is located in `/etc/pam.d/`. We can for instance create a service/file called `oidc` with the following contents (note the argument in the 1st line should be the path where `pam_oidc`'s config will be located):\n\n ```text\n auth sufficient libpam_oidc.so /etc/datajoint/libpam_oidc.yaml\n account optional libpam_oidc.so\n ```\n\n See [service_example](./config/service_example) for more info.\n\n1. In the path provided to the service config, create a config file for `pam_oidc`. See [libpam_oidc_example.yaml](./config/libpam_oidc_example.yaml) for more info.\n1. Configure your PAM-compatible application/service to point to the `oidc` service we just created. For a few examples, see [test.sh](./tests/test.sh).\n\n## Developer Instructions\n\n### Build\n\n```bash\ncd ./pam-oidc && cargo build; cd .. # DEBUG\ncd ./pam-oidc && cargo build --release; cd .. # PROD\n```\n\n### Validate PAM with test cases\n\nCreate `.env` file in the root directory with the following:\n```\nDJ_AUTH_USER=\nDJ_AUTH_PASSWORD=\nDJ_AUTH_TOKEN=\n```\nSee tests in `tests` subdirectory. 
The header comment gives hints how to run them.\n\n## --- Old Notes ---\n\n### Start\n\nTo do in local folder\n`cargo init`\n\n\n### Debug\n\n`cargo run`\n\n### Build (debug, prod)\n\n`cargo build`\n`cargo build --release`\n\n\n### test case (needed to install gcc, g++, openssl, libressl-dev, pkgconfig, OPENSSL_DIR=/etc/ssl)\n\n*as root\n\napk add g++ libressl-dev\napt-get install libssl-dev pkg-config -y\napt-get install musl-tools -y\n\napt-get install libssl-dev pkg-config build-essential libpam0g-dev libpam0g -y\n\n*as user\n\ncd /workspace/pam-oidc\n\ncargo build\n\necho shh | PAM_TYPE=auth PAM_USER=raphael ./pam_oidc/target/release/pam_oidc ./sample.yaml\n\n\n## cross-compile\n\nrustup target add x86_64-unknown-linux-gnu\nrustup target add x86_64-unknown-linux-musl\nrustup show\ncargo build --target x86_64-unknown-linux-musl --features vendored\ncargo build --release --target x86_64-unknown-linux-musl\n\n## testing (current on 07/01/21)\n\ncp pam-oidc/test /etc/pam.d/\ncp pam-oidc/target/debug/libpam_oidc.so /lib/x86_64-linux-gnu/security/\npython3 /workspace/test.py\n" }, { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.6392452716827393, "avg_line_length": 26.040817260742188, "blob_id": "40c66c2dc947b1748b86fee70dad8452108e61df", "content_id": "06c30d97daa23f174973a0264f9a53a58cf78070", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 109, "num_lines": 49, "path": "/Dockerfile", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "# 131MB\n# FROM rust:alpine\n# 185 MB\n# FROM rust:slim-stretch\n# 196 MB\n# FROM rust:slim-buster\n# 425 MB\nFROM rust:buster\n# 438 MB\n# FROM rust:stretch\n\n# RUN \\\n# curl -sSOL https://github.com/cdr/code-server/releases/download/v3.3.1/code-server_3.3.1_amd64.deb && \\\n# dpkg -i code-server_3.3.1_amd64.deb\n\nRUN \\\n export uid=1000 gid=0 && \\\n mkdir -p /home/rust_dev && \\\n echo \"rust_dev:x:${uid}:${gid}:Developer,,,:/home/rust_dev:/bin/sh\" >> /etc/passwd && \\\n # echo \"dja:x:${uid}:\" >> /etc/group && \\\n chown ${uid}:${gid} -R /home/rust_dev\n\nRUN \\\n # apk add gdb git && \\\n apt-get update && apt-get install gdb git -y\n# && \\\n# mkdir -p /workspace/pam-rs/pam-http/target/release\n# && \\\n# gdbserver :2345 ./target/debug/app\n\nRUN \\\n ln -s /lib/x86_64-linux-gnu/libpam.so.0 /lib/x86_64-linux-gnu/libpam.so && \\\n ln -s /workspace/pam-oidc/target/debug/libpam_oidc.so /lib/x86_64-linux-gnu/security/libpam_oidc.so && \\\n apt-get install python3-pip -y && \\\n pip3 install python-pam && \\\n mkdir -p /workspace/pam-oidc && \\\n chown 1000:0 /workspace && \\\n chown 1000:0 /workspace/pam-oidc\n\n\nUSER rust_dev\nENV USER rust_dev\nENV HOME /home/rust_dev\n\n\nWORKDIR /workspace\n\nCOPY --chown=1000:0 pam-oidc/src /workspace/pam-oidc/src\nCOPY --chown=1000:0 pam-oidc/Cargo.toml /workspace/pam-oidc/\n" }, { "alpha_fraction": 0.5319490432739258, "alphanum_fraction": 0.5370866060256958, "avg_line_length": 36.37200164794922, "blob_id": "7db3c995c117c102d0b69153046172ffb28b57dc", "content_id": "22259850ed456372f0cb5b1254145cb726929995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 9343, "license_type": "no_license", "max_line_length": 91, "num_lines": 250, "path": "/pam-oidc/src/lib.rs", "repo_name": "vathes/pam-oauth2", "src_encoding": "UTF-8", "text": "// general\nuse std::str::FromStr;\n// crate metadata\nextern crate 
pkg_version;\nuse pkg_version::{\n pkg_version_major,\n pkg_version_minor,\n pkg_version_patch,\n};\n// pam\n#[macro_use]\nextern crate pamsm;\nuse pamsm::{\n PamServiceModule,\n PamLibExt,\n Pam,\n PamFlags,\n PamError,\n};\nstruct PamCustom;\n// yaml\nextern crate yaml_rust;\nuse std::fs::File;\nuse std::io::prelude::*;\nuse yaml_rust::yaml::Yaml;\nuse yaml_rust::YamlLoader;\n// json\nextern crate serde_json;\nuse serde_json::Value;\n// oauth2\nextern crate base64;\nextern crate oauth2;\n// extern crate url;\nuse oauth2::{\n AuthUrl,\n ClientId,\n ClientSecret,\n ResourceOwnerPassword,\n ResourceOwnerUsername,\n Scope,\n TokenResponse,\n TokenUrl,\n};\nuse oauth2::basic::{\n BasicClient\n};\nuse oauth2::reqwest::http_client;\n// logging\nextern crate log;\nextern crate log4rs;\nextern crate log_panics;\nextern crate rand;\nuse log::{\n error,\n info,\n debug,\n LevelFilter,\n};\nuse log4rs::append::console::ConsoleAppender;\nuse log4rs::append::file::FileAppender;\nuse log4rs::encode::pattern::PatternEncoder;\nuse log4rs::config::{\n Appender,\n Config,\n Root,\n};\nuse rand::Rng;\n// interface\nimpl PamServiceModule for PamCustom {\n fn authenticate(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n // Load libpam_oidc.so's YAML config file\n let crate_version = format!(\"{}.{}.{}\", pkg_version_major!(), pkg_version_minor!(),\n pkg_version_patch!());\n let config_file = &_args[0];\n let config = load_file(config_file);\n // Initiate logger\n let mut rng = rand::thread_rng();\n let log_id: u32 = rng.gen();\n log_panics::init();\n let stdout = ConsoleAppender::builder()\n .encoder(Box::new(PatternEncoder::new(format!(\n \"[{{d(%Y-%m-%d %H:%M:%S%.3f)}}][pam-oidc][{}][{{l}}][{}]: {{m}}{{n}}\",\n crate_version,\n log_id,\n )\n .as_str()\n )))\n .build();\n let file = FileAppender::builder()\n .encoder(Box::new(PatternEncoder::new(format!(\n \"[{{d(%Y-%m-%d %H:%M:%S%.3f)}}][{}][{{l}}][{}]: {{m}}{{n}}\",\n crate_version,\n log_id,\n )\n .as_str()\n )))\n .build(config[0][\"log.path\"].as_str().unwrap().to_string())\n .unwrap();\n let log_config = Config::builder()\n .appender(Appender::builder().build(\"stdout\", Box::new(stdout)))\n .appender(Appender::builder().build(\"file\", Box::new(file)))\n .build(\n Root::builder().appender(\"stdout\").appender(\"file\")\n .build(LevelFilter::from_str(&config[0][\"log.level\"].as_str().unwrap()\n .to_string()).unwrap())\n )\n .unwrap();\n match log4rs::init_config(log_config) {\n Ok(_) => debug!(\"Logging successfully initialized.\"),\n Err(error) => debug!(\"Encountered error initializing log file: {}\", error),\n };\n // Initialize user and password supplied\n info!(\"Auth detected. 
Proceeding...\");\n let pam_user = match _pamh.get_user(None) {\n Ok(Some(u)) => u.to_str().unwrap(),\n Ok(None) => return PamError::USER_UNKNOWN,\n Err(e) => return e,\n };\n debug!(\"pam_user: {}\", pam_user);\n let pam_password = match _pamh.get_authtok(None) {\n Ok(Some(p)) => p.to_str().unwrap(),\n Ok(None) => return PamError::AUTH_ERR,\n Err(e) => return e,\n };\n debug!(\"config_file: {}\", config_file);\n debug!(\"client id: {}\", config[0][\"client.id\"].as_str().unwrap());\n debug!(\"client secret: {}\", config[0][\"client.secret\"].as_str().unwrap());\n debug!(\"url auth: {}\", config[0][\"url.auth\"].as_str().unwrap());\n debug!(\"url token: {}\", config[0][\"url.token\"].as_str().unwrap());\n debug!(\"url userinfo: {}\", config[0][\"url.userinfo\"].as_str().unwrap());\n debug!(\"input min_size: {}\", config[0][\"token.min_size\"].as_i64().unwrap());\n debug!(\"requested scopes: {}\", config[0][\"scopes\"].as_str().unwrap().to_string());\n debug!(\"pam_password: {}\", pam_password);\n debug!(\"actual pass_size: {}\", pam_password.len() as i64);\n info!(\"Inputs read.\");\n let mut access_token = pam_password.to_string();\n if pam_password.len() as i64 > config[0][\"token.min_size\"].as_i64().unwrap() {\n // If passing bearer token as password\n info!(\"Check as token.\");\n } else {\n // If passing password directly\n info!(\"Check as password.\");\n let client =\n BasicClient::new(\n ClientId::new(config[0][\"client.id\"].as_str().unwrap().to_string()),\n Some(ClientSecret::new(\n config[0][\"client.secret\"].as_str().unwrap().to_string()\n )),\n AuthUrl::new(String::from(config[0][\"url.auth\"].as_str().unwrap()))\n .unwrap(),\n Some(TokenUrl::new(\n String::from(config[0][\"url.token\"].as_str().unwrap())\n )\n .unwrap()),\n );\n let token_result = client.exchange_password(\n &ResourceOwnerUsername::new(pam_user.to_string().clone()),\n &ResourceOwnerPassword::new(pam_password.to_string())\n )\n .add_scope(Scope::new(config[0][\"scopes\"].as_str().unwrap().to_string()))\n .request(http_client);\n access_token = match token_result {\n Ok(tok) => tok.access_token().secret().to_string(),\n Err(e) => {\n error!(\"Wrong password provided. 
Details: {:?}\", e);\n return PamError::AUTH_ERR\n },\n };\n }\n // Determine assigned scopes\n debug!(\"access_token: {}\", access_token);\n let jwt_payload = access_token.split('.').collect::<Vec<&str>>()[1];\n debug!(\"jwt_payload: {}\", jwt_payload);\n let jwt_payload_decoded = base64::decode(jwt_payload).unwrap();\n let jwt_payload_str = std::str::from_utf8(&jwt_payload_decoded).unwrap();\n debug!(\"jwt_payload_str: {}\", jwt_payload_str);\n let jwt_payload: Value = serde_json::from_str(&jwt_payload_str).unwrap();\n let assigned_scopes = jwt_payload.get(\"scope\").unwrap().as_str().unwrap();\n debug!(\"assigned_scopes: {}\", assigned_scopes);\n // Verify token\n info!(\"Verifying token.\");\n let body = reqwest::blocking::Client::new()\n .get(config[0][\"url.userinfo\"].as_str().unwrap())\n .header(\"Authorization\", format!(\"Bearer {}\", access_token))\n .send()\n .unwrap()\n .text()\n .unwrap();\n debug!(\"body: {}\", body);\n let json: Value = serde_json::from_str(&body).unwrap();\n debug!(\"token's user: {:?}\", json.get(\"sub\"));\n let username = json.get(config[0][\"username.key\"].as_str().unwrap().to_string());\n let required_scopes = config[0][\"scopes\"].as_str().unwrap().to_string();\n let scopes_satisfied = subset(assigned_scopes, &required_scopes);\n // let scopes_satisfied = &required_scopes == assigned_scopes;\n if username != None &&\n pam_user == username.unwrap() &&\n scopes_satisfied {\n // If username defined in token, it matches pam_user, and the scopes satisfy\n info!(\"Auth success!\");\n PamError::SUCCESS\n } else if username == None {\n // If username not defined in token\n error!(\"Token invalid error.\");\n PamError::AUTH_ERR\n } else {\n debug!(\"user defined? {}\", username != None);\n debug!(\"user matches? {}\", pam_user == username.unwrap());\n debug!(\"scopes satisfied? {}\", scopes_satisfied);\n error!(\"Auth failed!\");\n PamError::AUTH_ERR\n }\n }\n\n fn chauthtok(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n PamError::SUCCESS\n }\n\n fn open_session(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n PamError::SUCCESS\n }\n\n fn close_session(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n PamError::SUCCESS\n }\n \n fn setcred(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n PamError::SUCCESS\n }\n \n fn acct_mgmt(_pamh: Pam, _flags: PamFlags, _args: Vec<String>) -> PamError {\n PamError::SUCCESS\n }\n}\n\npub fn subset(parent: &str, child: &str) -> bool {\n let assigned_scopes = parent.split_whitespace().collect::<Vec<&str>>();\n let mut required_scopes = child.split_whitespace();\n let scopes_satisfied = required_scopes.all(|item| assigned_scopes.contains(&item));\n return scopes_satisfied;\n}\n\npub fn load_file(file: &str) -> std::vec::Vec<Yaml> {\n let mut file = File::open(file).expect(\"Unable to open file\");\n let mut contents = String::new();\n file.read_to_string(&mut contents).expect(\"Unable to read file\");\n YamlLoader::load_from_str(&contents).unwrap()\n}\n\npam_module!(PamCustom);\n" } ]
7
hirneagabriel/SD-Tema1
https://github.com/hirneagabriel/SD-Tema1
58e91cbfb4baa708712cdda9d06f1416eb13e77c
dd6fffce6580ee9d887f0c105055284005f4a057
99715cd6a7543d8f6737cdeec017482847de6a32
refs/heads/master
2022-04-12T04:23:05.459390
2020-03-26T12:16:59
2020-03-26T12:16:59
250247759
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7953890562057495, "alphanum_fraction": 0.8097983002662659, "avg_line_length": 48.14285659790039, "blob_id": "fdb6fcb7fbb431ee81781c65df659315dd49caa9", "content_id": "f7f3b1998d2e30c00a1f0df2a23858682a34f98c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 347, "license_type": "no_license", "max_line_length": 108, "num_lines": 7, "path": "/README.md", "repo_name": "hirneagabriel/SD-Tema1", "src_encoding": "UTF-8", "text": "\nfisierul \"vectoritext.txt\" contine nr de elemente ce trebuie sortate si cel mai mare numar posibil din sir\n\nfisierul \"output.txt\" contine timpii de sortare pentru fiecare algoritm si un mesajul de validare a sortarii\n\nfisierul \"vectorisortati.txt\" contine sirurile sortate\n\nBubleSort a fost limitat pentru a putea sorta pana la 10000 de numere\n\n\n" }, { "alpha_fraction": 0.4739536941051483, "alphanum_fraction": 0.4868655502796173, "avg_line_length": 25.05813980102539, "blob_id": "e099730aef0c4dc7b42e0017a2a3305a04dce59a", "content_id": "b2fdd6e676c15d7aec3d59862f788aee246489a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4492, "license_type": "no_license", "max_line_length": 91, "num_lines": 172, "path": "/proiect1.py", "repo_name": "hirneagabriel/SD-Tema1", "src_encoding": "UTF-8", "text": "import random\nimport math\n\ndef verificare(v, arr):\n if len(v) != len(arr):\n return 0\n x = sorted(arr)\n for i in range(len(arr)):\n if v[i] != x[i]:\n return 0\n return 1\n\ndef CountSort(v):\n MAX = max(v)\n output = [0] * len(v)\n if MAX <= 10**10:\n n = len(v)\n j = 0\n vf = [0 for i in range(MAX+1)]\n for i in range(n):\n vf[v[i]] += 1\n for i in range(MAX+1):\n while vf[i] > 0:\n output[j] = i\n j += 1\n vf[i] -= 1\n return output\n\ndef CountSortRadix(vector, nrcifre, baza):\n lungime = len(vector)\n output = [0]*lungime\n vf = [0] * int(baza)\n for i in range(lungime):\n cifra = (vector[i] // baza ** nrcifre) % baza\n vf[cifra] = vf[cifra] + 1\n for i in range(1,baza):\n vf[i] = vf[i] + vf[i-1]\n for m in range(lungime-1, -1, -1):\n cifra = (vector[m] // baza ** nrcifre) % baza\n vf[cifra] = vf[cifra] - 1\n output[vf[cifra]] = vector[m]\n return output\n\ndef RadixSort(v, baza):\n MAX = max(v)\n output= v\n nrcifre = int(math.floor(math.log(MAX,baza)+1))\n for i in range(nrcifre):\n output = CountSortRadix(output,i,baza)\n return output\n\n\ndef interclasare(lst, ldr):\n i = 0\n j = 0\n rez=[]\n while i < len(lst) and j < len(ldr):\n if lst[i] <= ldr[j]:\n rez.append(lst[i])\n i += 1\n else:\n rez.append(ldr[j])\n j += 1\n rez.extend(lst[i:])\n rez.extend(ldr[j:])\n return rez\n\ndef MergeSort(ls):\n if len(ls) <= 1:\n return ls\n else:\n mij = len(ls)//2\n lst = MergeSort(ls[:mij])\n ldr = MergeSort(ls[mij:])\n return interclasare(lst,ldr)\n\ndef BFPRT(arr):\n n=len(arr)\n if n <= 5:\n return sorted(arr)[n//2]\n sbl = [sorted(arr[i:i+5]) for i in range(0, n, 5)]\n med = [x[len(x)//2] for x in sbl]\n return BFPRT(med)\n\ndef QuickSort(v):\n n = len(v)\n if n == 0:\n return []\n if n == 1:\n return v\n pivot = BFPRT(v)\n lst = []\n egal = []\n ldr = []\n for x in v:\n if x < pivot:\n lst.append(x)\n elif x == pivot:\n egal.append(x)\n else:\n ldr.append(x)\n lst = QuickSort(lst)\n ldr = QuickSort(ldr)\n lst.extend(egal)\n lst.extend(ldr)\n return lst\n\ndef BubbleSort(v):\n output = [i for i in v]\n n = len(v)\n if n>10000:\n return [0]\n for i in range(n):\n ok = False\n for j in range(0, n - i - 1):\n if 
output[j] > output[j+1]:\n c=output[j]\n output[j] = output[j+1]\n output[j+1] = c\n ok = True\n if ok == False:\n break\n return output\n\nprint(\"cate baze doriti sa testati pentru RadixSort?\")\nnrb = int(input(\"numar baze:\"))\nprint(\"introduce-ti bazele:\")\nbaze=[int(input()) for i in range(nrb)]\n\nh = open(\"vectorisortati.txt\",\"w\")\nf=open(\"vectoritext.txt\",\"r\")\ndate=f.readline().split()\ng=open(\"output.txt\",\"w\")\nwhile date:\n numbers = []\n for x in range(int(date[0])):\n numbers.append(random.randint(1, int(date[1])))\n g.write(\"Sortari pentru \" + date[0] + \" de numere cu valori pana in \" + date[1] + '\\n')\n sortari = [sorted, BubbleSort, CountSort, MergeSort, QuickSort,RadixSort]\n from datetime import datetime\n\n for sortare in sortari:\n g.write(str(sortare) + '\\n')\n if sortare == RadixSort:\n for i in range(nrb):\n start = datetime.now()\n x = RadixSort(numbers, baze[i])\n g.write(\"RadixSort pentru baza \"+str(baze[i]) + '\\n')\n g.write(str(datetime.now() - start) + '\\n')\n if verificare(x,numbers)==1:\n g.write(\"sortarea a trecut testul de validare\" + '\\n')\n else:\n g.write(\"eroare de validare\" +'\\n')\n g.write('\\n')\n else:\n start = datetime.now()\n x = sortare(numbers)\n t = datetime.now() - start\n g.write(str(datetime.now() - start) + '\\n')\n if verificare(x, numbers) == 1:\n g.write(\"sortarea a trecut testul de validare\" + '\\n')\n else:\n g.write(\"eroare de validare\" + '\\n')\n g.write('\\n')\n h.write(str(x)+'\\n')\n h.write('\\n')\n date = f.readline().split()\n g.write('\\n')\n g.write('\\n')\nh.close()\ng.close()\nf.close()\n\n\n\n\n\n\n\n\n\n\n" } ]
2
mboneil10/webscraping-practice
https://github.com/mboneil10/webscraping-practice
39c8b02342dbf414501dbaa9af3d7aa433eb9a0d
121c4986b26bb8cd1ce2a33db01e0e2b32252af7
1be126cf419b8d15c2eeff1439c19d1713a1d934
refs/heads/master
2022-11-25T14:53:34.396937
2020-08-01T16:01:20
2020-08-01T16:01:20
282265799
0
0
null
2020-07-24T16:18:10
2020-07-24T16:43:44
2020-07-25T02:09:22
Python
[ { "alpha_fraction": 0.80859375, "alphanum_fraction": 0.80859375, "avg_line_length": 63, "blob_id": "5eda047f17e1864cbe6d574472b8f5b484c114eb", "content_id": "9d4c4a91cb6f7163492922b532f055f5653d175c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "no_license", "max_line_length": 128, "num_lines": 4, "path": "/README.md", "repo_name": "mboneil10/webscraping-practice", "src_encoding": "UTF-8", "text": "# webscraping-practice\nWeb scraping is the technique of extracting data from websites and saving it locally to your computer.\n\nThis repository is the result of the step-by-step webscraping tutorial on https://www.edureka.co/blog/web-scraping-with-python/.\n" }, { "alpha_fraction": 0.6871657967567444, "alphanum_fraction": 0.6983957290649414, "avg_line_length": 31.824562072753906, "blob_id": "f4e481e5d56921e47bbe8a245cf4e696fa5eed31", "content_id": "bb3c5da3d87cb26ea1e50dc5a735293c8597e1a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1870, "license_type": "no_license", "max_line_length": 95, "num_lines": 57, "path": "/webscrape.py", "repo_name": "mboneil10/webscraping-practice", "src_encoding": "UTF-8", "text": "# Steps below are from https://www.edureka.co/blog/web-scraping-with-python/\n# 1 *** Find the URL that you want to scrape\n # Look at the website's `/robots.txt` file to determine if the website allows web scraping.\n\n# 2 *** Inspecting the Page\n # Look at the specific divs by Inspecting the page\n\n# 3 *** Find the data you want to extract\n\n# 4 *** Write the code\n# For testing on the web\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n# Parses HTML and XML\nfrom bs4 import BeautifulSoup\n# Data manipulation and analysis\nimport pandas as pd\n\n# Set the webdriver to use Chrome browser. (This really doesn't matter to me).\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n# This is the page we are extracting data from. The one in the example was no longer relevant.\npage = \"https://www.mass.gov/service-details/missing-persons\"\n# Intialize data variables\nheadings = []\nvalues = []\nvalues2 = []\ncount = 0\n# Add more later on.\n\n# \"Open\" the page\ndriver.get(page)\n\ncontent = driver.page_source\nsoup = BeautifulSoup(content, features = \"html.parser\")\n\n# This code doesn't account for when we don't find the data we want\n# Data isn't found in the given example. Using mass.gov now.\n# Search for the data we want\nfor data in soup.findAll('tr', attrs={}):\n if count < 11:\n heading = data.find('td', attrs={}).text.strip()\n headings.append(heading)\n value = (data.findAll('td', attrs={})[1]).text.strip()\n values.append(value)\n else:\n value2 = (data.findAll('td', attrs={})[1]).text.strip()\n values2.append(value2)\n count = count + 1\n\ndriver.close()\n\n# 5 *** Run the code and extract the data\n\n# 6 *** Store the data in the required format\ndf = pd.DataFrame({'Heading':headings,'Value1':values,'Value2':values2}).T\ndf.to_csv('products.csv', index=False, encoding='utf-8')" } ]
2
boysingwong/scinapsis
https://github.com/boysingwong/scinapsis
ec7274848f5a62a8d3a58a4d42d9ebb6961d5b4a
ff46b1d0dfbef7b4a4e2382e3053f176bc38a205
dfa942bd1c0e46e18b472204fb5266dc80a55d61
refs/heads/master
2020-05-29T18:34:06.969199
2015-10-23T13:31:45
2015-10-23T13:31:45
42066721
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5053061246871948, "alphanum_fraction": 0.5191836953163147, "avg_line_length": 29.625, "blob_id": "c29b0eb72324a5da95cfcc1d7c36c02d1d94b73b", "content_id": "5b4e180023605fa4908ded4893ef5cc42f16ef93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 114, "num_lines": 40, "path": "/scin/migrations/0003_auto_20150204_0007.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scin', '0002_pub_result'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='pub_support_info',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('section_id', models.IntegerField()),\n ('header', models.CharField(max_length=800)),\n ('content', models.TextField()),\n ('url', models.CharField(max_length=100)),\n ('doc_id', models.ForeignKey(to='scin.pub_meta')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='pub_meta',\n name='pdf_address',\n field=models.CharField(max_length=200),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='pub_meta',\n name='src_address',\n field=models.CharField(max_length=200),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.49544626474380493, "alphanum_fraction": 0.5060109496116638, "avg_line_length": 36.60274124145508, "blob_id": "e9f0f64cfdd8757d4f851a11ed642fa43957c1f4", "content_id": "937109f6799936af692b54a3031058273ea45294", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2745, "license_type": "no_license", "max_line_length": 114, "num_lines": 73, "path": "/scin/migrations/0001_initial.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='pub_figure',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('figure_id', models.IntegerField()),\n ('header', models.CharField(max_length=800)),\n ('content', models.TextField()),\n ('url', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='pub_material_n_method',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('section_id', models.IntegerField()),\n ('header', models.CharField(max_length=800)),\n ('content_seq', models.IntegerField()),\n ('content', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='pub_meta',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('doc_id', models.CharField(max_length=50)),\n ('src_address', models.CharField(max_length=100)),\n ('pdf_address', models.CharField(max_length=100)),\n ('publisher', models.CharField(max_length=100)),\n ('title', models.CharField(max_length=800)),\n ('editors', models.CharField(max_length=200)),\n ('pub_date', models.DateField()),\n ('copyright', models.TextField()),\n ('data_availibility', 
models.TextField()),\n ('funding', models.TextField()),\n ('competing_interest', models.TextField()),\n ('rec_update_time', models.DateTimeField(auto_now=True)),\n ('rec_update_by', models.CharField(max_length=20)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='pub_material_n_method',\n name='doc_id',\n field=models.ForeignKey(to='scin.pub_meta'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='pub_figure',\n name='doc_id',\n field=models.ForeignKey(to='scin.pub_meta'),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.6594885587692261, "alphanum_fraction": 0.6796770095825195, "avg_line_length": 30, "blob_id": "0aed4ca35459120c199f086dafa3a82419b917b5", "content_id": "8006310a77786d54aa47cabca5c0125909bcc65c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 157, "num_lines": 24, "path": "/scripts/pathway_search/flush_pw_temp_tables.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\nfrom warnings import filterwarnings\n\ndef flush_pw_temp_tables(doc_id):\n\tfilterwarnings('ignore', category = MySQLdb.Warning)\n\tsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ttry:\n\t\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\t\tmysql_cursor = mysql.cursor()\n\t\t\n\t\t# call search protein keywords\n\t\targs = [doc_id]\n\t\tmysql_cursor.callproc( 'scin_db.pub_flush_pw_temp_tables', args )\n\n\t\tmysql_cursor.close()\n\t\tmysql.close()\n\n\texcept MySQLdb.Error, e:\n\t\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\t\twith open(\"error.log\", 'w') as w:\n\t\t\tw.write(errmsg)\n\t\tsys.exit(1)" }, { "alpha_fraction": 0.5334821343421936, "alphanum_fraction": 0.578125, "avg_line_length": 21.399999618530273, "blob_id": "9c7b7a4ea3add06efd31ae4189459348ab3a6aff", "content_id": "a5769c06a929e7a0b0fa4d6d095d921d728c5748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/scin/migrations/0005_pub_meta_citation.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scin', '0004_auto_20150315_1932'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pub_meta',\n name='citation',\n field=models.CharField(max_length=800, null=True),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.682196319103241, "alphanum_fraction": 0.6892678737640381, "avg_line_length": 39.03333282470703, "blob_id": "d44475d2c00e2396bd4d0a208c277ccf8b3157ba", "content_id": "9c878c1352c9c00414d3f374257de0b65b3b929d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2404, "license_type": "no_license", "max_line_length": 145, "num_lines": 60, "path": "/scripts/Plosone/spiders/patching_spider.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml 
import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\n\nclass PlosonePatchSpider(CrawlSpider):\n name = \"PlosonePatch\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n doc_id = 0\n\t\n def __init__(self, *args, **kwargs): \n super(PlosonePatchSpider, self).__init__(*args, **kwargs) \n self.start_urls = [kwargs.get('start_url')] \n self.doc_id = int(kwargs.get('doc_id'))\n\t\n def parse(self, response):\n\t\theaderList = response.xpath(\"//div[contains(@id,'section')]/h2/text()\").extract()\t\t# WARNING: content structure changed\n\t\tdoc_instance = pub_meta.objects.get(id=self.doc_id)\n\t\t\n\t\t# find section id having title \"Supporting Information\"\n\t\tcount = 0\n\t\tsiHeaderNb = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Supporting Information\":\n\t\t\t\tsiHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign supporting information section selector\n\t\tresultSelectorStr = \"//div[@id='section%d']\" % siHeaderNb\n\t\tresultSelector = response.xpath(resultSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']//h3/a/text()\" % siHeaderNb\n\t\tsubHeaderList = resultSelector.xpath(subHeaderListStr).extract()\n\t\t\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\txpathTitleListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathUrlListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/@href\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathContentListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/p[@class='preSiDOI']/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\t\n\t\t\t\theaderList = response.xpath(xpathTitleListStr).extract()\n\t\t\t\turlList = response.xpath(xpathUrlListStr).extract()\n\t\t\t\tcontentList = response.xpath(xpathContentListStr).extract()\n\t\t\t\t\n\t\t\t\titem = pubSIItem()\n\t\t\t\titem['doc'] = doc_instance\n\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\tif len(headerList) > 0:\n\t\t\t\t\titem['header'] = headerList[0]\n\t\t\t\tif len(urlList) > 0:\n\t\t\t\t\titem['url'] = urlList[0]\n\t\t\t\tif len(contentList) > 0:\n\t\t\t\t\titem['content'] = contentList[0]\n\t\t\t\titem.save()\n\t\t\t\theaderSeq = headerSeq + 1\n\t\t" }, { "alpha_fraction": 0.4822954833507538, "alphanum_fraction": 0.5030525326728821, "avg_line_length": 23.08823585510254, "blob_id": "c12936f0cdcdf11871d4754ddd0cad5e1bc2c880", "content_id": "2116b9da4fa66cc990acae164aabbc5084d10d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 47, "num_lines": 34, "path": "/scin/migrations/0004_auto_20150315_1932.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scin', '0003_auto_20150204_0007'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='pub_figure',\n old_name='doc_id',\n new_name='doc',\n ),\n migrations.RenameField(\n model_name='pub_material_n_method',\n old_name='doc_id',\n new_name='doc',\n ),\n migrations.RenameField(\n model_name='pub_result',\n old_name='doc_id',\n new_name='doc',\n ),\n migrations.RenameField(\n 
model_name='pub_support_info',\n old_name='doc_id',\n new_name='doc',\n ),\n ]\n" }, { "alpha_fraction": 0.5233786106109619, "alphanum_fraction": 0.529411792755127, "avg_line_length": 26.06122398376465, "blob_id": "b88d92f3b80f69e8e6ce186cf5a805ace85a075b", "content_id": "93688d30852ad454fe22d30251cf70dec15d0b88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 62, "num_lines": 49, "path": "/scin/migrations/0006_auto_20150421_0058.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scin', '0005_pub_meta_citation'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pub_meta',\n name='author',\n field=models.CharField(max_length=800, null=True),\n preserve_default=True,\n ),\n migrations.RenameField(\n model_name='pub_meta',\n\t\t\told_name='citation',\n new_name='citation_str',\n ),\n migrations.AddField(\n model_name='pub_meta',\n name='saves',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='pub_meta',\n name='shares',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='pub_meta',\n name='views',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n\t\tmigrations.AddField(\n model_name='pub_meta',\n name='citation',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.6332639455795288, "alphanum_fraction": 0.6650635004043579, "avg_line_length": 39.72806930541992, "blob_id": "4dfb946fa05738f484d85844e58c7f7768cd850b", "content_id": "c7b60e47eba99ce3387af0ffcfd65713a2583a0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13931, "license_type": "no_license", "max_line_length": 345, "num_lines": 342, "path": "/scripts/Plosone/spiders/Plosone_spider.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport urllib2\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\nfrom django import db\n\nclass PlosoneSpider(CrawlSpider):\n name = \"Plosone\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n start_urls = [\n\t\t#'http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0061362'\t\t# TODO: input parameter #2\n\t\t#'http://www.plosone.org/search/advanced?pageSize=15&sort=&queryField=publication_date&startDateAsString=2013-01-01&endDateAsString=2013-01-10&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2013-01-10T23%3A59%3A59Z]+&journalOpt=some&filterJournals=PLoSONE&subjectCatOpt=all&filterArticleTypesOpt=all'\n\t\t'http://www.plosone.org/search/advanced?searchName=&weekly=&monthly=&startPage=0&pageSize=60&filterKeyword=&resultView=&unformattedQuery=publication_date%3A%5B2012-01-01T00%3A00%3A00Z+TO+2013-01-01T23%3A59%3A59Z%5D&x=0&y=0&sort=Relevance&filterStartDate=&filterEndDate=&filterJournals=PLoSONE'\n ]\n # single page: 
'http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0061362'\n\t# single page2: 'http://www.plosone.org/article/info:doi%2F10.1371%2Fjournal.pone.0054089'\n # 20130101 to 20130110: 'http://www.plosone.org/search/advanced?pageSize=15&sort=&queryField=publication_date&startDateAsString=2013-01-01&endDateAsString=2013-01-10&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2013-01-10T23%3A59%3A59Z]+&journalOpt=some&filterJournals=PLoSONE&subjectCatOpt=all&filterArticleTypesOpt=all'\n\t# 2012 year: 'http://www.plosone.org/search/advanced?searchName=&weekly=&monthly=&startPage=0&pageSize=60&filterKeyword=&resultView=&unformattedQuery=publication_date%3A%5B2012-01-01T00%3A00%3A00Z+TO+2013-01-01T23%3A59%3A59Z%5D&x=0&y=0&sort=Relevance&filterStartDate=&filterEndDate=&filterJournals=PLoSONE'\n # 2013 year: 'http://www.plosone.org/search/advanced?searchName=&weekly=&monthly=&startPage=0&pageSize=60&filterKeyword=&resultView=&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2014-01-01T23%3A59%3A59Z]&sort=Relevance&filterStartDate=&filterEndDate=&filterJournals=PLoSONE'\n\t# 2014 year: 'http://www.plosone.org/search/advanced?searchName=&weekly=&monthly=&startPage=0&pageSize=60&filterKeyword=&resultView=&unformattedQuery=publication_date%3A%5B2014-01-01T00%3A00%3A00Z+TO+2015-01-01T23%3A59%3A59Z%5D&x=7&y=6&sort=Relevance&filterStartDate=&filterEndDate=&filterJournals=PLoSONE'\n \n rules = (\n ###===RULES FOR NEXT PAGE LINK===\n # allow: allows certain link url patterns to be followed.\n # restricted_xpaths: xpath for the next button to follow.\n Rule (SgmlLinkExtractor(allow=(\".+\", ), ###\n restrict_xpaths=(\n \"//div[@class='pagination']/a[@class='next']\",) ###\n ), follow=True),\n\t\t\n ###===RULES FOR AD PAGE LINK===\n # allow: link patterns for ads to click\n # callback: function name for processing ad page after visiting it\n # restricted_xpaths: xpaths under which the links are located.\n Rule (SgmlLinkExtractor(allow=(\".*/article/.+\", ), deny=(\".*/search/.+\"), # ext saccurent\n restrict_xpaths=(\n \"//div[@class='main']/ul[@id='search-results']//span[@class='article']/a\")\n ),\n callback=\"parse_item\", follow=False),\n )\n counter = 0;\n\t\t\n def parse_item(self, response):\n\t\tdocHeader = self.parseHeader(response)\n\t\tself.parseMNM(response, docHeader)\n\t\tself.parseResult(response, docHeader)\n\t\tself.parseFigure(response, docHeader)\n\t\tself.parseSI(response, docHeader)\n\t\t\n\t\tself.counter += 1;\n\t\turl_name = response.url\n\t\tdb.reset_queries()\n\t\tprint \"[RESULT] scrap paper #%d\" % self.counter\n\t\tprint \"[RESULT] url=%s\" % url_name\n #documentId = self.parseHeader(response)\n #self.parseMNM(response, documentId)\n\t\t\n def parseHeader(self, response):\n\t\tpublisher = \"Plos One\"\t\t\t\t# TODO: input parameter #1\n\t\tsrc_address = response.url\t\t\t# self.start_urls[0]\n\t\tpdf_address = response.xpath(\"//div[@class='dload-pdf']//a/@href\").extract()\n\t\ttitle = response.xpath(\"//h1[@id='artTitle']/text()\").extract()\n\t\t\t\t\n\t\tdoc_id = \"\"\n\t\teditors = \"\"\n\t\tpub_date = \"\"\n\t\tcopyright = \"\"\n\t\tdata_availibility = \"\"\n\t\tfunding = \"\"\n\t\tcompeting_interest = \"\"\n\t\tcitation = \"\"\n\t\t\n\t\tinfoList = response.xpath(\"//div[@class='articleinfo']/p\")\n\t\tfor infoContent in infoList:\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=doi:).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tdoc_id = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Editor: 
).*\\n*.*\")\n\t\t\tif len(content) > 0:\n\t\t\t\teditors = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Published: )[A-Za-z]+ [0-9]+, [0-9]+\")\n\t\t\tif len(content) > 0:\n\t\t\t\tpub_date = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Copyright: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcopyright = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Data Availability: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tdata_availibility = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Funding: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tfunding = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Competing interests: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcompeting_interest = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Citation: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcitation = content\n\t\t\n\t\t# author\n\t\tauthor = \"\"\n\t\tauthorlist = response.xpath(\"//div[@class='title-authors']//a[@class='author-name']/text()\").extract()\n\n\t\tfor authorname in authorlist:\n\t\t\tauthorname = authorname.replace(\"\\n\", \"\").strip()\n\t\t\tauthor = author + authorname\n\t\t\n\t\t# citations\n\t\tpoi = doc_id[0]\n\t\tsource_id = \"cited,viewed,saved,discussed\"\n\n\t\turl = 'http://alm.plos.org:80/api/v5/articles?ids=%s&source_id=%s' % (poi, source_id)\n\n\t\tdata = json.load(urllib2.urlopen(url))\n\t\tviews = data['data'][0]['viewed']\n\t\tsaves = data['data'][0]['saved']\n\t\tshares = data['data'][0]['discussed']\n\t\tcitationNum = data['data'][0]['cited']\n\t\t\n\t\t# update time and other info\n\t\trec_update_time = datetime.now()\n\t\trec_update_by = \"sys\"\n\t\t\n\t\t# debug messages\n\t\t#print \"publisher = %s\" % publisher\n\t\t#print \"src_address = %s\" % src_address\n\t\t#print \"pdf_address = %s\" % pdf_address\n\t\t#print \"doc_id = %s\" % doc_id\n\t\t#print \"title = %s\" % title\n\t\t#print \"editors = %s\" % editors\n\t\t#print \"pub_date = %s\" % pub_date\n\t\t#print \"copyright = %s\" % copyright\n\t\t#print \"data_availibility = %s\" % data_availibility\n\t\t#print \"funding = %s\" % funding\n\t\t#print \"competing_interest = %s\" % competing_interest\n \n\t\t# write to database\n\t\titem = pubMetaItem()\n\t\titem['publisher'] = publisher\n\t\tif len(pdf_address) > 0:\n\t\t\titem['pdf_address'] = pdf_address[0]\n\t\titem['src_address'] = src_address\n\t\titem['doc_id'] = doc_id[0]\n\t\titem['title'] = title[0]\n\t\tif len(editors) > 0:\n\t\t\titem['editors'] = editors[0]\n\t\tif len(pub_date) > 0:\n\t\t\titem['pub_date'] = datetime.strptime(pub_date[0], '%B %d, %Y')\t\t\t# convert to djan\n\t\tif len(copyright) > 0:\n\t\t\titem['copyright'] = copyright[0]\n\t\tif len(data_availibility) > 0:\n\t\t\titem['data_availibility'] = data_availibility[0]\n\t\tif len(funding) > 0:\n\t\t\titem['funding'] = funding[0]\n\t\tif len(competing_interest) > 0:\n\t\t\titem['competing_interest'] = competing_interest[0]\n\t\tif len(citation) > 0:\n\t\t\titem['citation_str'] = citation[0]\n\t\t\n\t\titem['author'] = author\n\t\titem['views'] = views\n\t\titem['saves'] = saves\n\t\titem['shares'] = shares\n\t\titem['citation'] = citationNum\n\t\t\n\t\titem['rec_update_time'] = datetime.now()\t\t\t# TODO: use GMT instead\n\t\titem['rec_update_by'] = \"sys\"\n\t\tdocHeader = item.save()\n\t\t\n\t\treturn docHeader\n\n    def parseMNM(self, response, docHeader):\n\t\theaderList = response.xpath(\"//div[starts-with(@id,'section')]/h2/text()\").extract()\n\t\t\n\t\t# find section id 
having title \"Materials and Methods\"\n\t\tcount = 0\n\t\tmnmHeaderNb = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Materials and Methods\" or header == \"Methods\":\n\t\t\t\tmnmHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign M&M section selector\n\t\tmnmSelectorStr = \"//div[@id='section%d']\" % mnmHeaderNb\n\t\tmnmSelector = response.xpath(mnmSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']/h3/text()\" % mnmHeaderNb\n\t\tsubHeaderList = mnmSelector.xpath(subHeaderListStr).extract()\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[@id='section%d']/h3[%d]\" % (mnmHeaderNb, headerSeq)\n\t\t\t\tfor h4 in mnmSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\titem = pubMNMItem()\n\t\t\t\t\t\titem['doc'] = docHeader\n\t\t\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\t\t\titem['header'] = subHeader\n\t\t\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\t\t\titem['content'] = prgrph\n\t\t\t\t\t\titem.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = mnmSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\titem = pubMNMItem()\n\t\t\t\titem['doc'] = docHeader\n\t\t\t\titem['section_id'] = 1\n\t\t\t\titem['header'] = \"\"\n\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\titem['content'] = prgrph.xpath(\"string()\").extract()\n\t\t\t\titem.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\n def parseResult(self, response, docHeader):\n\t\theaderList = response.xpath(\"//div[starts-with(@id,'section')]/h2/text()\").extract()\n\t\t\n\t\t# find section id having title \"Results\"\n\t\tcount = 0\n\t\tresultHeaderNb = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Results\":\n\t\t\t\tresultHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign M&M section selector\n\t\tresultSelectorStr = \"//div[@id='section%d']\" % resultHeaderNb\n\t\tresultSelector = response.xpath(resultSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']/h3/text()\" % resultHeaderNb\n\t\tsubHeaderList = resultSelector.xpath(subHeaderListStr).extract()\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[@id='section%d']/h3[%d]\" % (resultHeaderNb, headerSeq)\n\t\t\t\tfor h4 in resultSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\titem = pubResultItem()\n\t\t\t\t\t\titem['doc'] = docHeader\n\t\t\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\t\t\titem['header'] = subHeader\n\t\t\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\t\t\titem['content'] = prgrph\n\t\t\t\t\t\titem.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = resultSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\titem = pubResultItem()\n\t\t\t\titem['doc'] = 
docHeader\n\t\t\t\titem['section_id'] = 1\n\t\t\t\titem['header'] = \"\"\n\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\titem['content'] = prgrph.xpath(\"string()\").extract()\n\t\t\t\titem.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\n def parseFigure(self, response, docHeader):\n\t\tfigIdList = response.xpath(\"//div[contains(@class,'figure')]/@data-doi\").extract()\n\t\t\n\t\titemId = 0\n\t\tfor figId in figIdList:\n\t\t\txpathHeaderStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/div[contains(@class, 'figcaption')]/text()\" % figId\n\t\t\txpathContentStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/p[2]\" % figId\n\t\t\txpathUrlStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/div[contains(@class, 'img-box')]/a/@href\" % figId\n\t\t\t\n\t\t\theaderList = response.xpath(xpathHeaderStr).extract()\n\t\t\tcontentList = response.xpath(xpathContentStr).extract()\n\t\t\turlList = response.xpath(xpathUrlStr).extract()\n\t\t\t\n\t\t\titem = pubFigureItem()\n\t\t\titem['doc'] = docHeader\n\t\t\titem['figure_id'] = itemId\n\t\t\tif len(headerList) > 0:\n\t\t\t\titem['header'] = headerList[0]\n\t\t\tif len(contentList) > 0:\n\t\t\t\titem['content'] = contentList[0]\n\t\t\tif len(urlList) > 0:\n\t\t\t\titem['url'] = urlList[0]\n\t\t\titem.save()\n\t\t\titemId = itemId + 1\n\t\t\t\t\n def parseSI(self, response, docHeader):\n\t\theaderList = response.xpath(\"//div[contains(@id,'section')]/h2/text()\").extract()\t\t# WARNING: content structure changed\n\t\t\n\t\t# find section id having title \"Supporting Information\"\n\t\tcount = 0\n\t\tsiHeaderNb = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Supporting Information\":\n\t\t\t\tsiHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign supporting information section selector\n\t\tresultSelectorStr = \"//div[@id='section%d']\" % siHeaderNb\n\t\tresultSelector = response.xpath(resultSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']//h3/a/text()\" % siHeaderNb\n\t\tsubHeaderList = resultSelector.xpath(subHeaderListStr).extract()\n\t\t\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\txpathTitleListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathUrlListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/@href\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathContentListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/p[@class='preSiDOI']/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\t\n\t\t\t\theaderList = response.xpath(xpathTitleListStr).extract()\n\t\t\t\turlList = response.xpath(xpathUrlListStr).extract()\n\t\t\t\tcontentList = response.xpath(xpathContentListStr).extract()\n\t\t\t\t\n\t\t\t\titem = pubSIItem()\n\t\t\t\titem['doc'] = docHeader\n\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\tif len(headerList) > 0:\n\t\t\t\t\titem['header'] = headerList[0]\n\t\t\t\tif len(urlList) > 0:\n\t\t\t\t\titem['url'] = urlList[0]\n\t\t\t\tif len(contentList) > 0:\n\t\t\t\t\titem['content'] = contentList[0]\n\t\t\t\titem.save()\n\t\t\t\theaderSeq = headerSeq + 1\n\t\t" }, { "alpha_fraction": 0.6471177935600281, "alphanum_fraction": 0.659649133682251, "avg_line_length": 38.900001525878906, "blob_id": "6c7a47f3b479e34899e1a48c9b8e4c4119e2d925", "content_id": "06ff6d18e78484b492a8c0660f74e9a1356cdbfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1995, "license_type": "no_license", "max_line_length": 157, "num_lines": 50, "path": "/scripts/product_tech_search/run_protein_tech.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import codecs\nimport MySQLdb\nimport re\nimport sys\nfrom warnings import filterwarnings\n\ndef search_protein_tech(doc_id):\n\tfilterwarnings('ignore', category = MySQLdb.Warning)\n\tsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ttry:\n\t\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\t\tmysql_cursor = mysql.cursor()\n\t\t\n\t\t# call search protein keywords\n\t\targs = [doc_id]\n\t\tmysql_cursor.callproc( 'scin_db.pub_technique_protein_exists', args )\n\n\t\tquery = (\"SELECT protein_gene_name, tech_parental_name, tech_alternative, figure_id, content FROM scin_db.pub_tech_protein_temp\")\n\t\tmysql_cursor.execute(query)\n\t\t\n\t\trsltCount = 0\n\t\tfor (protein_gene_name, tech_parental_name, tech_alternative, figure_id, content) in mysql_cursor:\n\t\t outputStr = \"result: %s, %s, %s, %d \" % (protein_gene_name, tech_parental_name, tech_alternative, figure_id)\n\t\t \n\t\t pat1 = ur'\\b%s\\b.*?(\\b|-)%s\\b' % (tech_alternative, protein_gene_name)\n\t\t pat2 = ur'(\\b|-)%s\\b.*?\\b%s\\b' % (protein_gene_name, tech_alternative)\n\t\t \n\t\t sentenceList = re.split(ur'(?<!\\w\\.\\w.)(?<![A-Z]\\.)(?<=\\.|\\?)\\s', content)\n\t\t for sentence in sentenceList:\n\t\t\t #print 'check[%s]' % sentence\n\t\t\t result1 = re.search(pat1, sentence)\n\t\t\t result2 = re.search(pat2, sentence)\n\t\t\t if result1 or result2:\n\t\t\t\tinsertStmt = (\"INSERT INTO scin_db.pub_tech_protein_result \"\n\t\t\t\t\t\t\t \"(doc_id, protein_gene_name, tech_parental_name, tech_alternative, figure_id, sentence) \"\n\t\t\t\t\t\t\t \"VALUES (%s, %s, %s, %s, %s, %s)\")\n\t\t\t\tmysql_cursor.execute(insertStmt, (doc_id, protein_gene_name, tech_parental_name, tech_alternative, int(figure_id), sentence) )\n\t\t\t\tmysql.commit()\n\t\t\t\trsltCount = rsltCount + 1\n\n\t\tmysql_cursor.close()\n\t\tmysql.close()\n\t\t\n\t\treturn rsltCount\n\n\texcept MySQLdb.Error, e:\n\t\terrmsg = \"MySQL Error @run_protein_tech (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\t\twith open(\"error.log\", 'w') as w:\n\t\t\tw.write(errmsg)\n\t\tsys.exit(1)\n" }, { "alpha_fraction": 0.649779736995697, "alphanum_fraction": 0.6703377366065979, "avg_line_length": 29.266666412353516, "blob_id": "de141890be21724c68cf2587b8285f9488e4d093", "content_id": "414b4197e1907a6eb36bb70be7957a637f31665e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1362, "license_type": "no_license", "max_line_length": 156, "num_lines": 45, "path": "/scripts/pathway_search/search_pw_main.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\n\nimport run_pw_action_word\nimport run_pw_protein\nimport run_pathway\nimport flush_pw_temp_tables\n\nfrom warnings import filterwarnings\n\nsys.setrecursionlimit(20000)\nfilterwarnings('ignore', category = MySQLdb.Warning)\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\ntry:\n\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\tmysql_cursor = mysql.cursor()\n\t\n\tmysql_cursor.execute(\"SELECT id FROM scin_db.scin_pub_meta ht \"\n\t\t\t\t\t\t\t\"WHERE 
EXISTS ( \"\n\t\t\t\t\t\t\t\"SELECT 1 FROM scin_db.scin_pub_result rt \"\n\t\t\t\t\t\t\t\"WHERE ht.id = rt.doc_id_id \"\n\t\t\t\t\t\t\t\") \"\n\t\t\t\t\t\t\t\"AND id BETWEEN 1 and 2500 \"\n\t\t\t\t\t\t\t\"ORDER BY id\")\n\t\n\tfor (id,) in mysql_cursor:\n\t    doc_id = id\n\t    print \"processing doc_id: %d\" % doc_id\n\t    actionWordResult = run_pw_action_word.search_action_word(doc_id)\n\t    if actionWordResult > 0:\n\t        protResult = run_pw_protein.search_pw_protein(doc_id)\n\t        if protResult > 0:\n\t\t    run_pathway.search_pathway(doc_id)\n\t    flush_pw_temp_tables.flush_pw_temp_tables(doc_id)\n\t    print \"doc_id [%d] search completed\" % doc_id\n\t\n\tmysql_cursor.close()\n\tmysql.close()\n\nexcept MySQLdb.Error, e:\n\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\twith open(\"error.log\", 'w') as w:\n\t\tw.write(errmsg)\n\tsys.exit(1)\n" },
{ "alpha_fraction": 0.6616113781929016, "alphanum_fraction": 0.6777251362800598, "avg_line_length": 30.969696044921875, "blob_id": "bed5021705e91af30cb2b2dfe83cf0702612e9d8", "content_id": "0135cc4ea5c428c127ae488ea499bf503b0e4e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 157, "num_lines": 33, "path": "/scripts/product_tech_search_by_sup/run_supplier_by_sup.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\nfrom warnings import filterwarnings\n\ndef search_supplier(doc_id, supplier_id):\n\tfilterwarnings('ignore', category = MySQLdb.Warning)\n\tsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ttry:\n\t\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\t\tmysql_cursor = mysql.cursor()\n\t\t\n\t\t# call search supplier keywords\n\t\targs = [doc_id, supplier_id]\n\t\tmysql_cursor.callproc( 'scin_db.pub_supplier_exists_by_sup', args )\n\n\t\tquery = (\"SELECT count(1) as count FROM scin_db.pub_supplier_result WHERE doc_id = %s AND supplier_id = %s\")\n\t\tmysql_cursor.execute(query, (doc_id,supplier_id,))\n\n\t\tfor (count,) in mysql_cursor:\n\t\t    countStr = \"count: %d \" % count\n\t\t    #print countStr\n\n\t\tmysql_cursor.close()\n\t\tmysql.close()\n\t\t\n\t\treturn count\n\n\texcept MySQLdb.Error, e:\n\t\terrmsg = \"MySQL Error @run_supplier (@%s) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\t\twith open(\"error.log\", 'w') as w:\n\t\t\tw.write(errmsg)\n\t\tsys.exit(1)\n" },
{ "alpha_fraction": 0.6364883184432983, "alphanum_fraction": 0.6598079800605774, "avg_line_length": 28.755102157592773, "blob_id": "406e8d2c0317ab3164cd3b82220bc04d6199a69b", "content_id": "dbcd42afc9add7be828247de49fa131f4d3857d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1458, "license_type": "no_license", "max_line_length": 156, "num_lines": 49, "path": "/scripts/protein_tech_search/search_main10.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\n\nimport run_technique\nimport run_protein\nimport run_protein_tech\nimport flush_temp_tables\n\nfrom warnings import filterwarnings\n\nsys.setrecursionlimit(20000)\nfilterwarnings('ignore', category = MySQLdb.Warning)\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\ntry:\n\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\tmysql_cursor = mysql.cursor()\n\t\n\tmysql_cursor.execute(\"SELECT id FROM scin_db.scin_pub_meta ht \"\n\t\t\t\t\t\t\t\"WHERE EXISTS ( \"\n\t\t\t\t\t\t\t\"SELECT 1 FROM scin_db.scin_pub_figure ft \"\n\t\t\t\t\t\t\t\"WHERE ht.id = ft.doc_id_id \"\n\t\t\t\t\t\t\t\") \"\n\t\t\t\t\t\t\t\"AND EXISTS ( \"\n\t\t\t\t\t\t\t\"SELECT 1 FROM scin_db.scin_pub_material_n_method mt \"\n\t\t\t\t\t\t\t\"WHERE ht.id = mt.doc_id_id \"\n\t\t\t\t\t\t\t\") \"\n\t\t\t\t\t\t\t\"AND id BETWEEN 22501 and 25000 \"\n\t\t\t\t\t\t\t\"ORDER BY id\")\n\t\n\tfor (id,) in mysql_cursor:\n\t    doc_id = id\n\t    print \"processing doc_id: %d\" % doc_id\n\t    techResult = run_technique.search_tech(doc_id)\n\t    if techResult > 0:\n\t\tprotResult = run_protein.search_protein(doc_id)\n\t\tif protResult > 0:\n\t\t\trun_protein_tech.search_protein_tech(doc_id)\n\t    flush_temp_tables.flush_temp_tables(doc_id)\n\t    print \"doc_id [%d] search completed\" % doc_id\n\t\n\tmysql_cursor.close()\n\tmysql.close()\n\nexcept MySQLdb.Error, e:\n\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\twith open(\"error.log\", 'w') as w:\n\t\tw.write(errmsg)\n\tsys.exit(1)\n" },
{ "alpha_fraction": 0.6967095732688904, "alphanum_fraction": 0.7048164010047913, "avg_line_length": 37.77777862548828, "blob_id": "9f3baca2ce626210d01c3c5c76e02bed18ffbaa6", "content_id": "b5471b6307a7afd72a914a786472c2ae184fc06b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2097, "license_type": "no_license", "max_line_length": 775, "num_lines": 54, "path": "/scripts/product_tech_search/prgph_search.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport codecs\nimport sys\nimport re\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ncontent = \"Bar graphs showing the expression of (A) FPR2/ALX mRNA in the rat ipsilateral spinal cord plotted versus time following carrageenan injection to the hind paw, presented as a percent of mRNA levels in control (naive) rat spinal cord. Each bar represents the mean + S.E.M, n= 6-10. * represents a significant difference at p<0.05 as compared with naive spinal cord. FPR2/ALX mRNA is expressed in the rat (B) and human astrocytes (C). GPR32 mRNA is expressed in human astrocytes (C). mRNA levels are expressed as relative units and each bar represents the mean + S.E.M for three repeats. The immunohistochemical images show the expression of FPR2/ALX (D), the astrocyte marker GFAP (E) and the colocalization of FPR2/ALX and GFAP (F) in naive rat lumbar spinal cord.\"\ntechnique = \"immunohistochemical\"\t\t# western blotting, western blot, IP\nproduct = \"FPR2\"\n\n# 1. split sentence\nsentenceList = re.split(ur'(?<!\\w\\.\\w.)(?<![A-Z]\\.)(?<=\\.|\\?)\\s', content)\n\nfor sentence in sentenceList:\n\ttechnique_exists = False\n\tprotein_exists = False\n\t\n\tprint \"process sentence [%s]\" % sentence\n\n\t# a. check technique exists\n\t#if technique in sentence:\n\t\n\tpattern = ur'(?i)\\b%s\\b' % (technique)\n\tif re.search(pattern, sentence):\n\t\tprint \"technique found [%s]\" % technique\n\t\ttechnique_exists = True\n\telse:\n\t\tcontinue\n\t\t\n \n\t# b. 
check protein exists:\n\tsentence = sentence.replace(\"(\", \"\")\n\tsentence = sentence.replace(\")\", \"\")\n\twordList = re.split('\\s|-', sentence)\n\t\n\tfor word in wordList:\n\t\tif len(word) < 3:\n\t\t\tcontinue\n\n\t\tif word.lower() == product.lower():\n\t\t\tprint \"product found exact match [%s]\" % product\n\t\t\tprotein_exists = True\n\t\telif word in product:\n\t\t\tprint \"product found exact partial match [%s]\" % product\n\t\t\tprotein_exists = True\n\t\telse:\n\t\t\tcontinue\n\t\n\n\tif technique_exists and protein_exists:\n\t\tprint \"RESULT: [%s] contains technique & product \" % sentence\n\t\tprint \"RESULT: technique = %s \" % technique\n\t\tprint \"RESULT: product = %s \" % product\n\t\t# process insert into result\n\n\t\t" }, { "alpha_fraction": 0.5458823442459106, "alphanum_fraction": 0.5552940964698792, "avg_line_length": 25.5625, "blob_id": "4bcb830e35563794ca3076b6f6b426b755b323e1", "content_id": "178e73ea49bc2377b935dcc787dc54b8da0e11ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 62, "num_lines": 32, "path": "/scin/migrations/0009_auto_20150608_0001.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scin', '0008_pub_abstract_pub_discussion'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pub_abstract',\n name='content_seq',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='pub_abstract',\n name='header',\n field=models.CharField(max_length=800, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='pub_abstract',\n name='section_id',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.647662878036499, "alphanum_fraction": 0.654036819934845, "avg_line_length": 35.675323486328125, "blob_id": "aedda8796657c58e949019f964b683bbe0624c0a", "content_id": "ace2054fc826fc709f6a98c23115858e9c525868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2824, "license_type": "no_license", "max_line_length": 163, "num_lines": 77, "path": "/scripts/product_tech_search_by_sup/run_product_tech_by_sup.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import codecs\nimport MySQLdb\nimport re\nimport sys\nfrom warnings import filterwarnings\n\ndef search_product_tech(doc_id, supplier_id):\n\tfilterwarnings('ignore', category = MySQLdb.Warning)\n\tsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ttry:\n\t\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\t\tmysql_cursor = mysql.cursor()\n\t\t\n\t\t# call search protein keywords\n\t\targs = [doc_id, supplier_id]\n\t\tmysql_cursor.callproc( 'scin_db.pub_technique_product_exists_by_sup', args )\n\t\t\n\t\tquery = (\"SELECT figure_id, tech_id, tech_parental_name, tech_alternative, prod_id, supplier, catalog_nb, product_name, content FROM scin_db.pub_tech_prod_temp\")\n\t\tmysql_cursor.execute(query)\n\t\t\n\t\trsltCount = 0\n\t\tfor (figure_id, tech_id, tech_parental_name, tech_alternative, prod_id, supplier, catalog_nb, product_name, content) in mysql_cursor:\n\t\t outputStr = \"result: 
%s, %s, %s, %d \" % (tech_parental_name, tech_alternative, product_name, figure_id)\n\t\t \n\t\t #print \"process figure#: %d \" % figure_id\n\t\t \n\t\t sentenceList = re.split(ur'(?<!\\w\\.\\w.)(?<![A-Z]\\.)(?<=\\.|\\?)\\s', content)\n\t\t for sentence in sentenceList:\n\t\t\ttechnique_exists = False\n\t\t\tprotein_exists = False\n\t\t\t\n\t\t\t#print \"process sentence [%s]\" % sentence\n\n\t\t\t# a. check tech_alternative exists\n\t\t\tpattern = ur'(?i)\\b%s\\b' % (tech_alternative)\n\t\t\tif re.search(pattern, sentence):\n\t\t\t\t#print \"tech_alternative found [%s]\" % tech_alternative\n\t\t\t\ttechnique_exists = True\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# b. check protein exists:\n\t\t\tsentence = sentence.replace(\"(\", \"\")\n\t\t\tsentence = sentence.replace(\")\", \"\")\n\t\t\twordList = re.split('\\s|-', sentence)\n\t\t\t\n\t\t\tfor word in wordList:\n\t\t\t\tif len(word) < 3:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif word.lower() == product_name.lower():\n\t\t\t\t\t#print \"product_name found exact match [%s]\" % product_name\n\t\t\t\t\tprotein_exists = True\n\t\t\t\telif word in product_name:\n\t\t\t\t\t#print \"product_name found exact partial match [%s]\" % product_name\n\t\t\t\t\tprotein_exists = True\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tif technique_exists and protein_exists:\n\t\t\t\tinsertStmt = (\"INSERT INTO scin_db.pub_tech_prod_result \"\n\t\t\t\t\t\t\t \"(doc_id, figure_id, tech_id, tech_parental_name, tech_alternative, prod_id, supplier, catalog_nb, product_name, sentence) \"\n\t\t\t\t\t\t\t \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n\t\t\t\tmysql_cursor.execute(insertStmt, (doc_id, figure_id, tech_id, tech_parental_name, tech_alternative, prod_id, supplier, catalog_nb, product_name, sentence) )\n\t\t\t\tmysql.commit()\n\t\t\t\trsltCount = rsltCount + 1\n\n\t\tmysql_cursor.close()\n\t\tmysql.close()\n\t\t\n\t\treturn rsltCount\n\n\texcept MySQLdb.Error, e:\n\t\terrmsg = \"MySQL Error @run_product_tech (@%s) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\t\twith open(\"error.log\", 'w') as w:\n\t\t\tw.write(errmsg)\n\t\tsys.exit(1)\n" }, { "alpha_fraction": 0.738135576248169, "alphanum_fraction": 0.760169506072998, "avg_line_length": 34.208953857421875, "blob_id": "81193ac1b18232a6272c3e931aa3426bc978366d", "content_id": "0791ba4ebf2dada18c510c9629f8f5715f75766c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2360, "license_type": "no_license", "max_line_length": 59, "num_lines": 67, "path": "/scin/models.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\nclass pub_meta(models.Model):\n\tdoc_id = models.CharField(max_length=50)\n\tsrc_address = models.CharField(max_length=200)\n\tpdf_address = models.CharField(max_length=200)\n\tpublisher = models.CharField(max_length=100)\n\ttitle = models.CharField(max_length=800)\n\teditors = models.CharField(max_length=200)\n\tpub_date = models.DateField()\n\tcopyright = models.TextField()\n\tdata_availibility = models.TextField()\n\tfunding = models.TextField()\n\tcompeting_interest = models.TextField()\n\trec_update_time = models.DateTimeField(auto_now=True)\n\trec_update_by = models.CharField(max_length=20)\n\tcitation_str = models.CharField(max_length=800, null=True)\n\tauthor = models.CharField(max_length=800, null=True)\n\tsaves = models.IntegerField(null=True)\n\tviews = models.IntegerField(null=True)\n\tcitation = 
models.IntegerField(null=True)\n\tshares = models.IntegerField(null=True)\n\nclass pub_material_n_method(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tsection_id = models.IntegerField()\n\theader = models.CharField(max_length=800)\n\tcontent_seq = models.IntegerField()\n\tcontent = models.TextField()\n\t\nclass pub_result(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tsection_id = models.IntegerField()\n\theader = models.CharField(max_length=800)\n\tcontent_seq = models.IntegerField()\n\tcontent = models.TextField()\n\nclass pub_figure(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tfigure_id = models.IntegerField()\n\theader = models.CharField(max_length=800)\n\tcontent = models.TextField()\n\turl = models.CharField(max_length=200, null=True)\n\tthumbnail = models.CharField(max_length=200, null=True)\n\nclass pub_support_info(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tsection_id = models.IntegerField()\n\theader = models.CharField(max_length=800)\n\tcontent = models.TextField()\n\turl = models.CharField(max_length=100)\n\t\nclass pub_abstract(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tsection_id = models.IntegerField(null=True)\n\theader = models.CharField(max_length=800, null=True)\n\tcontent_seq = models.IntegerField(null=True)\n\tcontent = models.TextField()\n\t\nclass pub_discussion(models.Model):\n\tdoc = models.ForeignKey(pub_meta)\n\tsection_id = models.IntegerField()\n\theader = models.CharField(max_length=800)\n\tcontent_seq = models.IntegerField()\n\tcontent = models.TextField()\n\t" }, { "alpha_fraction": 0.6957720518112183, "alphanum_fraction": 0.6994485259056091, "avg_line_length": 34.032257080078125, "blob_id": "4642b40b96b92009ea003a99bac3720ee7c43048", "content_id": "5e9d313939b4a779e2ce46c2c4fa55787faf87c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 90, "num_lines": 31, "path": "/scripts/Plosone/spiders/patching_meta_spider.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\n\nclass PlosonePatchSpider(CrawlSpider):\n name = \"PlosoneMetaPatch\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n doc_id = 0\n\t\n def __init__(self, *args, **kwargs): \n super(PlosonePatchSpider, self).__init__(*args, **kwargs) \n self.start_urls = [kwargs.get('start_url')] \n self.doc_id = int(kwargs.get('doc_id'))\n\t\n def parse(self, response):\n\t\tdoc_instance = pub_meta.objects.get(id=self.doc_id)\n\t\t\n\t\t# find articleInfo\n\t\tinfoList = response.xpath(\"//div[@class='articleinfo']/p\")\n\t\tfor infoContent in infoList:\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Citation: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcitation = content\n\t\t\n\t\tif len(citation) > 0:\n\t\t\tdoc_instance.citation = citation[0]\n\t\t\tdoc_instance.save()\n\t\t" }, { "alpha_fraction": 0.6240710616111755, "alphanum_fraction": 0.652504026889801, "avg_line_length": 39.306190490722656, "blob_id": "b548c53de5595d592678720f3b548f06489a8c1d", "content_id": "cd11737ba6d8f4ee45f8948f86604878ab3b40f3", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 12380, "license_type": "no_license", "max_line_length": 345, "num_lines": 307, "path": "/scripts/Plosone/spiders/testing_single_spider.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\n\nclass PlosoneSpider(CrawlSpider):\n name = \"PlosoneTest\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n start_urls = [\n\t\t'http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053807'\t\t# TODO: input parameter #2\n\t\t#'http://www.plosone.org/search/advanced?pageSize=15&sort=&queryField=publication_date&startDateAsString=2013-01-01&endDateAsString=2013-01-10&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2013-01-10T23%3A59%3A59Z]+&journalOpt=some&filterJournals=PLoSONE&subjectCatOpt=all&filterArticleTypesOpt=all'\n\t\t#'http://www.plosone.org/search/advanced?pageSize=15&sort=&queryField=publication_date&startDateAsString=2013-01-01&endDateAsString=2013-01-10&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2013-01-10T23%3A59%3A59Z]+&journalOpt=some&filterJournals=PLoSONE&subjectCatOpt=all&filterArticleTypesOpt=all'\n ]\n # single page: 'http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0061362'\n\t# single page2: 'http://www.plosone.org/article/info:doi%2F10.1371%2Fjournal.pone.0054089'\n # 20130101 to 20130110: 'http://www.plosone.org/search/advanced?pageSize=15&sort=&queryField=publication_date&startDateAsString=2013-01-01&endDateAsString=2013-01-10&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2013-01-10T23%3A59%3A59Z]+&journalOpt=some&filterJournals=PLoSONE&subjectCatOpt=all&filterArticleTypesOpt=all'\n # 2013 year: 'http://www.plosone.org/search/advanced?searchName=&weekly=&monthly=&startPage=0&pageSize=60&filterKeyword=&resultView=&unformattedQuery=publication_date%3A[2013-01-01T00%3A00%3A00Z+TO+2014-01-01T23%3A59%3A59Z]&sort=Relevance&filterStartDate=&filterEndDate=&filterJournals=PLoSONE'\n \n counter = 0;\n\t\t\n def parse(self, response):\n\t\t#self.parseHeader(response)\n\t\t#self.parseMNM(response)\n\t\t#self.parseResults(response)\n\t\t#self.parseFigure(response)\n\t\tself.parseSI(response)\n\t\t\n\t\tself.counter += 1;\n\t\turl_name = response.url\n\t\tprint \"[RESULT] scrap paper #%d\" % self.counter\n\t\tprint \"[RESULT] url=%s\" % url_name\n #documentId = self.parseHeader(response)\n #self.parseMNM(response, documentId)\n\t\t\n def parseHeader(self, response):\n\t\tpublisher = \"Plos One\"\t\t\t\t# TODO: input parameter #1\n\t\tsrc_address = response.url\t\t\t# self.start_urls[0]\n\t\tpdf_address = response.xpath(\"//div[@class='download']//a/@href\").xpath(\"string()\").extract()\n\t\ttitle = response.xpath(\"//h1[@id='artTitle']/text()\").extract()[0]\n\t\t\t\t\n\t\tdoc_id = \"\"\n\t\teditors = \"\"\n\t\tpub_date = \"\"\n\t\tcopyright = \"\"\n\t\tdata_availibility = \"\"\n\t\tfunding = \"\"\n\t\tcompeting_interest = \"\"\n\t\t\n\t\tinfoList = response.xpath(\"//div[@class='articleinfo']/p\")\n\t\tfor infoContent in infoList:\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=doi:).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tdoc_id = content\n\t\t\tcontent = 
infoContent.xpath(\"string()\").re(r\"(?<=Editor: ).*\\n*.*\")\n\t\t\tif len(content) > 0:\n\t\t\t\teditors = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Published: )[A-Za-z]+ [0-9]+, [0-9]+\")\n\t\t\tif len(content) > 0:\n\t\t\t\tpub_date = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Copyright: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcopyright = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Data Availability: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tdata_availibility = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Funding: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tfunding = content\n\t\t\tcontent = infoContent.xpath(\"string()\").re(r\"(?<=Competing interests: ).*\")\n\t\t\tif len(content) > 0:\n\t\t\t\tcompeting_interest = content\n\t\t\n\t\trec_update_time = datetime.now()\n\t\trec_update_by = \"sys\"\n\t\t\n\t\t# debug messages\n\t\tprint \"publisher = %s\" % publisher\n\t\tprint \"src_address = %s\" % src_address\n\t\tprint \"pdf_address = %s\" % pdf_address\n\t\tprint \"doc_id = %s\" % doc_id\n\t\tprint \"title = %s\" % title\n\t\tprint \"editors = %s\" % editors\n\t\tprint \"pub_date = %s\" % datetime.strptime(pub_date[0], '%B %d, %Y')\n\t\tprint \"copyright = %s\" % copyright\n\t\tprint \"data_availibility = %s\" % data_availibility\n\t\tprint \"funding = %s\" % funding\n\t\tprint \"competing_interest = %s\" % competing_interest\n \n\t\t# write to database\n\t\t###item = pubMetaItem()\n\t\t###item['publisher'] = publisher\n\t\t###if len(pdf_address) > 0:\n\t\t###\titem['pdf_address'] = pdf_address[0]\n\t\t###item['src_address'] = src_address\n\t\t###item['doc_id'] = doc_id[0]\n\t\t###item['title'] = title[0]\n\t\t###if len(editors) > 0:\n\t\t###\titem['editors'] = editors[0]\n\t\t###if len(pub_date) > 0:\n\t\t###\titem['pub_date'] = datetime.strptime(pub_date[0], '%B %d, %Y')\t\t\t# convert to djan\n\t\t###if len(copyright) > 0:\n\t\t###\titem['copyright'] = copyright[0]\n\t\t###if len(data_availibility) > 0:\n\t\t###\titem['data_availibility'] = data_availibility[0]\n\t\t###if len(funding) > 0:\n\t\t###\titem['funding'] = funding[0]\n\t\t###if len(competing_interest) > 0:\n\t\t###\titem['competing_interest'] = competing_interest[0]\n\t\t###item['rec_update_time'] = datetime.now()\t\t\t# TODO: use GMT instead\n\t\t###item['rec_update_by'] = \"sys\"\n\t\t###docHeader = item.save()\n\n def parseMNM(self, response):\n\t\theaderList = response.xpath(\"//div[starts-with(@id,'section')]/h2/text()\").extract()\t\t\t###\n\t\t\n\t\t# find section id having title \"Materials and Methods\"\n\t\tcount = 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\n\t\tfor header in headerList:\n\t\t\tif header == \"Materials and Methods\" or header == \"Methods\":\n\t\t\t\tmnmHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\t\n\t\tprint \"MNM seciont id = %s \" % mnmHeaderNb\n\t\t\n\t\t# assign M&M section selector\n\t\tmnmSelectorStr = \"//div[@id='section%d']\" % mnmHeaderNb\n\t\tmnmSelector = response.xpath(mnmSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']/h3/text()\" % mnmHeaderNb\t\t\t\t\t\t\t###\n\t\tsubHeaderList = mnmSelector.xpath(subHeaderListStr).extract()\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[@id='section%d']/h3[%d]\" % (mnmHeaderNb, headerSeq)\t\t\t###\n\t\t\t\tfor h4 in mnmSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = 
h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\t\t\t###\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\tprint \"section_id = %s\" % headerSeq\n\t\t\t\t\t\tprint \"header = %s\" % subHeader\n\t\t\t\t\t\tprint \"content_seq = %s\" % contentSeq\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint \"content = %s\" % prgrph\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint \"content error in decoding\"\n\t\t\t\t\t\t#item = pubMNMItem()\n\t\t\t\t\t\t#item['doc_id'] = docHeader\n\t\t\t\t\t\t#item['section_id'] = headerSeq\n\t\t\t\t\t\t#item['header'] = subHeader\n\t\t\t\t\t\t#item['content_seq'] = contentSeq\n\t\t\t\t\t\t#item['content'] = prgrph\n\t\t\t\t\t\t#item.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = mnmSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\tprint \"section_id = %s\" % \"1\"\n\t\t\t\tprint \"header = %s\" % \"\"\n\t\t\t\tprint \"content_seq = %s\" % contentSeq\n\t\t\t\ttry:\n\t\t\t\t\tprint \"content = %s\" % prgrph.xpath(\"string()\").extract()\n\t\t\t\texcept:\n\t\t\t\t\tprint \"content error in decoding\"\n\t\t\t\t#item = pubMNMItem()\n\t\t\t\t#item['doc_id'] = docHeader\n\t\t\t\t#item['section_id'] = 1\n\t\t\t\t#item['header'] = \"\"\n\t\t\t\t#item['content_seq'] = contentSeq\n\t\t\t\t#item['content'] = prgrph.xpath(\"string()\").extract()\n\t\t\t\t#item.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\t\n def parseResults(self, response):\n\t\theaderList = response.xpath(\"//div[starts-with(@id,'section')]/h2/text()\").extract()\n\t\t\n\t\t# find section id having title \"Results\"\n\t\tcount = 1\n\t\tfor header in headerList:\n\t\t\tif header == \"Results\":\n\t\t\t\tresultHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign M&M section selector\n\t\tresultSelectorStr = \"//div[@id='section%d']\" % resultHeaderNb\n\t\tresultSelector = response.xpath(resultSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']/h3/text()\" % resultHeaderNb\n\t\tsubHeaderList = resultSelector.xpath(subHeaderListStr).extract()\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[@id='section%d']/h3[%d]\" % (resultHeaderNb, headerSeq)\n\t\t\t\tfor h4 in resultSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\tprint \"section_id = %s\" % headerSeq\n\t\t\t\t\t\tprint \"header = %s\" % subHeader\n\t\t\t\t\t\tprint \"content_seq = %s\" % contentSeq\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint \"content = %s\" % prgrph\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint \"content error in decoding\"\n\t\t\t\t\t\t#item = pubResultItem()\n\t\t\t\t\t\t#item['doc_id'] = docHeader\n\t\t\t\t\t\t#item['section_id'] = headerSeq\n\t\t\t\t\t\t#item['header'] = subHeader\n\t\t\t\t\t\t#item['content_seq'] = contentSeq\n\t\t\t\t\t\t#item['content'] = prgrph\n\t\t\t\t\t\t#item.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = resultSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\tprint \"section_id = %s\" % \"1\"\n\t\t\t\tprint \"header = 
%s\" % \"\"\n\t\t\t\tprint \"content_seq = %s\" % contentSeq\n\t\t\t\ttry:\n\t\t\t\t\tprint \"content = %s\" % prgrph.xpath(\"string()\").extract()\n\t\t\t\texcept:\n\t\t\t\t\tprint \"content error in decoding\"\n\t\t\t\t#item = pubResultItem()\n\t\t\t\t#item['doc_id'] = docHeader\n\t\t\t\t#item['section_id'] = 1\n\t\t\t\t#item['header'] = \"\"\n\t\t\t\t#item['content_seq'] = contentSeq\n\t\t\t\t#item['content'] = prgrph.xpath(\"string()\").extract()\n\t\t\t\t#item.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\n def parseFigure(self, response):\n\t\tfigIdList = response.xpath(\"//div[contains(@class,'figure')]/@data-doi\").extract()\n\t\t\n\t\titemId = 1\n\t\tfor figId in figIdList:\n\t\t\txpathHeaderStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/div[contains(@class, 'figcaption')]/text()\" % figId\n\t\t\txpathContentStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/p[2]\" % figId\n\t\t\txpathUrlStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/div[contains(@class, 'img-box')]/a/@href\" % figId\n\t\t\t\n\t\t\theaderList = response.xpath(xpathHeaderStr).extract()\n\t\t\tcontentList = response.xpath(xpathContentStr).extract()\n\t\t\turlList = response.xpath(xpathUrlStr).extract()\n\t\t\t\n\t\t\tif len(headerList) > 0:\n\t\t\t\tprint \"header = %s \" % headerList[0]\n\t\t\tif len(contentList) > 0:\n\t\t\t\ttry:\n\t\t\t\t\tprint \"content = %s \" % contentList[0]\n\t\t\t\texcept:\n\t\t\t\t\tprint \"content encoding problem\"\n\t\t\tif len(urlList) > 0:\n\t\t\t\tprint \"url = %s \" % urlList[0]\n\t\t\titemId = 1\n\t\n def parseSI(self, response):\n\t\theaderList = response.xpath(\"//div[contains(@id,'section')]/h2/text()\").extract()\t\t# WARNING: content structure changed\n\t\t\n\t\t# find section id having title \"Supporting Information\"\n\t\tcount = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Supporting Information\":\n\t\t\t\tsiHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\t\t\n\t\t# assign supporting information section selector\n\t\tresultSelectorStr = \"//div[@id='section%d']\" % siHeaderNb\n\t\tresultSelector = response.xpath(resultSelectorStr)\n\t\t\n\t\tsubHeaderListStr = \"//div[@id='section%d']//h3/a/text()\" % siHeaderNb\n\t\tsubHeaderList = resultSelector.xpath(subHeaderListStr).extract()\n\t\t\n\t\tif len(subHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in subHeaderList:\n\t\t\t\txpathTitleListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathUrlListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/h3/a/@href\" % (siHeaderNb, headerSeq)\n\t\t\t\txpathContentListStr = \"//div[@id='section%d']/div[@class='supplementary-material'][%d]/p[@class='preSiDOI']/text()\" % (siHeaderNb, headerSeq)\n\t\t\t\t\n\t\t\t\theaderList = response.xpath(xpathTitleListStr).extract()\n\t\t\t\turlList = response.xpath(xpathUrlListStr).extract()\n\t\t\t\tcontentList = response.xpath(xpathContentListStr).extract()\n\t\t\t\t\n\t\t\t\titem = pubSIItem()\n\t\t\t\titem['doc_id'] = pub_meta.objects.get(id=2)\n\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\tif len(headerList) > 0:\n\t\t\t\t\titem['header'] = headerList[0]\n\t\t\t\tif len(urlList) > 0:\n\t\t\t\t\titem['url'] = urlList[0]\n\t\t\t\tif len(contentList) > 0:\n\t\t\t\t\titem['content'] = contentList[0]\n\t\t\t\titem.save()\n\t\t\t\theaderSeq = headerSeq + 1\n\t\t\t\t\n\t" }, { "alpha_fraction": 0.7661574482917786, "alphanum_fraction": 
0.7673325538635254, "avg_line_length": 20.846153259277344, "blob_id": "3a26539759b248900b18afa18cb46e8f97c0ae1c", "content_id": "0b5e7c18d0e6595fa1a1fb505a10ebcf149d6af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 127, "num_lines": 39, "path": "/scripts/Plosone/items.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\t\nfrom scrapy.contrib.djangoitem import DjangoItem\nfrom scrapy.item import Field\n\nfrom scin.models import pub_meta, pub_material_n_method, pub_result, pub_figure, pub_support_info, pub_abstract, pub_discussion\n\nclass pubMetaItem(DjangoItem):\n\tdjango_model = pub_meta\n\tpass\n\t\nclass pubMNMItem(DjangoItem):\n\tdjango_model = pub_material_n_method\n\tpass\n\t\nclass pubResultItem(DjangoItem):\n\tdjango_model = pub_result\n\tpass\n\nclass pubFigureItem(DjangoItem):\n\tdjango_model = pub_figure\n\tpass\n\nclass pubSIItem(DjangoItem):\n\tdjango_model = pub_support_info\n\tpass\n\t\nclass pubAbstractItem(DjangoItem):\n\tdjango_model = pub_abstract\n\tpass\n\t\nclass pubDiscussionItem(DjangoItem):\n\tdjango_model = pub_discussion\n\tpass" }, { "alpha_fraction": 0.7198275923728943, "alphanum_fraction": 0.7284482717514038, "avg_line_length": 25.730770111083984, "blob_id": "2db60d3b44f8b2d32574a6f32cf3b9873f3ded10", "content_id": "ad57de7ad966acc80df3cc5840934e99d023cf68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 72, "num_lines": 26, "path": "/scripts/Plosone/settings.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Scrapy settings for Plosone project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\n# Setting up django's project full path.\nimport sys\nsys.path.insert(0, 'C:\\\\Python27\\\\scinapsis')\n\n# Setting up django's settings module name.\n# This module is located at C:\\Python27\\scinapsis\\scinapsis\\settings.\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'scinapsis.settings'\n\nimport django\ndjango.setup()\n\nBOT_NAME = 'Plosone' ###set to path name\n\nSPIDER_MODULES = ['Plosone.spiders'] ###\nNEWSPIDER_MODULE = 'Plosone.spiders' ###\n\n" }, { "alpha_fraction": 0.6791171431541443, "alphanum_fraction": 0.696943998336792, "avg_line_length": 27.731706619262695, "blob_id": "189553ea75b216dfab917c022eb6783c681c2522", "content_id": "033fa48a0fafb3dc0f672675eca94a9882d330f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 156, "num_lines": 41, "path": "/scripts/product_tech_search_by_sup/search_main_tech_prod_only.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\n\nimport run_technique\nimport run_supplier\nimport run_product\nimport run_product_tech\nimport flush_temp_tables\n\nfrom warnings import filterwarnings\n\nsys.setrecursionlimit(20000)\nfilterwarnings('ignore', category = MySQLdb.Warning)\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\ndoc_id = 0\ntry:\n\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\tmysql_cursor = mysql.cursor()\n\t\n\tmysql_cursor.execute(\"select distinct trtab.doc_id as id \"\n\t\t\t\t\t\t\t\"from scin_db.pub_technique_result trtab \"\n\t\t\t\t\t\t\t\"inner join scin_db.pub_product_result prtab \"\n\t\t\t\t\t\t\t\"on trtab.doc_id = prtab.doc_id\")\n\t\n\tfor (id) in mysql_cursor:\n\t\tdoc_id = id\n\t\tprint \"procesing doc_id: %d\" % doc_id\n\t\t\n\t\trun_product_tech.search_product_tech(doc_id)\n\t\tflush_temp_tables.flush_temp_tables(doc_id)\n\t\tprint \"doc_id [%d] search completed\" % doc_id\n\t\n\tmysql_cursor.close()\n\tmysql.close()\n\nexcept MySQLdb.Error, e:\n\terrmsg = \"MySQL Error @tech_prod_only (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\twith open(\"error.log\", 'w') as w:\n\t\tw.write(errmsg)\n\tsys.exit(1)\n" }, { "alpha_fraction": 0.6537717580795288, "alphanum_fraction": 0.6779497265815735, "avg_line_length": 35.96428680419922, "blob_id": "979f4093256bfae44941675289305e90739c9137", "content_id": "86b053e38cd435326561780f01aca2b3427ebe30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 159, "num_lines": 28, "path": "/scripts/patch_abstract/run_spider_patch7.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import codecs\nimport MySQLdb\nimport re\nimport sys\nimport os\nfrom warnings import filterwarnings\n\nfilterwarnings('ignore', category = MySQLdb.Warning)\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\ntry:\n mysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n mysql_cursor = mysql.cursor()\n \n # call search protein keywords\n query = (\"SELECT id, src_address FROM scin_db.scin_pub_meta WHERE id between 60001 and 70000 ORDER BY id\")\n mysql_cursor.execute(query)\n\n 
for (id, src_address) in mysql_cursor:\n\t\tprint \"PROCESS URL = %s, id = %d \" % (src_address, id)\n\t\tcommand = 'scrapy.exe crawl PlosonePatchAbstract -a start_url=\"%s\" -a doc_id=\"%s\" -s LOG_FILE=patch_output.txt ' % (src_address, id)\n\t\tos.system(command)\n\t\tprint \"PROCESS COMPLETE\"\n\t \nexcept MySQLdb.Error, e:\n\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\twith open(\"error.log\", 'w') as w:\n\t\tw.write(errmsg)\n\tsys.exit(1)" }, { "alpha_fraction": 0.6401180028915405, "alphanum_fraction": 0.6625368595123291, "avg_line_length": 29.26785659790039, "blob_id": "ef34c13ae062d62f8158adf711ae93d9cdb8bf6b", "content_id": "707501080b0dd9a2ac1bdf603317d51e6b6f90de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1695, "license_type": "no_license", "max_line_length": 156, "num_lines": 56, "path": "/scripts/product_tech_search/search_main28.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import sys\nimport MySQLdb\nimport codecs\n\nimport run_technique\nimport run_supplier\nimport run_product\nimport run_product_tech\nimport flush_temp_tables\n\nfrom warnings import filterwarnings\n\nsys.setrecursionlimit(20000)\nfilterwarnings('ignore', category = MySQLdb.Warning)\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\ndoc_id = 0\ntry:\n\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\tmysql_cursor = mysql.cursor()\n\t\n\tmysql_cursor.execute(\"SELECT id FROM scin_db.scin_pub_meta ht \"\n\t\t\t\t\t\t\t\"WHERE EXISTS ( \"\n\t\t\t\t\t\t\t\"SELECT 1 FROM scin_db.scin_pub_figure ft \"\n\t\t\t\t\t\t\t\"WHERE ht.id = ft.doc_id \"\n\t\t\t\t\t\t\t\") \"\n\t\t\t\t\t\t\t\"AND EXISTS ( \"\n\t\t\t\t\t\t\t\"SELECT 1 FROM scin_db.scin_pub_material_n_method mt \"\n\t\t\t\t\t\t\t\"WHERE mt.id = mt.doc_id \"\n\t\t\t\t\t\t\t\") \"\n\t\t\t\t\t\t\t\"AND id BETWEEN 67501 and 70000 \"\n\t\t\t\t\t\t\t\"ORDER BY id\")\n\t\n\tfor (id) in mysql_cursor:\n\t\tdoc_id = id\n\t\tprint \"procesing doc_id: %d\" % doc_id\n\t\ttechResult = run_technique.search_tech(doc_id)\n\t\tprint \"\t\t1) run technique search: %d \" % techResult\n\t\t\n\t\tsuppResult = run_supplier.search_supplier(doc_id)\n\t\tprint \"\t\t2) run supplier search: %d \" % suppResult\n\t\tif suppResult > 0:\n\t\t\tprodResult = run_product.search_product(doc_id)\n\t\t\tprint \"\t\t3) run product search: %d \" % prodResult\n\t\t\tif prodResult > 0:\n\t\t\t\trun_product_tech.search_product_tech(doc_id)\n\t\tflush_temp_tables.flush_temp_tables(doc_id)\n\t\tprint \"doc_id [%d] search completed\" % doc_id\n\t\n\tmysql_cursor.close()\n\tmysql.close()\n\nexcept MySQLdb.Error, e:\n\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\twith open(\"error.log\", 'w') as w:\n\t\tw.write(errmsg)\n\tsys.exit(1)\n" }, { "alpha_fraction": 0.6858638525009155, "alphanum_fraction": 0.6922629475593567, "avg_line_length": 29.157894134521484, "blob_id": "453c2108d011d2cb107d1e723382176352a10cde", "content_id": "f8590596fe9c507cc1dcb47742a2a6dbe2f7ccf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1719, "license_type": "no_license", "max_line_length": 104, "num_lines": 57, "path": "/scripts/Plosone/spiders/patching_meta_spider2.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport urllib2\nfrom datetime 
import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\n\nclass PlosonePatchSpider(CrawlSpider):\n name = \"PlosoneMetaPatch2\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n doc_id = 0\n\t\n def __init__(self, *args, **kwargs): \n super(PlosonePatchSpider, self).__init__(*args, **kwargs) \n self.start_urls = [kwargs.get('start_url')] \n self.doc_id = int(kwargs.get('doc_id'))\n\t\n def parse(self, response):\n\t\tdoc_instance = pub_meta.objects.get(id=self.doc_id)\n\t\t\n\t\t# find articleInfo\n\t\tpoi = doc_instance.doc_id\n\t\tsource_id = \"cited,viewed,saved,discussed\"\n\t\tprint poi\n\n\t\turl = 'http://alm.plos.org:80/api/v5/articles?ids=%s&source_id=%s' % (poi, source_id)\n\n\t\tdata = json.load(urllib2.urlopen(url))\n\t\tviews = data['data'][0]['viewed']\n\t\tsaves = data['data'][0]['saved']\n\t\tshares = data['data'][0]['discussed']\n\t\tcitation = data['data'][0]['cited']\n\t\tprint views\n\t\tprint saves\n\t\tprint shares\n\t\tprint citation\n\t\t\n\t\t# construct author\n\t\tauthor = \"\"\n\t\tauthorlist = response.xpath(\"//div[@class='title-authors']//a[@class='author-name']/text()\").extract()\n\n\t\tfor authorname in authorlist:\n\t\t\tauthorname = authorname.replace(\"\\n\", \"\").strip()\n\t\t\tauthor = author + authorname\n\t\t#print author\n\t\t\n\t\t# write data\n\t\tdoc_instance.author = author\n\t\tdoc_instance.views = views\n\t\tdoc_instance.saves = saves\n\t\tdoc_instance.shares = shares\n\t\tdoc_instance.citation = citation\n\t\t\t\n\t\tdoc_instance.save()\n" }, { "alpha_fraction": 0.6479899287223816, "alphanum_fraction": 0.6582914590835571, "avg_line_length": 34.18584060668945, "blob_id": "126e46ab34ac0c65b641cd0502591c48799f3f9b", "content_id": "4700de2bf7cf5617a3ca667179dce55dfafbf338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3980, "license_type": "no_license", "max_line_length": 126, "num_lines": 113, "path": "/scripts/Plosone/spiders/patching_spider_abstract.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem, pubAbstractItem, pubDiscussionItem\nfrom scin.models import pub_meta\n\nclass PlosonePatchSpider(CrawlSpider):\n name = \"PlosonePatchAbstract\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n doc_id = 0\n\t\n def __init__(self, *args, **kwargs): \n super(PlosonePatchSpider, self).__init__(*args, **kwargs) \n self.start_urls = [kwargs.get('start_url')] \n self.doc_id = int(kwargs.get('doc_id'))\n\t\n def parse(self, response):\n\t\t# define instance\n\t\tdoc_instance = pub_meta.objects.get(id=self.doc_id)\n\t\n\t\t# STEP1: patch abstract\n\t\tabsSelectorStr = \"//div[contains(@class,'abstract')]\"\n\t\tabsSelector = response.xpath(absSelectorStr)\n\t\t\n\t\tabsHeaderListStr = \"//div[contains(@class,'abstract')]/h3/text()\"\n\t\tabsHeaderList = absSelector.xpath(absHeaderListStr).extract()\n\t\tif len(absHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in 
absHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[contains(@class,'abstract')]/h3[%d]\" % (headerSeq)\n\t\t\t\tfor h4 in absSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\titem = pubAbstractItem()\n\t\t\t\t\t\titem['doc'] = doc_instance\n\t\t\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\t\t\titem['header'] = subHeader\n\t\t\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\t\t\titem['content'] = prgrph\n\t\t\t\t\t\titem.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = absSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\titem = pubAbstractItem()\n\t\t\t\titem['doc'] = doc_instance\n\t\t\t\titem['section_id'] = 1\n\t\t\t\titem['header'] = \"\"\n\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\tcontent = prgrph.xpath(\"string()\").extract()\n\t\t\t\tif len(content) > 0:\n\t\t\t\t\titem['content'] = content[0]\n\t\t\t\titem.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\n\t\t# STEP2: patch discussion\n\t\theaderList = response.xpath(\"//div[contains(@id,'section')]/h2/text()\").extract()\t\t# WARNING: content structure changed\n\n\t\t# find section id having title \"Discussion\"\n\t\tcount = 0\n\t\tdisHeaderNb = 0\n\t\tfor header in headerList:\n\t\t\tif header == \"Discussion\":\n\t\t\t\tdisHeaderNb = count\n\t\t\t\tbreak\n\t\t\tcount = count + 1\n\n\t\t# assign disucssion section selector\n\t\tdisSelectorStr = \"//div[@id='section%d']\" % disHeaderNb\n\t\tdisSelector = response.xpath(disSelectorStr)\n\n\t\tdisHeaderListStr = \"//div[@id='section%d']/h3/text()\" % disHeaderNb\n\t\tdisHeaderList = disSelector.xpath(disHeaderListStr).extract()\n\t\tif len(disHeaderList) > 0:\n\t\t\theaderSeq = 1\n\t\t\tfor subHeader in disHeaderList:\n\t\t\t\t#subHeaderStr = \"//h4[%d]\" % headerSeq\n\t\t\t\tsubHeaderStr = \"//div[@id='section%d']/h3[%d]\" % (disHeaderNb, headerSeq)\n\t\t\t\tfor h4 in disSelector.xpath(subHeaderStr):\n\t\t\t\t\tparagraphs = h4.xpath(\"\"\"set:difference(./following-sibling::p,\n\t\t\t\t\t\t\t\t\t\t\t\t\t./following-sibling::h3[1]/following-sibling::p)\"\"\").extract()\n\t\t\t\t\tcontentSeq = 1\n\t\t\t\t\tfor prgrph in paragraphs:\n\t\t\t\t\t\titem = pubDiscussionItem()\n\t\t\t\t\t\titem['doc'] = doc_instance\n\t\t\t\t\t\titem['section_id'] = headerSeq\n\t\t\t\t\t\titem['header'] = subHeader\n\t\t\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\t\t\titem['content'] = prgrph\n\t\t\t\t\t\titem.save()\n\t\t\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\theaderSeq = headerSeq + 1\n\t\telse:\n\t\t\tparagraphs = disSelector.xpath(\"p\")\n\t\t\tcontentSeq = 1\n\t\t\tfor prgrph in paragraphs:\n\t\t\t\titem = pubDiscussionItem()\n\t\t\t\titem['doc'] = doc_instance\n\t\t\t\titem['section_id'] = 1\n\t\t\t\titem['header'] = \"\"\n\t\t\t\titem['content_seq'] = contentSeq\n\t\t\t\tcontent = prgrph.xpath(\"string()\").extract()\n\t\t\t\tif len(content) > 0:\n\t\t\t\t\titem['content'] = content[0]\n\t\t\t\titem.save()\n\t\t\t\tcontentSeq = contentSeq + 1\n\t\t\t\t" }, { "alpha_fraction": 0.6990291476249695, "alphanum_fraction": 0.7009708881378174, "avg_line_length": 31.87234115600586, "blob_id": "21727bc85a2efaad3c937144957b752574112e42", "content_id": "c0af420296bdab302d6f6d35aedf50f706bd33c7", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 129, "num_lines": 47, "path": "/scripts/Plosone/spiders/patching_meta_spider3.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport urllib2\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom Plosone.items import pubMetaItem, pubMNMItem, pubResultItem, pubFigureItem, pubSIItem\nfrom scin.models import pub_meta\n\nclass PlosonePatchSpider(CrawlSpider):\n name = \"PlosoneMetaPatch3\"\n allowed_domains = [\"plosone.com\", \"plosone.org\", \"plos.org\"]\n doc_id = 0\n\t\n def __init__(self, *args, **kwargs): \n super(PlosonePatchSpider, self).__init__(*args, **kwargs) \n self.start_urls = [kwargs.get('start_url')] \n self.doc_id = int(kwargs.get('doc_id'))\n\t\n def parse(self, response):\n\t\tdoc_instance = pub_meta.objects.get(id=self.doc_id)\n\t\t\n\t\t# figure list\n\t\tpub_figure_list = doc_instance.pub_figure_set.all()\n\t\tfor doc_figure in pub_figure_list:\n\t\t\tfigId = doc_figure.figure_id\n\t\t\txpathUrlStr = \"//div[contains(@class,'figure') and contains(@data-doi,'%s')]/div[contains(@class, 'img-box')]/a/@href\" % figId\n\t\t\n\t\t\n\t\t# construct author\n\t\tauthor = \"\"\n\t\tauthorlist = response.xpath(\"//div[@class='title-authors']//a[@class='author-name']/text()\").extract()\n\n\t\tfor authorname in authorlist:\n\t\t\tauthorname = authorname.replace(\"\\n\", \"\").strip()\n\t\t\tauthor = author + authorname\n\t\t#print author\n\t\t\n\t\t# write data\n\t\tdoc_instance.author = author\n\t\tdoc_instance.views = views\n\t\tdoc_instance.saves = saves\n\t\tdoc_instance.shares = shares\n\t\tdoc_instance.citation = citation\n\t\t\t\n\t\tdoc_instance.save()\n" }, { "alpha_fraction": 0.6449903845787048, "alphanum_fraction": 0.6608863472938538, "avg_line_length": 39.70588302612305, "blob_id": "300a9bfc49a96093de6a807edbb6d633abbd2f69", "content_id": "018384ced6afc786a8e026c80877d2490e3944b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2076, "license_type": "no_license", "max_line_length": 157, "num_lines": 51, "path": "/scripts/pathway_search/run_pathway.py", "repo_name": "boysingwong/scinapsis", "src_encoding": "UTF-8", "text": "import codecs\nimport MySQLdb\nimport re\nimport sys\nfrom warnings import filterwarnings\n\ndef search_pathway(doc_id):\n\tfilterwarnings('ignore', category = MySQLdb.Warning)\n\tsys.stdout = codecs.getwriter('utf8')(sys.stdout)\n\ttry:\n\t\tmysql = MySQLdb.connect(user='root',passwd='password1',db='scin_db',host='127.0.0.1',port=3306, autocommit = 'True', charset = 'utf8', use_unicode = True)\n\t\tmysql_cursor = mysql.cursor()\n\t\t\n\t\t# call search protein keywords\n\t\targs = [doc_id]\n\t\tmysql_cursor.callproc( 'scin_db.pub_pathway_exists', args )\n\t\t\n\t\t#print 'start query pathway temp'\n\t\tquery = (\"SELECT protein_gene_name1, protein_gene_name2, phase, phase_group_name, section_id, content_seq, content FROM scin_db.pub_pathway_temp\")\n\t\tmysql_cursor.execute(query)\n\t\t#print 'end query pathway temp'\n\t\t\n\t\trsltCount = 0\n\t\tfor (protein_gene_name1, protein_gene_name2, phase, phase_group_name, section_id, content_seq, content) in mysql_cursor:\n\t\t outputStr = \"result: %s, %s, %s, %s, %d, %d \" % (protein_gene_name1, protein_gene_name2, 
phase, phase_group_name, section_id, content_seq)\n\t\t \n\t\t # todo change regex pattern\n\t\t pat1 = ur'\\b%s\\b.*?\\b%s\\b.*?\\b%s\\b' % (protein_gene_name1, phase, protein_gene_name2)\n\t\t \n\t\t sentenceList = re.split(ur'(?<!\\w\\.\\w.)(?<![A-Z]\\.)(?<=\\.|\\?)\\s', content)\n\t\t for sentence in sentenceList:\n\t\t\t #print 'check[%s]' % sentence\n\t\t\t result1 = re.search(pat1, sentence)\n\t\t\t if result1:\n\t\t\t\tinsertStmt = (\"INSERT INTO scin_db.pub_pathway_result \"\n\t\t\t\t\t\t\t \"(doc_id, protein_gene_name1, protein_gene_name2, phase, phase_group_name, section_id, content_seq, sentence) \"\n\t\t\t\t\t\t\t \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\")\n\t\t\t\tmysql_cursor.execute(insertStmt, (doc_id, protein_gene_name1, protein_gene_name2, phase, phase_group_name, int(section_id), int(content_seq), sentence) )\n\t\t\t\tmysql.commit()\n\t\t\t\trsltCount = rsltCount + 1\n\n\t\tmysql_cursor.close()\n\t\tmysql.close()\n\t\t\n\t\treturn rsltCount\n\n\texcept MySQLdb.Error, e:\n\t\terrmsg = \"MySQL Error (@%d) %d: %s\" % ( doc_id, e.args[0], e.args[1] )\n\t\twith open(\"error.log\", 'w') as w:\n\t\t\tw.write(errmsg)\n\t\tsys.exit(1)\n" } ]
27
matthewpoletin/matchmaking
https://github.com/matthewpoletin/matchmaking
85112caa6f73a566f66c3f147680c0d562558469
3619c452b1a1e57a790e0f56392b6aaad6b55be6
0666a469651f0c58b50d4f819f561a83e00b9a16
refs/heads/master
2022-12-25T02:01:26.682952
2018-05-30T07:18:43
2018-05-30T07:18:43
135,403,891
1
0
null
2018-05-30T07:18:23
2021-03-25T10:55:40
2022-12-08T02:11:08
Python
[ { "alpha_fraction": 0.6655629277229309, "alphanum_fraction": 0.6799116730690002, "avg_line_length": 20.069766998291016, "blob_id": "0be557b90606854a757d80424d5e6e1c2ebb6d60", "content_id": "cbe09858b62c21e069dda5a20b03d5e0388151e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 84, "num_lines": 43, "path": "/src/methods/casual.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\nimport random\nimport logging\nimport uuid\n\nfrom src.base import Base, PlayerStatus\nfrom src.room import Room\n\n\ndef pop_random(players):\n\t\"\"\"\n\tPops random player from dictionary\n\t:param players:\n\t:return:\n\t\"\"\"\n\tif len(list(players.keys())) != 0:\n\t\tplayer_id = list(players.keys())[random.randint(0, len(list(players.keys()))) - 1]\n\t\treturn (players.pop(player_id))[0]\n\telse:\n\t\treturn None\n\n\ndef casual(base: Base):\n\t\"\"\"\n\tCasually (randomly) matches players\n\t{roomId: room, ...}\n\t:return:\n\t\"\"\"\n\t# TODO: get from pool, not base\n\tunmatched = dict(base.players)\n\trooms = {}\n\twhile len(unmatched) > 1:\n\t\tp1 = pop_random(unmatched)\n\t\tp2 = pop_random(unmatched)\n\t\troom = Room([p1, p2])\n\t\tbase.set_status(p1.uuid, PlayerStatus.PLAYING)\n\t\tbase.set_status(p2.uuid, PlayerStatus.PLAYING)\n\t\tlogging.info('Created new room')\n\t\trooms[room.id] = room\n\t\tdel room\n\treturn rooms\n" }, { "alpha_fraction": 0.4961538314819336, "alphanum_fraction": 0.5423076748847961, "avg_line_length": 19, "blob_id": "9e770c971ffbe5583e2d4aac219f60299f84db53", "content_id": "22d94279d204b71c7a230b5a51f6b684c3bdbc99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 74, "num_lines": 26, "path": "/src/tools/gaussian.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "import math\nimport random\n\nfrom matplotlib import pyplot\nimport numpy as np\n\n# def gaussian(x, a, b, c):\n# \treturn - a * math.exp(-((x - b) ** 2) / (2 * c * c))\n\n\n# print(gaussian(-1, 1, 1, 1))\n\n\ndef gaussian(x, a, b, c):\n return a * np.exp(-np.power(x - b, 2.) 
/ (0.1 * np.power(c, 2.)))\n\n\ndef main():\n for a, b, c in [(1, 0, 1)]:\n pyplot.plot(gaussian(np.arange(0, 1, 0.01, dtype=float), a, b, c))\n\n # print(random.uniform(gaussian(player.pause, 1, 0, 100), 1))\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5887640714645386, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 12.9375, "blob_id": "6743f49bc3597559296295f5d651dc5b6cf438c3", "content_id": "a82921760599c6706fbf31e612df19b3bf888065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 90, "num_lines": 32, "path": "/src/methods/marriage.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\n\ndef marriage(players: []):\n\t\"\"\"\n\n\t:param players:\n\t:return:\n\t\"\"\"\n\tpass\n\n\ndef examine(players: []):\n\t\"\"\"\n\t:param players:\n\t:return:\n\t\"\"\"\n\tdiffs = []\n\tfor player_a in players:\n\t\tfor player_b in players:\n\t\t\tdiff = abs(player_a.skill - player_b.skill) / min(player_a.skill, player_b.skill) * 100\n\n\ndef offer(player_a, player_b):\n\t\"\"\"\n\n\t:param player_a:\n\t:param player_b:\n\t:return:\n\t\"\"\"\n\tprint(\"match\")" }, { "alpha_fraction": 0.5987821221351624, "alphanum_fraction": 0.6116373538970947, "avg_line_length": 24.482759475708008, "blob_id": "531fa7ba362b0d9bcddd14a915bf9038bc5711a0", "content_id": "acc3a9dd34e19e15cfb49f5225854924b4739115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1574, "license_type": "no_license", "max_line_length": 90, "num_lines": 58, "path": "/src/methods/experience.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\nimport random\nimport logging\nimport uuid\n\nfrom src.base import Base, PlayerStatus\nfrom src.room import Room\n\n\ndef pop_random(players):\n \"\"\"\n Pops random player from dictionary\n :param players:\n :return:\n \"\"\"\n if len(list(players.keys())) != 0:\n player_id = list(players.keys())[random.randint(0, len(list(players.keys()))) - 1]\n return (players.pop(player_id))[0]\n else:\n return None\n\n\ndef find_closest(skill, players):\n \"\"\"\n Находит игрока с ближайшим опытом\n :param skill: Опыт игрока\n :param players:\n :return:\n \"\"\"\n closest = players[list(players.keys())[0]]\n for player_id, player in players.items():\n if abs(skill - closest[0].skill) > abs(skill - player[0].skill):\n closest = player\n return players.pop(closest[0].uuid)[0]\n\n\ndef experience(base: Base):\n \"\"\"\n Соединяет игроков в пары на основании опыта\n {roomId: room, ...}\n :param base: База доступных игроков\n :return:\n \"\"\"\n # TODO: get from pool, not base\n unmatched = dict(base.players)\n rooms = {}\n while len(unmatched) > 1:\n p1 = pop_random(unmatched)\n p2 = find_closest(p1.skill, unmatched)\n room = Room([p1, p2])\n base.set_status(p1.uuid, PlayerStatus.PLAYING)\n base.set_status(p2.uuid, PlayerStatus.PLAYING)\n logging.info('Created new room')\n rooms[room.id] = room\n del room\n return rooms\n" }, { "alpha_fraction": 0.5436308979988098, "alphanum_fraction": 0.5703778266906738, "avg_line_length": 30.819149017333984, "blob_id": "097338ce2a5083b2b5f5f388e5c918c6cf0ec321", "content_id": "0a627106458b1df025baf117bd455ae37fbc940c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3265, 
"license_type": "no_license", "max_line_length": 97, "num_lines": 94, "path": "/main.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\nimport sys\nimport itertools\nimport random\n\nfrom src.base import Base\nfrom src.methods.casual import casual\nfrom src.methods.experience import experience\nfrom src.tools.gaussian import gaussian\n\n\ndef predict(rooms):\n \"\"\"\n Predicts match result\n :param rooms: {roomId: room, ...}\n :return: {roomId: playerId, ...}\n \"\"\"\n print(\"Предсказываю результаты матчей\")\n prediction = {} # {roomId: playerId, ...}\n for room_id, room in rooms.items():\n prediction[room_id] = room.predict_winner().uuid\n return prediction\n\n\ndef simulate(rooms):\n \"\"\"\n Simulates match result\n :param rooms: {roomId: room, ...}\n :return: {roomId: playerId, ...}\n \"\"\"\n print(\"Моделирую результаты матчей\")\n model = {} # {roomId: playerId, ...}\n for room_id, room in rooms.items():\n if len(room.players) > 1:\n room_skill = {} # {playerId: skill, ...}\n for player in room.players:\n room_skill[player.uuid] = player.skill \\\n * random.uniform(gaussian(player.pause, 1, 0, 100), 1)\\\n * random.uniform(1, 1.05)\n winner_id = max(room_skill, key=room_skill.get)\n model[room_id] = winner_id\n return model\n\n\ndef main(argv):\n # Generate bases of players\n base1000 = Base()\n base1000.generate(1000)\n\n base100 = Base(dict(itertools.islice(base1000.players.items(), 100)))\n base500 = Base(dict(itertools.islice(base1000.players.items(), 500)))\n bases = [base100, base500, base1000]\n\n methods = {\n 'метод на опыте': experience,\n 'случайный метод': casual,\n }\n\n # \t100\t\t500\t\t1000\n # results = [\n # [0, 0, 0], # experience\n # [0, 0, 0], # casual\n # ]\n results = {}\n for method in methods:\n results[method] = []\n\n for base_index, base in enumerate(bases):\n print(\"\\nРаботаю с базой в {} игроков\".format(len(base.players)))\n for name, method in methods.items():\n print(\"\\nИспользую {}\".format(name))\n # Генерируем комнаты случайным методом\n rooms = method(base)\n # Предсказываем результат матча\n prediction = predict(rooms)\n # Моделируем результат матча\n sim = simulate(rooms)\n # Сравниваем предсказание и результат\n print(\"Сравниваю предсказание и результат\")\n shared_items = set(prediction.items()) & set(sim.items())\n quality = round((len(shared_items) / len(prediction) * 100), 2)\n print(\"Точность результата: {0:.2f}%\".format(quality))\n results[name].append(quality)\n\n print(results)\n\n # row_format = \"{:>15}\" * (len(teams_list) + 1)\n # print(row_format.format(\"\", *teams_list))\n # for team, row in zip(teams_list, data):\n # print(row_format.format(team, *row))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" }, { "alpha_fraction": 0.5754601359367371, "alphanum_fraction": 0.6036809682846069, "avg_line_length": 27.10344886779785, "blob_id": "50e8728cc3fee872fa91e6c6cae549cd4ee2efbd", "content_id": "73840b603649973a7b45a0760c9c4637a6139475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 77, "num_lines": 58, "path": "/src/player.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*\n\nimport random\nimport uuid\n\nmaxAllowedAge = 18\nminAge = maxAllowedAge\nmaxAge = 100\nminArmLength = 1\nmaxArmLength = 100\nminHeight = 140\nmaxHeight = 300\nminExperience = 0\nmaxExperience = maxAge - 
maxAllowedAge\nminGamesPlayed = 0\nmaxGamesPlayed = 100\nminSkill = 100\nmaxSkill = 5000\nminWR = 35\nmaxWR = 65\nminPause = 0\nmaxPause = 28\n\n\nclass Player:\n def __init__(self):\n self.uuid = None\n self.skill = None\n self.wr = None\n self.pause = None\n # self.age = None\n # self.height = None\n # self.gamesPlayed = None\n # self.experience = None\n\n @staticmethod\n def generate():\n player = Player()\n player.uuid = uuid.uuid4()\n player.skill = random.randint(minSkill, maxSkill)\n player.wr = random.randint(minWR, maxWR)\n # player.age = random.randint(minAge, maxAge)\n # player.height = random.randint(minHeight, maxHeight)\n # player.gamesPlayed = random.randint(minGamesPlayed, maxGamesPlayed)\n # player.experience = random.randint(minExperience, maxExperience)\n player.pause = random.randint(minPause, maxPause)\n return player\n\n def __str__(self):\n print('\\n' + '\\033[1;34m' + 'Player Info:' + '\\033[0m')\n print('player: ' + str(self.uuid))\n print('skill: ' + str(self.skill))\n print('wr: {}%'.format(self.wr))\n # print('age: ' + str(self.age) + 'y.o.')\n # print('height: ' + str(self.height) + 'cm')\n # print('gamesPlayed: ' + str(self.gamesPlayed))\n # print('experience: ' + str(self.experience) + ' years')\n return \"\"\n" }, { "alpha_fraction": 0.6357927918434143, "alphanum_fraction": 0.6420722007751465, "avg_line_length": 18.014925003051758, "blob_id": "775482f585a421c72400722eb912464318185956", "content_id": "d7a5450e4eeee3b791b6acebba6b702583b0fd50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 48, "num_lines": 67, "path": "/src/room.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\nimport logging\nimport uuid\n\n\nclass Room:\n\t\"\"\"\n\tClass for a single server\n\t\"\"\"\n\n\t# UUID\n\tid = None\n\t# List of players\n\tplayers = []\n\t# Maximum size, 0 for infinite\n\tmax_size = 0\n\n\tdef __init__(self, players: list, max_size=0):\n\t\t\"\"\"\n\t\tConstructor for a room class\n\t\t:param max_size: Maximum size\n\t\t:param players:\n\t\t\"\"\"\n\t\tself.id = uuid.uuid4()\n\t\tself.max_size = max_size\n\t\tif not len(players) > max_size:\n\t\t\tlogging.error('Size mismatch')\n\t\tself.players = players\n\n\tdef predict_winner(self):\n\t\t\"\"\"\n\t\tFor MM predict the winner\n\t\t:return: player\n\t\t\"\"\"\n\t\tif len(self.players) > 1:\n\t\t\t# TODO: convert to using of max() function\n\t\t\twinner = self.players[0]\n\t\t\tfor player in self.players:\n\t\t\t\tif player.wr > winner.wr:\n\t\t\t\t\twinner = player\n\t\t\treturn winner\n\t\telse:\n\t\t\treturn None\n\n\tdef add_player(self, player):\n\t\t\"\"\"\n\t\tAdds specified player to the room\n\t\t:param player:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.players.append(player)\n\n\tdef remove_player(self, player):\n\t\t\"\"\"\n\t\tRemoves specified player from the room\n\t\t:param player:\n\t\t:return:\n\t\t\"\"\"\n\t\tself.players.remove(player)\n\n\tdef __repr__(self):\n\t\tprint('Room {}'.format(self.id))\n\t\tprint('Players: {}'.format(len(self.players)))\n\t\tfor player in self.players:\n\t\t\tprint(player.uuid)\n\t\treturn ''\n" }, { "alpha_fraction": 0.5340579748153687, "alphanum_fraction": 0.5384057760238647, "avg_line_length": 24.55555534362793, "blob_id": "079ab9876cf03e2419e22342ea54c0fefd80b787", "content_id": "f6470c1f19bb28a93dcc172b2b171f911753d0dd", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2760, "license_type": "no_license", "max_line_length": 112, "num_lines": 108, "path": "/src/base.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\nimport random\nimport uuid\nfrom enum import Enum, unique\n\nfrom src.player import Player\n\n\n@unique\nclass PlayerStatus(Enum):\n OFFLINE = 1\n SEARCHING = 2\n PLAYING = 3\n\n\nclass Base:\n \"\"\"\n Class for list of all players in the system\n \"\"\"\n\n # {uuid: [player, status], ...}\n players = {}\n\n def __init__(self, players=None):\n if players is not None:\n self.players = players\n\n def load_csv(self, file: str):\n \"\"\"\n Loads list of all players from csv file\n :param file: Absolute path to file location\n :type file: str\n :return:\n \"\"\"\n raise NotImplementedError\n\n def load_api(self, t='DEV'):\n \"\"\"\n Loads list of all players from public api\n :param t: Type of api to use (DEV - local, PROD - remote)\n :return:\n \"\"\"\n if t == 'DEV':\n return\n elif t == 'PROD':\n pass\n return\n else:\n raise TypeError(\"t should be 'DEV' or 'PROD'\")\n\n def generate(self, size: int):\n \"\"\"\n Generates list of all players\n :param size:\n :return:\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(\"Type of base size should be int\")\n for i in range(size):\n p = Player.generate()\n self.players[p.uuid] = [p, PlayerStatus.OFFLINE]\n\n def get_player(self, player_id: uuid.UUID):\n \"\"\"\n Finds player by uuid\n :param player_id:\n :return: Player\n \"\"\"\n p = self.players.get(player_id)\n return self.players.get(player_id)[0]\n\n def get_status(self, player_id: uuid.UUID):\n \"\"\"\n Returns current status of the player\n :param player_id:\n :return:\n \"\"\"\n return self.players.get(player_id, None)[1]\n\n def set_status(self, player_id: uuid.UUID, status: PlayerStatus or str):\n \"\"\"\n Changes status of specified player\n :param player_id:\n :param status:\n :return:\n \"\"\"\n if type(status) is str:\n status = PlayerStatus(status)\n player = self.get_player(player_id)\n if player is not None:\n self.players[uuid] = [player, status]\n\n def get_random(self):\n \"\"\"\n Takes random player of the list\n :return: random player\n :rtype: Player\n \"\"\"\n # TODO: Add check for an empty list\n return self.players[list(self.players.keys())[random.randint(0, len(list(self.players.keys())) - 1)]][0]\n\n def __repr__(self):\n for player_id in self.players:\n print(self.players[player_id][0])\n print(self.players[player_id][1])\n return ''\n" }, { "alpha_fraction": 0.5516372919082642, "alphanum_fraction": 0.5566750764846802, "avg_line_length": 14.880000114440918, "blob_id": "37141766897689b942d06d8c56e35e87177ac5b4", "content_id": "c721640ed940774a7bf1561f714b9658d8768c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/src/pool.py", "repo_name": "matthewpoletin/matchmaking", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\nimport uuid\n\n\nclass Pool:\n \"\"\"\n Class for list of all online players\n \"\"\"\n\n # List of all players waiting (UUIDs)\n players = []\n\n def __init__(self):\n pass\n\n def add_player(self, player_id: uuid.UUID):\n pass\n\n def rem_player(self, player_id: uuid.UUID):\n pass\n\n def get_rand_player(self):\n pass\n" } ]
9
TeamCDG/hashtagman
https://github.com/TeamCDG/hashtagman
36050b1823ea24d08550a6fad2c7b58f4c28f066
4ddf2f8fe23f13552e450b9ec965a2d50d0ba14d
8588832a673bf893357c42f319edaf6033e5711d
refs/heads/master
2021-01-16T20:31:31.063504
2015-02-04T20:31:50
2015-02-04T20:31:50
30,234,176
0
0
null
2015-02-03T09:07:29
2015-02-03T18:39:02
2015-02-03T18:39:02
Python
[ { "alpha_fraction": 0.5, "alphanum_fraction": 0.5119118690490723, "avg_line_length": 35.5, "blob_id": "1f7ee35e423cf9ce3abb5a2f26ab0de2293698fc", "content_id": "d9c495baf76c83004ce224e4ff1b8ab69bd40905", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3378, "license_type": "no_license", "max_line_length": 121, "num_lines": 92, "path": "/index.py", "repo_name": "TeamCDG/hashtagman", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom bottle import *\nfrom hashtagman import *\nfrom hpics import *\n\nversion = \"#HashtagMan v0.1.0a2\"\n\n@route('/')\ndef index() -> \"string\":\n\n if not request.cookies.tip:\n sol = li[randint(0, len(li)-1)]\n response.set_cookie(\"sol\", sol)\n\n ## -1 what you actually want\n response.set_cookie(\"maxf\", \"8\")\n response.set_cookie(\"curf\", \"0\")\n response.set_cookie(\"tip\", \"\")\n\n return version +\": \"+ \\\n greeting()+\"<br>\"+\\\n \"<form action=\\\"/\\\" method=\\\"POST\\\">\"+\\\n \"<input type=\\\"text\\\" name=\\\"ntip\\\" maxlength=\\\"1\\\">\"+\\\n \"<input type=\\\"submit\\\" name=\\\"guess\\\" value=\\\"raten\\\"> <br />\" +\\\n pics[0]\n else:\n tip = request.cookies.tip\n sol = request.cookies.sol\n curf = request.cookies.curf\n maxf = request.cookies.maxf\n\n return \"Welcome back!<br>\"+\\\n \"<link href='http://fonts.googleapis.com/css?family=Source+Code+Pro' rel='stylesheet' type='text/css'>\" +\\\n '<span style=\"font-family:\\'Source Code Pro\\'\">' + partSol(tip, sol) + '</span>' + \"<br />\" +\\\n \"Faults \" + curf + \" / \" + str(int(maxf)+1) + \": \" + falseChars(tip, sol) +\\\n \"<form action=\\\"/\\\" method=\\\"POST\\\">\"+\\\n \"<input type=\\\"text\\\" name=\\\"ntip\\\" maxlength=\\\"1\\\">\"+\\\n \"<input type=\\\"submit\\\" name=\\\"guess\\\" value=\\\"raten\\\"> <br />\" +\\\n pics[int(curf)]\n\n@route('/', method='POST')\ndef do_index() -> \"string\":\n ntip = request.forms.get(\"ntip\")\n ttip = request.cookies.tip\n if not ntip:\n return \"\"\"\n <p>Please insert a letter. <br />\n <a href=\"http://localhost:8080\">go back -></a>\n </p>\n \"\"\"\n\n ntip = ntip.upper()\n if ntip.upper() in ttip:\n return \"\"\"\n <p>Please do not insert the same letter. 
<br />\n <a href=\"http://localhost:8080\">go back -></a>\n </p>\n \"\"\"\n else:\n tip = ttip + ntip\n response.set_cookie(\"tip\", tip)\n\n sol = request.cookies.sol\n curf = request.cookies.curf\n maxf = request.cookies.maxf\n\n if not rightChoice(tip, sol) and curf < maxf:\n if len(falseChars(ntip, sol)) > 0:\n curf = int(curf) + 1\n response.set_cookie(\"curf\", str(curf))\n curf = str(curf)\n\n return \"<link href='http://fonts.googleapis.com/css?family=Source+Code+Pro' rel='stylesheet' type='text/css'>\" +\\\n '<span style=\"font-family:\\'Source Code Pro\\'\">' + partSol(tip, sol) + '</span>' + \"<br />\" +\\\n \"Faults \" + curf + \" / \" + str(int(maxf)+1) + \": \" + falseChars(tip, sol) +\\\n \"<form action=\\\"/\\\" method=\\\"POST\\\">\"+\\\n \"<input type=\\\"text\\\" name=\\\"ntip\\\" maxlength=\\\"1\\\">\"+\\\n \"<input type=\\\"submit\\\" name=\\\"guess\\\" value=\\\"raten\\\"> <br />\" +\\\n pics[int(curf)]\n else:\n response.set_cookie(\"tip\", \"\", expires=0)\n response.set_cookie(\"sol\", \"\", expires=0)\n response.set_cookie(\"curf\", \"\", expires=0)\n response.set_cookie(\"maxf\", \"\", expires=0)\n response.set_cookie(\"sessionid\", \"\", expires=0)\n\n if (rightChoice(tip, sol) and curf < maxf):\n return '<p style=\"font-size:300px\">勝ち</p><br />'\n else:\n return '<h1>負け・・・死んだ</h1>' + pics[int(maxf)]\n\nrun(host='localhost', port=8080)\n" }, { "alpha_fraction": 0.5015384554862976, "alphanum_fraction": 0.510769248008728, "avg_line_length": 17.932039260864258, "blob_id": "db651dbe11e1a6338a5f90b9f2fa5c63729ff6c0", "content_id": "1c632d8e46b21701d279cea79c9398d7c8f0fbcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1954, "license_type": "no_license", "max_line_length": 76, "num_lines": 103, "path": "/hashtagman.py", "repo_name": "TeamCDG/hashtagman", "src_encoding": "UTF-8", "text": "from random import *\n\ntrue = True\nfalse = False\n\nNone if False else None\n\ngreetings = [\n \"Urusai\",\n \"Zatknis\",\n \"Hallo!\",\n \"Merde\",\n \"Theophilus stultus est\",\n \"Greetings summoner\"\n ]\n\nli = ['KATZENKLO', 'MEDIZINBALL', 'SCHNEEMANN',\n 'AUTOBAHN', 'SCHULBUS', 'RHABARBERSAFT', 'HIPPOGREIF']\n\nversion = \"#HashtagMan v0.1.0a2\"\n\ndef partSol(tip, sol):\n \"\"\"\n Beispiel:\n >>> partSol('AEDKLM', 'KATZENKLO')\n 'KA__E_KL_'\n \"\"\"\n\n x = \"\".ljust(len(sol), \"_\")\n for i in range(len(sol)):\n if sol[i] in tip:\n x = x[:i] + sol[i] + x[i+1:]\n return x\n\ndef hits(tip, sol):\n \"\"\"\n Beispiel:\n >>> hits('AEDKLM', 'KATZENKLO')\n 5\n \"\"\"\n h = 0\n for i in range(len(sol)):\n if sol[i] in tip:\n h += 1\n return h\n\ndef falseChars(tip, sol):\n \"\"\"\n Beispiel:\n >>> falseChars('AEDKLM', 'KATZENKLO')\n 'DM'\n \"\"\"\n f = \"\"\n for c in tip:\n if not c in sol:\n f += c\n return f\n\ndef rightChoice(tip, sol):\n \"\"\"\n >>> rightChoice('AEDKLM', 'KATZENKLO')\n False\n >>> rightChoice('AEDKLMNTZO', 'KATZENKLO')\n True\n \"\"\"\n\n r = true\n for c in sol:\n if not c in tip:\n r = false\n break\n return r\n\ndef greeting():\n \"\"\"\n Gibt zufällig eine passende Begrüßung zurück.\n \"\"\"\n return greetings[randint(0, len(greetings)-1)]\n\ndef guess():\n tip = \"\"\n print(version, \": \", greeting())\n sol = li[randint(0, len(li)-1)]\n maxf = 10\n curf = 0\n\n while not rightChoice(tip, sol) and curf < maxf:\n print(partSol(tip, sol))\n print(\"faults \"+str(curf)+\" / \"+str(maxf)+\": \"+falseChars(tip, sol))\n c = input(\"tip: \").upper()[0]\n if(len(falseChars(c, sol)) > 0):\n curf +=1\n 
tip += c\n\n if(rightChoice(tip, sol)):\n print(\"sieg\")\n else:\n print(\"niederlage\")\n\n#guess()\n\nfrom doctest import testmod\n#testmod(verbose=True)\n" } ]
2
pushshift/imdb_to_json
https://github.com/pushshift/imdb_to_json
9776a783e1c8b2ddabbd5b845962c28e80d2cb73
95713621a8497342f70d590f831fd2fdfc542e76
1b6c18c2846f4bf1ae6b0305c15746ac7c744415
refs/heads/master
2022-12-10T18:28:48.243753
2020-09-06T02:10:28
2020-09-06T02:10:28
288,124,968
9
0
null
null
null
null
null
[ { "alpha_fraction": 0.5479972958564758, "alphanum_fraction": 0.5588135719299316, "avg_line_length": 35.5216064453125, "blob_id": "bf0c9a040996389222b0f42585acd15e14e702b9", "content_id": "3a5802326e4d938bbf2c8eaceffbed0aa2d79892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11834, "license_type": "no_license", "max_line_length": 132, "num_lines": 324, "path": "/convert_imdb_to_json.py", "repo_name": "pushshift/imdb_to_json", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport ujson as json\nimport requests\nfrom selectolax.parser import HTMLParser\nfrom datetime import datetime\nimport sys\nimport re\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\ndef plotsummary(title='tt0187393'):\n '''Get Plot Summaries for Title'''\n r = requests.get(f\"https://www.imdb.com/title/{title}/plotsummary\")\n p = HTMLParser(r.content)\n summaries = []\n summaries_data = p.css(\"li.ipl-zebra-list__item\")\n if summaries_data is not None:\n for summary in summaries_data:\n obj = {}\n obj['author'] = None\n author_data = summary.css_first(\"div.author-container\")\n if author_data is not None:\n obj['author'] = author_data.text().strip()\n summary.strip_tags([\"div.author-container\"])\n obj['summary'] = summary.text().strip()\n summaries.append(obj)\n\n return summaries\n\ndef keywords(title='tt0187393'):\n '''Get keywords data for title'''\n r = requests.get(f\"https://www.imdb.com/title/{title}/keywords\")\n p = HTMLParser(r.content)\n keywords_data = p.css(\"div.sodatext\")\n keywords = []\n if keywords_data is not None:\n for keyword in keywords_data:\n keywords.append(keyword.text().strip())\n\n return keywords\n\ndef reviews(title='tt0187393'):\n '''Get detailed review data for title'''\n\n def process_reviews(html):\n p = HTMLParser(html)\n div = p.css_first(\"div.lister-list\")\n div = div.css(\"div.lister-item\")\n\n for item in div:\n review = {}\n rating_section = item.css_first(\"div.ipl-ratings-bar\")\n if rating_section is not None:\n review['rating'] = int(rating_section.text().strip().split(\"/\")[0])\n\n author = item.css_first(\"span.display-name-link\")\n review['author'] = {}\n review['author']['name'] = author.text().strip()\n link = author.css_first(\"a\")\n review['author']['id'] = link.attrs['href'].strip()\n review['date'] = item.css_first(\"span.review-date\").text().strip()\n review['epoch_date'] = int(datetime.strptime(review['date'], '%d %B %Y').timestamp())\n content_div = item.css_first(\"div.text\")\n content = content_div.text().strip()\n review['content'] = content\n stats = item.css_first(\"div.actions\").text().strip()\n nums = re.findall(r'[0-9,]+',stats)\n review['review_was_helpful'] = {'helpfulCount':int(nums[0].replace(\",\",\"\")), 'totalCount': int(nums[1].replace(\",\",\"\"))}\n reviews.append(review)\n pagination = p.css_first(\"div.load-more-data\")\n pagination_key = None\n if pagination is not None:\n pagination_key = pagination.attrs['data-key'].strip()\n return pagination_key\n\n r = requests.get(f\"https://www.imdb.com/title/{title}/reviews\")\n reviews = []\n\n while True:\n pagination_key = process_reviews(r.content)\n if pagination_key is None:\n break\n logging.info(f\"Getting more reviews using pagination key: {pagination_key}. 
Total reviews ingested: {len(reviews)}.\")\n params = {'paginationKey': pagination_key}\n r = requests.get(f\"https://www.imdb.com/title/{title}/reviews/_ajax\", params=params)\n process_reviews(r.content)\n\n logging.info(f\"Total reviews ingested: {len(reviews)}.\")\n return reviews\n\ndef ratings(title='tt0187393'):\n '''Get detailed ratings data for title'''\n r = requests.get(f\"https://www.imdb.com/title/{title}/ratings\")\n p = HTMLParser(r.content)\n div = p.css_first(\"div.allText\")\n output = {}\n fields = div.text().strip().split(\"\\n\")\n num_votes = int(fields[0].replace(\",\",\"\"))\n avg_rating = float(re.search(r\"[\\d,\\.]+\", fields[1])[0])\n output['globalRating'] = {}\n output['globalRating']['numVotes'] = num_votes\n output['globalRating']['avgRating'] = avg_rating\n tables = p.css(\"table\")\n fields = tables[0].text().strip().split(\"\\n\")\n\n rating_fields = []\n for f in fields:\n f = f.strip()\n if f != \"\":\n rating_fields.append(f)\n\n output['detailedRatings'] = []\n rating_fields = rating_fields[2:]\n for x in range(0,10):\n obj = {}\n rating = int(rating_fields[x*3])\n num_votes = int(rating_fields[(x*3)+2].replace(\",\",\"\"))\n obj['rating'] = rating\n obj['numVotes'] = num_votes\n output['detailedRatings'].append(obj)\n\n demographic_data = tables[1].text().strip().split(\"\\n\")\n\n rating_fields = []\n for f in demographic_data:\n f = f.strip()\n if f != \"\":\n rating_fields.append(f)\n\n output['demographicRatings'] = {}\n output['demographicRatings']['all'] = {}\n output['demographicRatings']['males'] = {}\n output['demographicRatings']['females'] = {}\n rf = rating_fields\n for idx, f in enumerate(rf[0:5]):\n output['demographicRatings']['all'][f] = {'rating':float(rf[(idx*2)+6]),'numVotes':int(rf[(idx*2)+7].replace(\",\",\"\"))}\n output['demographicRatings']['males'][f] = {'rating':float(rf[(idx*2)+17]),'numVotes':int(rf[(idx*2)+18].replace(\",\",\"\"))}\n output['demographicRatings']['females'][f] = {'rating':float(rf[(idx*2)+28]),'numVotes':int(rf[(idx*2)+29].replace(\",\",\"\"))}\n\n output['geographicRatings'] = {}\n output['geographicRatings']['US'] = {}\n output['geographicRatings']['non-US'] = {}\n output['geographicRatings']['top1000Users'] = {}\n\n geographic_data = tables[2].text().strip().split(\"\\n\")\n\n rf = []\n for f in geographic_data:\n f = f.strip()\n if f != \"\":\n rf.append(f)\n\n output['geographicRatings']['top1000Users'] = {'rating':float(rf[3]),'numVotes':int(rf[4].replace(\",\",\"\"))}\n output['geographicRatings']['US'] = {'rating':float(rf[5]),'numVotes':int(rf[6].replace(\",\",\"\"))}\n output['geographicRatings']['non-US'] = {'rating':float(rf[7]),'numVotes':int(rf[8].replace(\",\",\"\"))}\n\n return output\n\n\ndef fullcredits(title='tt0187393'):\n '''Get full credits (cast) for title'''\n r = requests.get(f\"https://www.imdb.com/title/{title}/fullcredits\")\n p = HTMLParser(r.content)\n tables = p.css(\"h4.dataHeaderWithBorder + table.simpleCreditsTable\")\n headers = p.css(\"h4.dataHeaderWithBorder:not([id])\")\n\n main_cast = []\n\n # Check if movie or a series\n cast_header = p.css_first(\"h4#cast\")\n cast_header_text = cast_header.text().strip()\n show_type = \"movie\"\n if cast_header_text.lower().startswith('series'):\n show_type = \"series\"\n\n for idx, table in enumerate(tables):\n trs = table.css(\"tr\")\n category = headers[idx].text().strip()\n for tr in trs:\n actor = {}\n td = tr.css(\"td\")\n a = td[0].css_first(\"a\")\n if a is not None:\n actor['id'] = a.attrs['href'].split(\"?\",1)[0]\n 
actor['name'] = a.text().strip()\n else:\n continue\n if len(td) > 2:\n actor['description'] = td[2].text().strip()\n actor['category'] = category\n main_cast.append(actor)\n\n cast_list = p.css_first(\"table.cast_list\")\n rows_odd = cast_list.css(\"tr.odd\")\n rows_even = cast_list.css(\"tr.even\")\n rows = [val for pair in zip(rows_odd, rows_even) for val in pair] # Join rows by interleaving to maintain order\n\n if show_type == \"movie\":\n for row in rows:\n actor = {}\n actor['category'] = \"Cast\"\n tds = row.css(\"td\")\n actor['image_link'] = None\n photo = tds[0].css_first(\"a\")\n if photo is not None:\n img = photo.css_first(\"img\")\n if img is not None:\n if 'loadlate' in img.attrs:\n actor['image_link'] = img.attrs['loadlate']\n\n a = tds[1].css_first(\"a\")\n actor['actor_id'] = a.attrs['href'].strip().rsplit(\"/\",1)[0]\n actor['actor_name'] = a.text().strip()\n a = tds[3].css_first(\"a\")\n if a is not None and a.attrs['href'] != \"#\":\n actor['character_id'] = a.attrs['href'].strip().rsplit(\"?\",1)[0]\n actor['character_name'] = a.text().strip()\n else:\n actor['character_name'] = re.sub(' +', ' ', tds[3].text().strip().replace(\"\\n\",\"\"))\n main_cast.append(actor)\n\n elif show_type == \"series\":\n for row in rows:\n actor = {}\n actor['category'] = \"Cast\"\n tds = row.css(\"td\")\n if len(tds) != 4:\n continue\n actor['image_link'] = None\n photo = tds[0].css_first(\"a\")\n if photo is not None:\n img = photo.css_first(\"img\")\n if img is not None:\n if 'loadlate' in img.attrs:\n actor['image_link'] = img.attrs['loadlate']\n\n\n\n a = tds[1].css_first(\"a\")\n actor['actor_id'] = a.attrs['href'].strip().rsplit(\"/\",1)[0]\n actor['actor_name'] = a.text().strip()\n a = tds[3].css_first(\"a\")\n if a is not None and a.attrs['href'] != \"#\":\n actor['character_id'] = a.attrs['href'].strip().rsplit(\"?\",1)[0]\n actor['character_name'] = a.text().strip()\n else:\n actor['character_name'] = re.sub(' +', ' ', tds[3].text().strip().replace(\"\\n\",\"\"))\n main_cast.append(actor)\n\n return main_cast\n\n\ndef fetch_section(title='tt0117731', section='trivia'):\n '''This method will fetch data for a particular section (trivia, goofs, quotes, etc.)'''\n r = requests.get(f\"https://www.imdb.com/title/{title}/{section}\")\n p = HTMLParser(r.content)\n\n sodavote = p.css(\".sodavote\")\n list = p.css(\"div.list\")\n\n output = {}\n output['trivia'] = []\n\n for l in list:\n sodavote = l.css(\".sodavote\")\n category = l.css_first(\"h4.li_group\")\n\n category_type = 'Basic'\n\n if category is not None:\n category_type = category.text().strip()\n\n for obj in sodavote:\n item = {}\n item['category'] = category_type\n item['id'] = obj.id\n sodatext = obj.css_first(\".sodatext\")\n links = sodatext.css(\"a\")\n\n item['associations'] = []\n seen_associations = set()\n\n for link in links:\n association = {}\n association['id'] = link.attrs['href']\n if association['id'] in seen_associations:\n continue\n seen_associations.add(association['id'])\n association['text'] = link.text()\n item['associations'].append(association)\n\n trivia_text = sodatext.text()\n item['text'] = trivia_text.strip()\n output['trivia'].append(item)\n\n return output['trivia']\n\n\noutput = {}\nif len(sys.argv) < 2:\n print (\"Must provide title for movie / episide (e.g. 
'tt0117731')\")\n sys.exit()\ntitle = sys.argv[1]\n\nfor type in ['goofs','quotes','trivia','crazycredits']:\n\n logging.info(f\"Fetching {type} data from IMDB.\")\n section = fetch_section(title, type)\n output[type] = section\n\nlogging.info(\"Fetching keywords for title from IMDB.\")\noutput['keywords'] = keywords(title=title)\nlogging.info(\"Fetching plot summaries for title from IMDB.\")\noutput['summaries'] = plotsummary(title=title)\nlogging.info(\"Fetching full credits from IMDB.\")\noutput['credits'] = fullcredits(title=title)\nlogging.info(\"Fetching extended ratings from IMDB.\")\noutput['rating'] = ratings(title=title)\nlogging.info(\"Fetching all available reviews from IMDB.\")\noutput['reviews'] = reviews(title=title)\n\n# Dump data in json format\nprint(json.dumps(output, ensure_ascii=False, escape_forward_slashes=False))\n\n" }, { "alpha_fraction": 0.7476525902748108, "alphanum_fraction": 0.7852112650871277, "avg_line_length": 36, "blob_id": "cb4013ec1e1802116c566163aab20564a53f2cd9", "content_id": "8ca14c12900b5ddf273b9c29ac857505a608c5fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 852, "license_type": "no_license", "max_line_length": 398, "num_lines": 23, "path": "/README.md", "repo_name": "pushshift/imdb_to_json", "src_encoding": "UTF-8", "text": "This code will fetch data using a title code and convert the data to JSON format.\n\nExample of usage:\n\n./convert_imdb_to_json.py tt0117731\n\nThe movie_data.ndjson.zst file contains movie / episode data for over 1 million shows. The data is in ndjson format and is sorted by the number of votes. This data also contains basic metadata for each show including genres, start/end year, title, adult classification and run time. You can use the \"titleCode\" in each movie object to download data from IMDB in json format using the code provided.\n\nMovie title codes and other metadata can be downloaded from here: https://datasets.imdbws.com/\n\nThis should get you started.\n\n2020-08-19: Added ratings metadata\n\n2020-08-19: Added reviews metadata\n\n2020-08-19: Added title keywords metada\n\nTo do:\n\nAdd additional logging\n\nAdd better error handling (for requests)\n\n" } ]
2
sgallaher/lesson_15_06_20
https://github.com/sgallaher/lesson_15_06_20
7538edef7bd14dbd0c38597f1cd64dde874a8e25
6ce0dd4a672478bd0aaa13118dd181bdf1394d90
e145a48fb5c79406358b33ba258dcfea15d10f72
refs/heads/master
2022-10-18T05:05:38.376934
2020-06-11T12:06:48
2020-06-11T12:06:48
271,534,498
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5443037748336792, "alphanum_fraction": 0.5493670701980591, "avg_line_length": 18.200000762939453, "blob_id": "e027b26d724adcbd3129d4089b3d6039c6a26343", "content_id": "52e06f66c52edf326dca825a985d25eca930e282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 33, "num_lines": 20, "path": "/class1.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "class person:\n num=0\n def __init__(self,name):\n self.ident=person.num\n self.name=name\n person.num+=1\n def getme(self):\n print(self.name)\n\n def nextid():\n print(person.num)\n \npeople = []\npeople.append(person(\"shane\"))\npeople.append(person(\"Harry\"))\n\nfor p in people:\n p.getme()\n print(f\"{p.ident}, {p.name}\")\nperson.nextid()\n\n\n \n" }, { "alpha_fraction": 0.5399129390716553, "alphanum_fraction": 0.5457184314727783, "avg_line_length": 23.175437927246094, "blob_id": "328dcfec843bd6ca058d4b32fe501f37abad46a5", "content_id": "1383e2bc914a5b1043680aa3639a07dfeed89d9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 84, "num_lines": 57, "path": "/class3.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "#getting external files (CSV) into a dictionary\n\n#need to import module to handle csv files\nimport csv\n#this is global contacts list\ncontacts=[]\n\n#defining a class structure for a contact \nclass contacts:\n num=0\n def __init__(self,ident,contact):\n self.ident=ident\n self.contact=contact\n num+=1\n\n def nextid():\n return num\n \nclass contact(contacts):\n def __init__(self, name, tel, address):\n \n self.name=name\n self.tel=tel\n self.address=address\n\n\n\ndef get_contacts():\n \n with open(\"contacts2.csv\") as file:\n data = csv.reader(file, delimiter=',')\n for person in data:\n c=contacts(0,None)\n \n ident=contacts.nextid()\n \n \n temp=contact(person[0],person[1],person[2])\n entry=contacts(ident,temp)\n \n \nget_contacts()\n\n# now to search\nsearch = True\n\nwhile search:\n results=[]\n name=input(\"Enter person: \").capitalize()\n for contact in contacts:\n if contacts.contact.name == name:\n results.append(contacts.contact)\n if len(results)>0:\n for result in results:\n print(f\"Name:{result.name}, Tel:{result.tel}, Address:{result.address}\")\n else:\n print(f\"{name} not found\")\n" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5934873819351196, "avg_line_length": 23.41025733947754, "blob_id": "5b598d58ab39925a65267893861f433847f5aa06", "content_id": "70994cfc074a8aec49c6176b965b6bc39b021d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 84, "num_lines": 39, "path": "/class2.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "#getting external files (CSV) into a dictionary\n\n\nimport csv\n#this is global contacts list\ncontacts=[]\n\n#defining a class structure for a contact \nclass contact():\n def __init__(self, name, tel, address):\n self.name=name\n self.tel=tel\n self.address=address\n\n\n\ndef get_contacts():\n \n with open(\"contacts2.csv\") as file:\n data = csv.reader(file, delimiter=',')\n for person in data:\n contacts.append(contact(person[0].capitalize(),person[1],person[2]))\n \nget_contacts()\n\n# now to search\nsearch = 
True\n\nwhile search:\n results=[]\n name=input(\"Enter person: \").capitalize()\n for contact in contacts:\n if contact.name == name:\n results.append(contact)\n if len(results)>0:\n for result in results:\n print(f\"Name:{result.name}, Tel:{result.tel}, Address:{result.address}\")\n else:\n print(f\"{name} not found\")\n" }, { "alpha_fraction": 0.3989361822605133, "alphanum_fraction": 0.542553186416626, "avg_line_length": 17.299999237060547, "blob_id": "4b243ebf050e50986c33ab3b248261962a2fdb41", "content_id": "2ed5f258f32ed6b016421b20b5c2e340c3b83f99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 35, "num_lines": 10, "path": "/dict1.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "#dictionaries in Python\n\npeople = {\n \"Hermione\": \"555-555-111\",\n \"Ron\": \"555-555-222\",\n \"Harry\": \"555-555-333\"\n }\n\nif \"Ron\" in people:\n print(f\"Found {people['Ron']}\")\n \n" }, { "alpha_fraction": 0.6506550312042236, "alphanum_fraction": 0.6506550312042236, "avg_line_length": 19.363636016845703, "blob_id": "1ea87fd65aef08f9d7982b4f71a7702a30d39428", "content_id": "fc405cfb4a0e9148705d2a48cc1b98602f5cef5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/dict4.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "#getting external files (CSV) into a dictionary\n\n\nimport csv\n\n\n\ndef contacts():\n with open(\"contacts.csv\") as file:\n data = csv.reader(file, delimiter=',')\n people=dict(data)\n return people\n\n\n#open contacts\npeople = contacts()\n#get input\nperson = input(\"Enter name: \").capitalize()\nif person in people:\n print(f\"{person}'s phone number is {people[person]}\")\nelse:\n print(f\"You don't seem to have {person}'s phone number\")\n \n \n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5687500238418579, "avg_line_length": 23.19230842590332, "blob_id": "0377c3f23a992b619d42ce666b62cd5a59a6ca28", "content_id": "2475d1c133914e9baf732559b8d9e8288876165a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 76, "num_lines": 26, "path": "/tempclass.py", "repo_name": "sgallaher/lesson_15_06_20", "src_encoding": "UTF-8", "text": "#getting external files (CSV) into a dictionary\n\n\nimport csv\n\ncontacts=[]\n \nclass contact():\n def __init__(self, name, tel, address):\n self.name=name\n self.tel=tel\n self.address=address\n def tel(self,name):\n return self.tel\n\n\ndef get_contacts():\n \n with open(\"contacts2.csv\") as file:\n data = csv.reader(file, delimiter=',')\n for person in data:\n contacts.append(contact(person[0],person[1],person[2]))\n \nget_contacts()\nfor contact in contacts:\n print(f\"{contact.name}, tel: {contact.tel}, Address: {contact.address}\")\n\n \n \n" }, { "alpha_fraction": 0.5037593841552734, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 20.75, "blob_id": "3ae7b0879cd14af796b77222ba274718b6a73fc0", "content_id": "b29ddeef96a980673877bf07ef81e63371a51434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/dict2.py", "repo_name": "sgallaher/lesson_15_06_20", 
"src_encoding": "UTF-8", "text": "#dictionaries in Python\n\npeople = {\n \"Hermione\": \"555-555-111\",\n \"Ron\": \"555-555-222\",\n \"Harry\": \"555-555-333\"\n }\n\n#get input\nperson = input(\"Enter name: \").capitalize()\nif person in people:\n print(f\"{person}'s phone number is {people[person]}\")\n \n" } ]
7
mtnzorro/digita_crafts_week3_python_pygame_monster_game
https://github.com/mtnzorro/digita_crafts_week3_python_pygame_monster_game
f6bb3e5c67bdb1bda784a49fcae925a15dee31f2
724f556e77e3bdf9c0a995b8521a951cb2c7e01f
28643cf91bb234546cac59c26097b02be7cf865c
refs/heads/master
2021-01-12T12:03:05.840985
2016-10-04T17:45:26
2016-10-04T17:45:26
69,911,214
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4626414179801941, "alphanum_fraction": 0.48426124453544617, "avg_line_length": 26.216463088989258, "blob_id": "eb4857abfb32c70801713134b73d5ac8de5e9c45", "content_id": "dc565560a7a950de2fab57021246574d6753f8d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8927, "license_type": "no_license", "max_line_length": 101, "num_lines": 328, "path": "/monster_game.py", "repo_name": "mtnzorro/digita_crafts_week3_python_pygame_monster_game", "src_encoding": "UTF-8", "text": "import pygame\nimport time\nimport random\n\nKEY_UP = 273\nKEY_DOWN = 274\nKEY_LEFT = 276\nKEY_RIGHT = 275\nENTER = 13\n\nclass Enemy(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.change_dir_counter = 120\n\n def move(self, width, height):\n self.x += self.speed_x\n self.y += self.speed_y\n if self.x > width:\n self.x = 0\n if self.y > height:\n self.y = 0\n if self.x < 0:\n self.x = width\n if self.y < 0:\n self.y = height\n\n self.maybe_direction_change()\n\n\n def maybe_direction_change(self):\n self.change_dir_counter -=1\n if self.change_dir_counter <= 0:\n self.change_dir_counter = 120\n self.change_dir()\n\n def change_dir(self):\n east_west = random.randint(1,2)\n if east_west == 1:\n new_speed_x = -2\n else:\n new_speed_x = 2\n north_south = random.randint(1,2)\n if north_south == 1:\n new_speed_y = -2\n else:\n new_speed_y = 2\n vert_horiz = random.randint(1,2)\n if vert_horiz == 1:\n self.speed_x = new_speed_x\n self.speed_y = 0\n else:\n self.speed_y = new_speed_y\n self.speed_x = 0\n\n def render(self, screen):\n screen.blit(self.img, (self.x, self.y))\n\n def respawn(self, width, height):\n self.x = random.randint(0, width)\n self.y = random.randint(0, height)\n\n def freeze(self):\n self.speed_x = 0\n self.speed_y = 0\n\n\nclass Hero(object):\n def __init__(self):\n self.x = 235\n self.y = 235\n self.speed_x = 0\n self.speed_y = 0\n self.img = pygame.image.load(\"images/hero.png\").convert_alpha()\n self.count = 0\n self.level = 'Level: 0'\n\n def move(self, width, height):\n self.x += self.speed_x\n self.y += self.speed_y\n\n if self.x > 450:\n self.x = 450\n if self.y > 415:\n self.y = 415\n if self.x < 31:\n self.x = 31\n if self.y < 31:\n self.y = 31\n\n def move_event(self,event, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT):\n if event.type == pygame.KEYDOWN:\n # activate the cooresponding speeds\n # when an arrow key is pressed down\n if event.key == KEY_DOWN:\n self.speed_y = 3\n elif event.key == KEY_UP:\n self.speed_y = -3\n elif event.key == KEY_LEFT:\n self.speed_x = -3\n elif event.key == KEY_RIGHT:\n self.speed_x = 3\n if event.type == pygame.KEYUP:\n # deactivate the cooresponding speeds\n # when an arrow key is released\n if event.key == KEY_DOWN:\n self.speed_y = 0\n elif event.key == KEY_UP:\n self.speed_y = 0\n elif event.key == KEY_LEFT:\n self.speed_x = 0\n elif event.key == KEY_RIGHT:\n self.speed_x = 0\n\n def render(self, screen):\n screen.blit(self.img, (self.x, self.y))\n\n def contact(self, prey):\n if prey.x + 32 < self.x:\n pass\n elif self.x + 32 < prey.x:\n pass\n elif prey.y + 32 < self.y:\n pass\n elif self.y + 32 < prey.y:\n pass\n else:\n return True\n\n def freeze(self):\n self.speed_x = 0\n self.speed_y = 0\n\n def level_up(self, level):\n self.count += 1\n self.level = 'Level: %d' % self.count\n\n\n\nclass Monster(Enemy):\n def __init__(self):\n self.x = 30\n self.y = 30\n self.speed_x = 2\n self.speed_y = 0\n self.img = 
pygame.image.load(\"images/monster.png\").convert_alpha()\n self.change_dir_counter = 120\n\n\nclass Goblin(Enemy):\n def __init__(self):\n self.x = 0#random.randint(30, 300)\n self.y = 100#random.randint(30, 300)\n self.speed_x = 1\n self.speed_y = 1\n self.img = pygame.image.load('images/goblin.png').convert_alpha()\n self.change_dir_counter = 60\n\n def contact(self, prey):\n if prey.x + 32 < self.x:\n pass\n elif self.x + 32 < prey.x:\n pass\n elif prey.y + 32 < self.y:\n pass\n elif self.y + 32 < prey.y:\n pass\n else:\n return True\n\n\ndef main():\n # declare the size of the canvas\n width = 512\n height = 480\n blue_color = (97, 159, 182)\n game_over = False\n goblin_win = False\n level= 0\n level_str = '%d' % level\n\n # initialize the pygame framework\n pygame.init()\n\n # create screen\n screen = pygame.display.set_mode((width, height))\n\n # set window caption\n pygame.display.set_caption('Simple Example')\n\n # create a clock\n clock = pygame.time.Clock()\n\n ################################\n # PUT INITIALIZATION CODE HERE #\n ################################\n #music and sounds\n win_sound = pygame.mixer.Sound('sounds/smb_world_clear.wav')\n lose_sound = pygame.mixer.Sound('sounds/smb_mariodie.wav')\n pygame.mixer.init()\n pygame.mixer.music.load('sounds/castlevania.wav')\n pygame.mixer.music.play(-1)\n #counters for timing the change in direction of enemy Character\n change_dir_counter_monst = 120\n change_dir_counter_gob = 180\n\n\n\n #calling instances of Characters\n monster = Monster()\n hero = Hero()\n goblin_list = []\n #background image\n bkgr_image = pygame.image.load('images/background.png').convert_alpha()\n\n #level counter\n\n # screen.blit(level_cnt, (50, 100))\n\n #time\n # now = time.time()\n # time_til_direction_change = now + 2\n #section below would go into the loop\n # if now >= time_til_direction_change:\n # time_til_direction_change = now + 2\n # game loop\n stop_game = False\n while not stop_game:\n\n # look through user events fired\n for event in pygame.event.get():\n ###############################\n #PUT EVENT HANDLING CODE HERE #\n ################################\n\n hero.move_event(event, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT)\n\n if event.type == pygame.KEYDOWN:\n if event.key == ENTER:\n if goblin_win == False:\n goblin_list.append(Goblin())\n hero.level_up(level)\n\n print level\n else:\n pass\n game_over = False\n goblin_win = False\n monster.respawn(width, height)\n for goblin in goblin_list:\n goblin.respawn(width, height)\n\n if event.type == pygame.QUIT:\n # if they closed the window, set stop_game to True\n # to exit the main loop\n stop_game = True\n\n\n #######################################\n # PUT LOGIC TO UPDATE GAME STATE HERE #\n #######################################\n monster.move(width,height)\n hero.move(width, height)\n for goblin in goblin_list:\n goblin.move(width, height)\n\n if not game_over:\n if hero.contact(monster):\n win_sound.play()\n game_over = True\n\n else:\n for goblin in goblin_list:\n if goblin.contact(hero):\n lose_sound.play()\n goblin_win = True\n game_over = True\n\n ################################\n # PUT CUSTOM DISPLAY CODE HERE #\n ################################\n # renders background image\n\n screen.blit(bkgr_image, (0, 0))\n\n font = pygame.font.Font(None, 25)\n level_cnt = font.render(hero.level, True, (0, 0, 0))\n screen.blit(level_cnt, (35, 35))\n\n #renders hero image\n\n\n hero.render(screen)\n if game_over:\n hero.freeze()\n if goblin_win == True:\n font = pygame.font.Font(None, 25)\n 
text = font.render('THE GOBLIN GOT YOU!!! Hit ENTER to play again!', True, (0, 0, 0))\n screen.blit(text, (50, 230))\n else:\n font = pygame.font.Font(None, 25)\n text = font.render('Hit ENTER to play again!', True, (0, 0, 0))\n screen.blit(text, (150, 230))\n\n # level += 1\n\n else:\n monster.render(screen)\n for goblin in goblin_list:\n goblin.render(screen)\n\n\n\n pygame.display.update()\n\n\n\n # tick the clock to enforce a max framerate\n clock.tick(60)\n\n\n\n # quit pygame properly to clean up resources\n pygame.quit()\n\nif __name__ == '__main__':\n main()\n" } ]
1
kibach/kerbor
https://github.com/kibach/kerbor
8ab03a37f3300e7378565b1d628b299a8c599e34
1f5133249736bb0507765bd1e58bf97aa73969ce
462b5bb110bdd0951c0ec96b2cf5595a25fb9ae4
refs/heads/master
2021-01-19T16:48:25.092398
2017-04-15T16:58:39
2017-04-15T16:58:39
88,287,992
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5863309502601624, "alphanum_fraction": 0.5863309502601624, "avg_line_length": 19.850000381469727, "blob_id": "dca29e7d19a518dffb55cd1408e59982e7ff82e2", "content_id": "6cebf23e3ef1dbbee7d7aaf8062ee9cdeb64f5c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 54, "num_lines": 40, "path": "/server/storage.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from simplekv.fs import FilesystemStore\nimport pickle\n\n\nclass BaseStorage(object):\n storage = None\n\n def get(self, key):\n pass\n\n def persist(self, key, _object):\n pass\n\n\nclass DictStorage(BaseStorage):\n def __init__(self):\n self.storage = {}\n\n def get(self, key):\n if key in self.storage:\n return self.storage[key]\n else:\n return None\n\n def persist(self, key, _object):\n self.storage[key] = _object\n\n\nclass SimpleKVStorage(BaseStorage):\n def __init__(self):\n self.storage = FilesystemStore('./.data')\n\n def get(self, key):\n if key in self.storage:\n return pickle.loads(self.storage.get(key))\n else:\n return None\n\n def persist(self, key, _object):\n self.storage.put(key, pickle.dumps(_object))\n" }, { "alpha_fraction": 0.6393442749977112, "alphanum_fraction": 0.645603597164154, "avg_line_length": 29.2252254486084, "blob_id": "6a5d8913b2daa407214b915894cc05bff5bd6922", "content_id": "aeb6fabbff62c36412d2c8ef7f90093748563867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3355, "license_type": "no_license", "max_line_length": 116, "num_lines": 111, "path": "/server/kerborsrv.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from shared import messages\nimport time, datetime\n\n\ntgs_keys = {\n 'default': '9xziLRNZuqKUP5TmxIfObNvPQDR2PGw7'\n}\n\nserver_keys = {\n 'database': 'YO4xNirT45SKSOsLSLZt7VNpvhyqfNES'\n}\n\n\nclass KerborBaseServer(object):\n user_server = None\n accepted_message = None\n\n def __init__(self, _user_server):\n self.user_server = _user_server\n\n def handle(self, message):\n if not isinstance(message, basestring):\n raise TypeError(\"message must be a json-encoded string\")\n\n class_name, message_object = self.accepted_message.deserialize(message)\n if class_name != str(self.accepted_message.__name__):\n return messages.FailMessage().serialize()\n\n return self.dispatch(message_object).serialize()\n\n def dispatch(self, message):\n return messages.FailMessage()\n\n\nclass KerborAuthenticationServer(KerborBaseServer):\n accepted_message = messages.AuthenticateMeMessage\n\n def dispatch(self, message):\n user = self.user_server.lookup(message.username)\n if user is None:\n return messages.FailMessage()\n\n if message.tgs not in tgs_keys:\n return messages.FailMessage()\n\n response = messages.AuthenticationResponseMessage(\n user.make_sess_key(),\n user.secret_key,\n user.make_tgt(),\n tgs_keys[message.tgs]\n )\n\n self.user_server.update(user)\n\n self.user_server.associate_tgt(user)\n\n return response\n\n\nclass KerborTicketGrantingServer(KerborBaseServer):\n accepted_message = messages.TicketRequestMessage\n tgs_key = 'default'\n\n def dispatch(self, message):\n if message.server not in server_keys:\n return messages.FailMessage()\n\n tgt = message.get_tgt(tgs_keys[self.tgs_key])\n user = self.user_server.resolve_tgt(tgt)\n _, id_message = message.get_id(user.sess_key)\n current_timestamp = int(time.mktime(datetime.datetime.utcnow().timetuple()))\n\n if 
id_message.username != user.username or abs(id_message.timestamp - current_timestamp) > 30:\n return messages.FailMessage()\n\n ticket = messages.TicketMessage(user.username, '127.0.0.1', 86400)\n grant = messages.GrantMessage(message.server, ticket, server_keys[message.server])\n response = messages.TicketGrantingMessage(\n grant,\n server_keys[message.server],\n ticket.sess_key,\n user.sess_key\n )\n\n return response\n\n\nclass KerborServiceServer(KerborBaseServer):\n accepted_message = messages.ServiceRequestMessage\n server_key = 'database'\n\n def dispatch(self, message):\n _, grant = message.get_grant(server_keys[self.server_key])\n _, ticket = grant.get_ticket(server_keys[self.server_key])\n sess_key = ticket.sess_key\n\n _, id_message = message.get_id(sess_key)\n\n if grant.server != self.server_key:\n return messages.FailMessage()\n\n current_timestamp = int(time.mktime(datetime.datetime.utcnow().timetuple()))\n\n if ticket.remote_address != '127.0.0.1' or current_timestamp - id_message.timestamp - ticket.valid_for > 30:\n return messages.FailMessage()\n\n if ticket.username != id_message.username:\n return messages.FailMessage()\n\n response = messages.ServiceGrantingMessage(id_message.timestamp, sess_key)\n return response\n" }, { "alpha_fraction": 0.5717045068740845, "alphanum_fraction": 0.5726702213287354, "avg_line_length": 24.567901611328125, "blob_id": "224d65599d489b03912dfbd3b637bcfdd5ac767f", "content_id": "0d6f78246aab56ee6e1206452eb637eabe2172ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 82, "num_lines": 81, "path": "/server/userserver.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from cryptography.fernet import Fernet\nfrom storage import BaseStorage\nimport hmac\n\n\nclass BaseUserServer(object):\n user_storage = None\n\n def __init__(self, _user_storage):\n if not isinstance(_user_storage, BaseStorage):\n raise TypeError(\"_user_storage must be a subclass of BaseStorage\")\n\n self.user_storage = _user_storage\n\n\nclass User(object):\n username = ''\n password = ''\n secret_key = ''\n tgt = ''\n sess_key = ''\n perms = []\n\n def _get_key(self):\n return Fernet.generate_key()[:32]\n\n def __init__(self, _dict):\n if 'username' in _dict:\n self.username = _dict['username']\n\n if 'password' in _dict:\n self.password = _dict['password']\n\n if 'secret_key' in _dict:\n self.secret_key = _dict['secret_key']\n\n if 'tgt' in _dict:\n self.tgt = _dict['tgt']\n\n if 'sess_key' in _dict:\n self.sess_key = _dict['sess_key']\n\n def make_sess_key(self):\n self.sess_key = self._get_key()\n return self.sess_key\n\n def make_tgt(self):\n self.tgt = self._get_key()\n return self.tgt\n\n\nclass UserServer(BaseUserServer):\n\n def lookup(self, username):\n obj = self.user_storage.get('user_' + username)\n return obj\n\n def update(self, user):\n if not isinstance(user, User):\n raise TypeError(\"user must be an instance of User\")\n\n self.user_storage.persist('user_' + user.username, user)\n\n def associate_tgt(self, user):\n self.user_storage.persist('tgt_' + user.tgt, user.username)\n\n def resolve_tgt(self, tgt):\n username = self.user_storage.get('tgt_' + tgt)\n if username is None:\n return None\n\n return self.lookup(username)\n\n def register(self, _username, _password):\n user = User({\n 'username': _username,\n 'password': _password,\n 'secret_key': hmac.new(bytes(_password), bytes(_username)).hexdigest()\n })\n\n 
self.user_storage.persist('user_' + _username, user)\n" }, { "alpha_fraction": 0.5928885340690613, "alphanum_fraction": 0.5956851840019226, "avg_line_length": 25.77005386352539, "blob_id": "fbef18a51e3f32c916296d5b17e5231c8ad34f5b", "content_id": "6a95bf5e6907abf0bb916e5a92342a349dfdd59e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5006, "license_type": "no_license", "max_line_length": 81, "num_lines": 187, "path": "/shared/messages.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from cryptography.fernet import Fernet\nfrom json import loads, dumps\nimport datetime\nimport time\nimport base64\n\n\nclass BaseMessage(object):\n _fernet = Fernet\n\n def _set_key(self, key):\n self._fernet = Fernet(base64.urlsafe_b64encode(key))\n\n def _encrypt(self, message):\n return self._fernet.encrypt(message)\n\n def _decrypt(self, message):\n return self._fernet.decrypt(bytes(message))\n\n def serialize(self):\n # type: () -> basestring\n fields = [('__classname', type(self).__name__)]\n for property_name, value in vars(self).iteritems():\n if not property_name.startswith('_') and not callable(value):\n fields.append((property_name, value))\n\n return dumps(fields)\n\n @classmethod\n def deserialize(cls, representation):\n try:\n fields = loads(representation)\n except Exception:\n return 'FailMessage', FailMessage()\n\n _, class_name = fields.pop(0)\n\n msg = cls.__new__(cls)\n for property_name, value in fields:\n setattr(msg, property_name, value)\n\n return class_name, msg\n\n\nclass FailMessage(BaseMessage):\n pass\n\n\nclass AuthenticateMeMessage(BaseMessage):\n username = ''\n tgs = ''\n\n def __init__(self, _username, _tgs):\n self.username = _username\n self.tgs = _tgs\n\n\nclass AuthenticationResponseMessage(BaseMessage):\n session_key = ''\n tgt = ''\n\n def __init__(self, _sess_key, _secret_key, _tgt, _tgs_key):\n self._set_key(_secret_key)\n self.session_key = self._encrypt(_sess_key)\n self._set_key(_tgs_key)\n self.tgt = self._encrypt(_tgt)\n\n def get_sess_key(self, _secret_key):\n self._set_key(_secret_key)\n return self._decrypt(self.session_key)\n\n\nclass IDMessage(BaseMessage):\n username = ''\n timestamp = 0\n id_session_key = ''\n\n def __init__(self, _username):\n self.username = _username\n self.timestamp = int(time.mktime(datetime.datetime.utcnow().timetuple()))\n self.id_session_key = '123'\n\n\nclass TicketRequestMessage(BaseMessage):\n server = ''\n id_message = ''\n tgt = ''\n\n def __init__(self, _server, _id, _sess_key, _tgt):\n self.server = _server\n self._set_key(_sess_key)\n self.id_message = self._encrypt(_id.serialize())\n self.tgt = _tgt\n\n def get_tgt(self, tgs_key):\n self._set_key(tgs_key)\n return self._decrypt(self.tgt)\n\n def get_id(self, _sess_key):\n self._set_key(_sess_key)\n representation = self._decrypt(self.id_message)\n return IDMessage.deserialize(representation)\n\n\nclass TicketMessage(BaseMessage):\n username = ''\n remote_address = ''\n valid_for = 0\n sess_key = ''\n\n def gen_sess_key(self):\n return self._fernet.generate_key()[:32]\n\n def __init__(self, _username, _address, _valid):\n self.username = _username\n self.remote_address = _address\n self.valid_for = _valid\n self.sess_key = self.gen_sess_key()\n\n\nclass GrantMessage(BaseMessage):\n server = ''\n ticket = ''\n\n def __init__(self, _server, _ticket, _serv_key):\n self.server = _server\n self._set_key(_serv_key)\n self.ticket = self._encrypt(_ticket.serialize())\n\n def get_ticket(self, 
_serv_key):\n self._set_key(_serv_key)\n representation = self._decrypt(self.ticket)\n return TicketMessage.deserialize(representation)\n\n\nclass TicketGrantingMessage(BaseMessage):\n grant = ''\n c_s_session_key = ''\n\n def __init__(self, _grant, _serv_key, _c_s_sess, _sess_key):\n self._set_key(_serv_key)\n self.grant = self._encrypt(_grant.serialize())\n self._set_key(_sess_key)\n self.c_s_session_key = self._encrypt(_c_s_sess)\n\n def get_c_s_key(self, _sess_key):\n self._set_key(_sess_key)\n return self._decrypt(self.c_s_session_key)\n\n\nclass ServiceRequestMessage(BaseMessage):\n grant = ''\n id_message = ''\n\n def __init__(self, _id, _sess_key, _grant):\n self._set_key(_sess_key)\n self.id_message = self._encrypt(_id.serialize())\n self.grant = _grant\n\n def get_id(self, _sess_key):\n self._set_key(_sess_key)\n representation = self._decrypt(self.id_message)\n return IDMessage.deserialize(representation)\n\n def get_grant(self, _serv_key):\n self._set_key(_serv_key)\n representation = self._decrypt(self.grant)\n return GrantMessage.deserialize(representation)\n\n\nclass ServiceGrantingMessage(BaseMessage):\n timestamp = ''\n\n def __init__(self, _time, _sess_key):\n self._set_key(_sess_key)\n self.timestamp = self._encrypt(str(_time))\n\n def verify_time(self, _sess_key, _expected_time):\n self._set_key(_sess_key)\n t = self._decrypt(self.timestamp)\n try:\n if int(t) == int(_expected_time):\n return True\n else:\n return False\n except Exception:\n return False\n" }, { "alpha_fraction": 0.6888889074325562, "alphanum_fraction": 0.7432098984718323, "avg_line_length": 20.3157901763916, "blob_id": "1380a8bbf918602657cbcd88f83ddb4ef9e476f9", "content_id": "c319b42bcaa2f19c9c433206e95a115035bcf5c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 405, "license_type": "no_license", "max_line_length": 103, "num_lines": 19, "path": "/README.md", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "# Kerbor\nKerbor is a kerberos-like protocol implementation\n\n# Installation\n```\ngit clone https://github.com/kibach/kerbor.git && cd kerbor\npip install -r requirements.txt\npython serve.py\n```\n\nNow, open http://127.0.0.1:5000/newuser in your browser and register new user. 
For example: test/123098\n\nIn separate terminal:\n\n```\npython client.py test 123098\n```\n\nIf auth is successful, it just outputs `True`.\n" }, { "alpha_fraction": 0.6485797762870789, "alphanum_fraction": 0.649672269821167, "avg_line_length": 26.73737335205078, "blob_id": "4d3648d3ea7e3e46cd66de195d117f38cdf8fc57", "content_id": "a8c317c4da67c94855cb9282458073a4d6dbfbba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2746, "license_type": "no_license", "max_line_length": 104, "num_lines": 99, "path": "/serve.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom flask import Flask\nfrom flask import make_response\nfrom flask import request\n\nfrom server.kerborsrv import KerborAuthenticationServer, KerborTicketGrantingServer, KerborServiceServer\nfrom server.userserver import UserServer\nfrom server.storage import SimpleKVStorage\n\napp = Flask(__name__)\nstore = SimpleKVStorage()\nus = UserServer(store)\nkerbor_as = KerborAuthenticationServer(us)\nkerbor_tgs = KerborTicketGrantingServer(us)\nkerbor_service = KerborServiceServer(us)\n\n\[email protected]_request\ndef log_request():\n logfile.write('{} {}\\n'.format(request.method, request.path))\n logfile.write('{}\\n\\n->\\n\\n'.format(request.get_data()))\n\n\[email protected]_request\ndef log_response(response):\n logfile.write('{}\\n\\n---\\n\\n'.format(response.get_data()))\n return response\n\n\[email protected]('/')\ndef hello():\n resp = make_response(\"\"\"Kerbor Srv\n / - help\n /newuser - Register a new user (web only)\n /logmein - Step1\n /getmeticket - Step2\n /getmeservice - Step3\"\"\")\n resp.headers['Content-Type'] = 'text/plain'\n return resp\n\n\[email protected]('/logmein', methods=['POST', 'GET'])\ndef log_me_in():\n if request.method == 'GET':\n return 'I expect AuthenticateMeMessage to be POSTed'\n\n json_obj = kerbor_as.handle(request.get_data())\n resp = make_response(json_obj)\n resp.headers['Content-Type'] = 'application/json'\n\n return resp\n\n\[email protected]('/getmeticket', methods=['POST', 'GET'])\ndef get_me_ticket():\n if request.method == 'GET':\n return 'I expect TicketRequestMessage to be POSTed'\n\n json_obj = kerbor_tgs.handle(request.get_data())\n resp = make_response(json_obj)\n resp.headers['Content-Type'] = 'application/json'\n\n return resp\n\n\[email protected]('/getmeservice', methods=['POST', 'GET'])\ndef get_me_service():\n if request.method == 'GET':\n return 'I expect ServiceRequestMessage to be POSTed'\n\n json_obj = kerbor_service.handle(request.get_data())\n resp = make_response(json_obj)\n resp.headers['Content-Type'] = 'application/json'\n\n return resp\n\n\[email protected]('/newuser', methods=['POST', 'GET'])\ndef new_user():\n if request.method == 'GET':\n return \"\"\"<form method=post>\n <input type=text placeholder=Username name=username><br>\n <input type=password placeholder=Password name=password><br>\n <input type=submit value=Submit>\n </form>\"\"\"\n else:\n if 'username' in request.form and 'password' in request.form:\n us.register(\n request.form['username'],\n request.form['password']\n )\n return 'Success'\n else:\n return 'Failure'\n\nif __name__ == '__main__':\n logfile = open('reqs.log', 'w')\n app.run()\n logfile.close()\n" }, { "alpha_fraction": 0.6632652878761292, "alphanum_fraction": 0.6632652878761292, "avg_line_length": 37.24390411376953, "blob_id": "06aec4e65d4d11ecc932ecf7af414163395eef3f", "content_id": "1ddef89ec2d32d62e8768bda0dac59bc64774947", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1568, "license_type": "no_license", "max_line_length": 109, "num_lines": 41, "path": "/client/kerborclient.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from shared import messages\nimport hmac\n\n\nclass KerborClient(object):\n user_name = ''\n user_password = ''\n user_secret = ''\n emitter = None\n\n def __init__(self, _username, _password):\n self.user_name = _username\n self.user_password = _password\n self.user_secret = hmac.new(bytes(_password), bytes(_username)).hexdigest()\n\n def set_emitter(self, _emitter):\n self.emitter = _emitter\n\n def authenticate_for_service(self, tgs, service):\n if self.emitter is None:\n raise TypeError(\"emitter should be instance of Emitter\")\n\n auth = messages.AuthenticateMeMessage(self.user_name, tgs)\n check, auth_response = self.emitter.emit(auth)\n if not check:\n raise ValueError(\"Something went wrong\")\n\n tgs_sess_key = auth_response.get_sess_key(self.user_secret)\n id_message = messages.IDMessage(self.user_name)\n ticket_request = messages.TicketRequestMessage(service, id_message, tgs_sess_key, auth_response.tgt)\n check, ticket_response = self.emitter.emit(ticket_request)\n if not check:\n raise ValueError(\"Something went wrong\")\n\n service_sess_key = ticket_response.get_c_s_key(tgs_sess_key)\n service_request = messages.ServiceRequestMessage(id_message, service_sess_key, ticket_response.grant)\n check, service_response = self.emitter.emit(service_request)\n if not check:\n raise ValueError(\"Something went wrong\")\n\n return service_response.verify_time(service_sess_key, id_message.timestamp)\n" }, { "alpha_fraction": 0.6336489319801331, "alphanum_fraction": 0.6336489319801331, "avg_line_length": 34.20512771606445, "blob_id": "359d5bef4208f184baad52348b39bcbca90763d7", "content_id": "73161f5210de54dc6bf8b477931481a0aa85e100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1373, "license_type": "no_license", "max_line_length": 98, "num_lines": 39, "path": "/client/emitter.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "import requests\nfrom shared import messages\n\n\nclass Emitter(object):\n remote_url = ''\n proxies = {}\n\n def __init__(self, _server):\n self.remote_url = _server\n\n def set_proxies(self, _proxies):\n self.proxies = _proxies\n\n def emit(self, message):\n method = ''\n if isinstance(message, messages.AuthenticateMeMessage):\n method = 'logmein'\n elif isinstance(message, messages.TicketRequestMessage):\n method = 'getmeticket'\n elif isinstance(message, messages.ServiceRequestMessage):\n method = 'getmeservice'\n\n data = message.serialize()\n r = requests.post('{}{}'.format(self.remote_url, method), data=data, proxies=self.proxies)\n\n response = None\n check = False\n if method == 'logmein':\n class_name, response = messages.AuthenticationResponseMessage.deserialize(r.content)\n check = class_name == 'AuthenticationResponseMessage'\n elif method == 'getmeticket':\n class_name, response = messages.TicketGrantingMessage.deserialize(r.content)\n check = class_name == 'TicketGrantingMessage'\n elif method == 'getmeservice':\n class_name, response = messages.ServiceGrantingMessage.deserialize(r.content)\n check = class_name == 'ServiceGrantingMessage'\n\n return check, response\n" }, { "alpha_fraction": 0.6431870460510254, "alphanum_fraction": 0.6547344326972961, "avg_line_length": 36.65217208862305, "blob_id": 
"09cae583e31c8a90fe5530a8866f498df7107283", "content_id": "f6b4b8ba9eb1ff1bc0d8b8e36bc5a8b9a32f6bee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "no_license", "max_line_length": 96, "num_lines": 23, "path": "/client.py", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "from client import emitter, kerborclient\nimport argparse\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"username\", help=\"Username to authenticate with\")\n parser.add_argument(\"password\", help=\"Password to use\")\n parser.add_argument(\"--proxy\", help=\"Use HTTP proxy server\")\n parser.add_argument(\"--server\", help=\"Set Kerbor server manually\", default='127.0.0.1:5000')\n\n args = parser.parse_args()\n http_emitter = emitter.Emitter('http://{}/'.format(args.server))\n if args.proxy:\n http_emitter.set_proxies({\n 'http': 'http://{}'.format(args.proxy),\n 'https': 'http://{}'.format(args.proxy)\n })\n\n client = kerborclient.KerborClient(args.username, args.password)\n client.set_emitter(http_emitter)\n\n print client.authenticate_for_service('default', 'database')\n" }, { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 19, "blob_id": "f5787fc57f23b6e2752b4a3b660b056dc2ef983c", "content_id": "44d7007ccea3d6462c00daca17b878980630b7d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/.data/README.md", "repo_name": "kibach/kerbor", "src_encoding": "UTF-8", "text": "# .data\nSimpleKV persists its data here\n" } ]
10
blacktrub/Instagram-API-python
https://github.com/blacktrub/Instagram-API-python
47a9173f490302fa0c788d39cc72f57eee6384c5
4529a016c552bdd57fd119521f891e4c71e257cd
e5f55e052f202ef8011ac263fe1b23755f6a1ca2
refs/heads/master
2020-07-01T12:48:21.861310
2016-11-22T08:14:48
2016-11-22T08:14:48
74,340,532
0
0
null
2016-11-21T08:07:40
2016-11-14T13:10:12
2016-11-11T17:49:52
null
[ { "alpha_fraction": 0.4082568883895874, "alphanum_fraction": 0.4330275356769562, "avg_line_length": 28.45945930480957, "blob_id": "1767a5997963f539d0df3fd46f458a3e4ac1f3d6", "content_id": "ea67a962a6ddbc53d7256d23b7775785a8303348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/settings.py", "repo_name": "blacktrub/Instagram-API-python", "src_encoding": "UTF-8", "text": "LOGGING_SETTINGS = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '[%(asctime)s: %(levelname)s/%(name)s] %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'error_file': {\n 'level': 'ERROR',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/var/log/instagram_api.log',\n 'maxBytes': 1024 * 1024 * 100, # 5 MB\n 'backupCount': 7,\n 'formatter': 'verbose',\n },\n 'access_file': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/var/log/instagram_api.log',\n 'maxBytes': 1024 * 1024 * 100, # 5 MB\n 'backupCount': 7,\n 'formatter': 'verbose',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['error_file', 'access_file'],\n 'level': \"INFO\",\n },\n }\n}\n" }, { "alpha_fraction": 0.7213656306266785, "alphanum_fraction": 0.7378854751586914, "avg_line_length": 21.674999237060547, "blob_id": "32bc3e132656a26284e408e002131721a6e1dae8", "content_id": "91cc7c7f22978311b9a7bfbfc570097c9e981df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "no_license", "max_line_length": 70, "num_lines": 40, "path": "/test.py", "repo_name": "blacktrub/Instagram-API-python", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\n# Before you start: You need to define constants.py file:\n# login = 'login'\n# password = 'password'\n# test_user_id = 0000000\n# test_user_post_id = 0000000\nfrom pprint import pprint\n\nfrom InstagramAPI import InstagramAPI\nfrom constants import login, password, test_user_id, test_user_post_id\n\napi = InstagramAPI(login, password)\napi.login()\n\napi.follow(test_user_id)\napi.unfollow(test_user_id)\n\napi.like(test_user_post_id)\napi.unlike(test_user_post_id)\n\nprint '## Followers ##'\napi.getUserFollowers(test_user_id)\npprint(api.LastJson)\n\nprint '## Following ##'\napi.getUserFollowings(test_user_id)\npprint(api.LastJson)\n\napi.comment(test_user_post_id, 'test comment')\n\nprint '## Media likers ##'\napi.getMediaLikers(test_user_post_id)\npprint(api.LastJson)\n\nprint '## Geo media ##'\napi.getGeoMedia(test_user_id)\npprint(api.LastJson)\n\napi.uploadPhoto('test_photo.jpg', caption='test load photo')\n\n" } ]
2
fvergaracl/gonsa2_backend
https://github.com/fvergaracl/gonsa2_backend
87a2a080e3c9c38104d19c553965ec1f353468cc
2e354179cd337e6e98a6f23eb0a5c3f9c5e7b57e
5186f8a96350b9729f1bb2d67617a528390caf14
refs/heads/master
2022-12-17T04:57:22.144534
2019-10-20T19:16:53
2019-10-20T19:16:53
216,267,179
0
0
MIT
2019-10-19T20:35:26
2019-10-20T19:16:55
2022-12-08T01:23:09
Python
[ { "alpha_fraction": 0.721030056476593, "alphanum_fraction": 0.733905553817749, "avg_line_length": 22.299999237060547, "blob_id": "dd5db6276a64a7d98b3c0c767003843abb11218b", "content_id": "d56874ba03bc22953bb43aad43ee3874ba615deb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "permissive", "max_line_length": 63, "num_lines": 10, "path": "/flask.wsgi", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\n\nactivate_this = '/var/www/api_gonsa2/venv/bin/activate_this.py'\nexecfile(activate_this, dict(__file__=activate_this))\n\nsys.path.insert(0, \"/var/www/api_gonsa2\")\n\nfrom init import app as application\n" }, { "alpha_fraction": 0.703208863735199, "alphanum_fraction": 0.7035667300224304, "avg_line_length": 47.17241287231445, "blob_id": "5ad260c293bc2d52e59a90e9fc6714c0cce80c34", "content_id": "5f32dbdfc7c7dd6cd652f8b348ec5fa64ed25be0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8383, "license_type": "permissive", "max_line_length": 774, "num_lines": 174, "path": "/models/only_students.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\nfrom datetime import datetime\n\ndef get_all_challenges_student(student_nick):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT all_c.id_number, all_c.title, all_c.photourl, all_c.summary, all_c.description, all_c.aim, all_c.created, all_c.last_edit, all_c.fk_category ,all_c.owner_fk_nick FROM all_challenges as all_c INNER JOIN class_challenges INNER JOIN class_list INNER JOIN classes WHERE class_challenges.FK_challenge_id_number = all_c.id_number AND class_list.id_number = class_challenges.FK_class_id_number AND classes.FK_class_id_number = class_list.id_number AND classes.FK_student_nick = %s;\"\n\t\tdata = (student_nick,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick) in cursor:\n\t\t\ttemp = [id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick]\n\t\t\tr.append(temp)\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\ndef get_status_challenge_by_id(nick_student,id_challenge_):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT finalized, FK_student_nick FROM Challenge_last_activity WHERE FK_student_nick = %s AND FK_challenge_id_number = %s;\"\n\t\tdata = (nick_student,id_challenge_,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (finalized, FK_student_nick) in cursor:\n\t\t\tif finalized == '1':\n\t\t\t\treturn 'finalized'\n\t\t\telse:\n\t\t\t\treturn 'init'\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn 'noinit'\n\texcept Exception as e:\n\t\tprint e\n\ndef get_all_challenges_student_finalized(student_nick):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT all_c.id_number, all_c.title, all_c.photourl, all_c.summary, all_c.description, all_c.aim, all_c.created, all_c.last_edit, all_c.fk_category 
,all_c.owner_fk_nick, cla.init_date, cla.end_date, cla.number_of_interaction, cla.last_response FROM all_challenges as all_c INNER JOIN class_challenges INNER JOIN Challenge_last_activity as cla INNER JOIN class_list INNER JOIN classes WHERE class_challenges.FK_challenge_id_number = all_c.id_number AND class_list.id_number = class_challenges.FK_class_id_number AND classes.FK_class_id_number = class_list.id_number AND classes.FK_student_nick = %s AND cla.FK_student_nick = classes.FK_student_nick and cla.FK_challenge_id_number = all_c.id_number and cla.finalized = '1' ORDER BY cla.number_of_interaction DESC; \"\n\t\tdata = (student_nick,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick, init_date, end_date, number_of_interaction, last_response) in cursor:\n\t\t\ttemp = [id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick, init_date, end_date, number_of_interaction, last_response]\n\t\t\tr.append(temp)\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\t\tprint 'fina'\n\ndef get_all_challenges_student_no_finalized(student_nick):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT all_c.id_number, all_c.title, all_c.photourl, all_c.summary, all_c.description, all_c.aim, all_c.created, all_c.last_edit, all_c.fk_category ,all_c.owner_fk_nick, cla.init_date, cla.number_of_interaction, cla.last_response FROM all_challenges as all_c INNER JOIN class_challenges INNER JOIN Challenge_last_activity as cla INNER JOIN class_list INNER JOIN classes WHERE class_challenges.FK_challenge_id_number = all_c.id_number AND class_list.id_number = class_challenges.FK_class_id_number AND classes.FK_class_id_number = class_list.id_number AND classes.FK_student_nick = %s AND cla.FK_student_nick = classes.FK_student_nick and cla.FK_challenge_id_number = all_c.id_number and cla.finalized = '0' ORDER BY cla.number_of_interaction DESC;\"\n\t\tdata = (student_nick,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick, init_date, number_of_interaction, last_response) in cursor:\n\t\t\ttemp = [id_number, title, photourl, summary, description, aim, created, last_edit, fk_category, owner_fk_nick, init_date, number_of_interaction, last_response]\n\t\t\tr.append(temp)\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\t\tprint ' no final'\n\ndef get_all_challenges_student_by_cat(student_nick, category_):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT all_c.id_number, all_c.title, all_c.photourl, all_c.summary, all_c.description, all_c.aim, all_c.created, all_c.last_edit, all_c.owner_fk_nick FROM all_challenges as all_c INNER JOIN class_challenges INNER JOIN class_list INNER JOIN classes WHERE class_challenges.FK_challenge_id_number = all_c.id_number AND class_list.id_number = class_challenges.FK_class_id_number AND classes.FK_student_nick = %s AND all_c.fk_category = %s;\"\n\t\tdata = (student_nick,category_,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (id_number, title, photourl, summary, description, aim, created, last_edit, owner_fk_nick) in 
cursor:\n\t\t\ttemp = [id_number, title, photourl, summary, description, aim, created, last_edit, owner_fk_nick]\n\t\t\tr.append(temp)\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\ndef add_text_library(FK_student_nick , FK_challenge_id_number , title_text, url_text , date_added, state):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"INSERT INTO Student_personal_library(FK_student_nick , FK_challenge_id_number , title_text, url_text, date_added, state) VALUES (%s, %s, %s, %s, %s, %s);\"\n data = (FK_student_nick , FK_challenge_id_number , title_text, url_text , date_added, state,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef check_is_in_text_library(FK_student_nick , FK_challenge_id_number , title_text, url_text):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT state FROM Student_personal_library WHERE FK_student_nick=%s AND FK_challenge_id_number=%s AND title_text=%s AND url_text=%s;\"\n\t\tdata = (FK_student_nick , FK_challenge_id_number , title_text, url_text,)\n\t\tcursor.execute(query, data)\n\t\tfor (state) in cursor:\n\t\t\treturn True\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn False\n\texcept Exception as e:\n\t\tprint e\n\t\treturn False\n\ndef update_text_library(FK_student_nick , FK_challenge_id_number , title_text, url_text, state):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE Student_personal_library SET state=%s WHERE FK_student_nick=%s AND FK_challenge_id_number=%s AND title_text=%s AND url_text=%s;\" \n data = (state,FK_student_nick , FK_challenge_id_number , title_text, url_text,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef getallmylibrary_by_challenge(FK_student_nick, FK_challenge_id_number):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT title_text, url_text, date_added, state FROM Student_personal_library WHERE FK_student_nick=%s AND FK_challenge_id_number=%s;\"\n\t\tdata = (FK_student_nick , FK_challenge_id_number,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (title_text, url_text, date_added, state) in cursor:\n\t\t\tr.append([title_text, url_text, date_added, state])\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\t\treturn []\n\n" }, { "alpha_fraction": 0.6047529578208923, "alphanum_fraction": 0.6110068559646606, "avg_line_length": 35.34090805053711, "blob_id": "c4acc5ef1fb9abe2d9f029626895202a89fd6ded", "content_id": "fceedf0872338c8705ceece8b08a74e4978a5d79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "permissive", "max_line_length": 386, "num_lines": 44, "path": "/models/categories.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\n\ndef get_all_categories_profesor():\n database_ = Database()\n config = database_.config\n cnx = 
mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT category, description FROM categories WHERE 1 = 1\"\n cursor.execute(query)\n r = []\n for (category, description) in cursor:\n r.append([category, description])\n cursor.close()\n cnx.close()\n return r\n\n\ndef get_all_categories_student(nick_student):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT all_c.fk_category, categories.description FROM all_challenges as all_c INNER JOIN class_challenges INNER JOIN class_list INNER JOIN categories INNER JOIN classes WHERE class_challenges.FK_challenge_id_number = all_c.id_number AND class_list.id_number = class_challenges.FK_class_id_number AND categories.category= all_c.fk_category AND classes.FK_student_nick = %s;\"\n data = (nick_student,)\n cursor.execute(query,data)\n r = []\n for (category, description) in cursor:\n noesta = True\n if len(r) !=0:\n for x in r:\n if x[0] == category:\n noesta = False\n if noesta:\n r.append([category, description,1])\n else:\n for x in r:\n if x[0] == category:\n x[2] = x[2] + 1\n else:\n r.append([category, description,1])\n cursor.close()\n cnx.close()\n return r\n" }, { "alpha_fraction": 0.6102719306945801, "alphanum_fraction": 0.6102719306945801, "avg_line_length": 30.571428298950195, "blob_id": "a7363451f1a6d34daabd453904258eaae194ae5a", "content_id": "0fe1f2e2bf0308517bcbb91e66bda3c2f9ba769a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "permissive", "max_line_length": 92, "num_lines": 21, "path": "/models/records.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\nfrom datetime import datetime\nfrom functions.general_functions import *\n\n\ndef insert_general_record(action_, data_, fk_nick_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"INSERT INTO records(action, data, dtime, fk_nick) VALUES (%s, %s, %s, %s);\"\n data = (action_, str(data_), now, fk_nick_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n except Exception as e:\n print e" }, { "alpha_fraction": 0.5787546038627625, "alphanum_fraction": 0.581462025642395, "avg_line_length": 29.043062210083008, "blob_id": "273ecdbdd1d094e6827db718d6f664b6b4669da3", "content_id": "c43a81617908828fb000e9643e8257a2af4839dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6279, "license_type": "permissive", "max_line_length": 201, "num_lines": 209, "path": "/models/users.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\nfrom functions.general_functions import encrypt_pass, random_salt, get_random\nfrom datetime import datetime, timedelta\n\ndef get_email(user_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT email FROM users WHERE nick = %s\"\n data = (user_,)\n cursor.execute(query, data)\n for (email) in cursor:\n r = str(email[0])\n return r\n cursor.close()\n cnx.close()\n return ''\n\ndef user_email_exist(user_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n 
cursor = cnx.cursor()\n query = \"SELECT nick, email FROM users WHERE nick = %s or email =%s\"\n data = (user_,user_,)\n cursor.execute(query, data)\n r = ['','']\n for (nick, email) in cursor:\n r = [str(nick), str(email)]\n return [True, r]\n cursor.close()\n cnx.close()\n return [False, r]\n\ndef get_salt_of_user(user_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT salt FROM users WHERE nick = %s\"\n data = (user_,)\n cursor.execute(query, data)\n for (salt) in cursor:\n r = str(salt[0])\n return r\n cursor.close()\n cnx.close()\n return r\n\ndef login_user(user_, passw_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n salt = get_salt_of_user(user_)\n passw_salted = encrypt_pass(passw_,salt)\n query = \"SELECT nick FROM users WHERE nick = %s AND password = %s \"\n data = (user_, passw_salted)\n cursor.execute(query, data)\n for (nick) in cursor:\n r = True\n return r\n cursor.close()\n cnx.close()\n r = False\n return r\n\ndef get_nick_by_email(email):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n salt = get_salt_of_user(user_)\n passw_salted = encrypt_pass(passw_,salt)\n query = \"SELECT nick, email FROM users WHERE email = %s \"\n data = (user_, passw_salted)\n cursor.execute(query, data)\n for (nick, email) in cursor:\n return nick\n cursor.close()\n cnx.close()\n r = False\n return r\n\ndef updatepass(user_ , pass_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n salt = random_salt()\n password = encrypt_pass(pass_, salt)\n semi_query = (\"UPDATE users \"\n \"SET password=%s , salt=%s \"\n \"WHERE nick=%s\")\n data = (str(password),str(salt), user_)\n cursor.execute(semi_query, data)\n cnx.commit()\n cnx.close()\n return True\n except Exception as e:\n print e\n return False\n\ndef get_rol_of_user(user_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT fk_roluser FROM users WHERE nick = %s;\"\n data = (user_,)\n cursor.execute(query, data)\n for (rol_id) in cursor:\n return rol_id[0]\n cursor.close()\n cnx.close()\n return ''\n\ndef get_recovery_token(user_):\n token = ''\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT token FROM recovery_pass_token WHERE used = 0 AND expire >= NOW() AND fk_nick = %s;\"\n data = (user_,)\n cursor.execute(query, data)\n for (token) in cursor:\n return True, token\n cursor.close()\n cnx.close()\n return False, token\n\ndef insert_token(user_):\n try:\n token = get_random(30)\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n exp = datetime.now() - timedelta(minutes=60*4)\n query = \"INSERT INTO recovery_pass_token(token, fk_nick, expire, used) VALUES (%s, %s, DATE_ADD(NOW(), INTERVAL 4 HOUR), 0);\"\n data = (str(token),user_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True , token\n except Exception as e:\n print e\n return False, ''\n \n\ndef updatestatus_recovery_token(user_,token_ ):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n semi_query = (\"UPDATE 
recovery_pass_token \"\n \"SET used=%i \"\n \"WHERE fk_nick=%s AND token=%s;\")\n data = (1, user_,token_)\n cursor.execute(semi_query, data)\n cnx.commit()\n cnx.close()\n return True\n except Exception as e:\n print e\n return False\n\ndef get_all_countries():\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT country, descr FROM countries WHERE 1= 1;\"\n cursor.execute(query)\n r = []\n for (country, descr ) in cursor:\n r.append([str(country), str(descr)])\n cursor.close()\n cnx.close()\n return r\n\n\ndef create_user(nick_, email_, sex_, school, class_, birth_year_, birth_month_, birth_day_, country_):\n try:\n token = get_random(30)\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n pass_ = get_random(8)\n salt = random_salt()\n password = encrypt_pass(pass_, salt)\n query = \"INSERT INTO users(nick, email, fk_roluser, password, salt, sex, school, class, birth_year, birth_month, birth_day, fk_country) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n data = (nick_, email_,'Student', password,salt, sex_, school, class_, birth_year_, birth_month_, birth_day_, country_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True , pass_\n except Exception as e:\n print e\n return False, ''\n" }, { "alpha_fraction": 0.5210374593734741, "alphanum_fraction": 0.533090353012085, "avg_line_length": 37.988765716552734, "blob_id": "c21ca2e30ca8f74e87ed33639d60aa461aa2e688", "content_id": "937b6375db3158aac2766d4d3e3094326253aa18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58990, "license_type": "permissive", "max_line_length": 221, "num_lines": 1513, "path": "/init.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom settings.config import Config, Database\n\nfrom functions.Verifiers import verify_new_pass, verify_pass_conditions, validemail\nfrom functions.general_functions import *\nfrom functions.email_sender import *\n\nfrom models.users import *\nfrom models.only_administrador import *\nfrom models.only_professor import *\nfrom models.only_students import *\nfrom models.categories import *\nfrom models.challenges import *\nfrom models.records import *\n\nfrom flask import Flask, jsonify, request\nimport jwt\nimport datetime\nimport ast\nimport json\nfrom functools import wraps\nfrom flask_cors import CORS\n\nUPLOAD_FOLDER = '/static/uploads'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 1600 * 1024 * 1024\n\nCORS(app)\nc = Config()\n\n\ndef get_info_token():\n tokenTEMP = request.headers['Authorization']\n token = tokenTEMP.split(\" \")\n data = jwt.decode(token[1], c.get_jwt_key())\n return data\n\ndef token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n try:\n tokenTEMP = request.headers['Authorization']\n token = tokenTEMP.split(\" \")\n if not tokenTEMP:\n # token not found\n return jsonify({'message': 'Token not found', 'code': 404})\n try:\n # token valid\n data = jwt.decode(token[1], c.get_jwt_key())\n except Exception as e:\n # token not valid\n return jsonify({'message': 'Token not valid', 'code': 403})\n return f(*args, **kwargs)\n except KeyError:\n # token not found\n return 
\ndef get_info_token():\n tokenTEMP = request.headers['Authorization']\n token = tokenTEMP.split(\" \")\n data = jwt.decode(token[1], c.get_jwt_key())\n return data\n\ndef token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n try:\n tokenTEMP = request.headers['Authorization']\n token = tokenTEMP.split(\" \")\n if not tokenTEMP:\n # token not found\n return jsonify({'message': 'Token not found', 'code': 404})\n try:\n # token valid\n data = jwt.decode(token[1], c.get_jwt_key())\n except Exception as e:\n # token not valid\n return jsonify({'message': 'Token not valid', 'code': 403})\n return f(*args, **kwargs)\n except KeyError:\n # token not found\n return jsonify({'message': 'Token not found', 'code': 404})\n return decorated\n\n############################## TEST URIS #############################\n\[email protected]('/', methods=['GET'])\ndef testing_root():\n return jsonify({'message': \"It works\", 'code': 200})\n\[email protected]('/islogged', methods=['GET'])\n@token_required\ndef testing_logged():\n \"\"\"\n Returns the data contained in the token, and thus validates that the token is valid\n \"\"\"\n data = get_info_token()\n return jsonify({'message': \"You're logged in\", 'data': data, 'code': 200})\n\n############################## END TEST URIS #############################\n\n\n############################## GET USER INFORMATION #############################\n\[email protected]('/userinformation', methods=['GET'])\n@token_required\ndef userinformation():\n \"\"\"\n Returns the user and role stored in the token\n \"\"\"\n data = get_info_token()\n return jsonify({'User': data['User'], 'Rol': data['Rol'], 'code': 200})\n############################## END USER INFORMATION #############################\n\n############################## USER URIS #############################\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\"\n Receives a user (email or nick) and a password\n Return: message with token (only if the information entered is correct)\n \"\"\"\n name_ = ''\n token = ''\n try:\n code = 400\n user_ = request.get_json()['user'].encode('utf-8').strip()\n pass_ = request.get_json()['pass'].encode('utf-8').strip()\n if user_ == '' or user_.strip() == '':\n message = 'You must enter an email or your user to enter the system'\n raise MyException(\"error\")\n if pass_ == '' or pass_.strip() == '':\n message = 'You must enter your password to enter the system'\n raise MyException(\"error\")\n all_ok = login_user(user_, pass_)\n if all_ok:\n code = 200\n message = 'Welcome'\n rol_ = get_rol_of_user(user_)\n name_ = user_\n insert_general_record('login', {}, name_)\n token = jwt.encode(\n {'User': name_, 'Rol': rol_, 'exp': datetime.datetime.utcnow() + c.get_jwt_time()},\n c.get_jwt_key())\n else:\n code = 400\n message = 'User or password not valid'\n user_ = ''\n except MyException as e:\n return jsonify({'code': code,\n 'message': message,\n 'name': name_,\n 'token': token})\n except Exception as e:\n code = 500\n message = 'Something went wrong'\n return jsonify({'code': code,\n 'message': message,\n 'name': name_,\n 'token': token})
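\n# --- illustrative sketch, not part of the app: a typical client session against the\n# routes above, assuming the API is served at a hypothetical http://localhost:5000:\n#\n# import requests\n# base = 'http://localhost:5000'\n# r = requests.post(base + '/login', json={'user': 'alice', 'pass': 'secret'}).json()\n# if r['code'] == 200:\n# hdr = {'Authorization': 'Bearer ' + r['token']}\n# print requests.get(base + '/islogged', headers=hdr).json()\n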
\[email protected]('/changepass', methods=['POST'])\n@token_required\ndef changepass():\n \"\"\"\n Receives the old password and the new password twice\n Return: message with the result of this operation (only if the information entered is correct)\n \"\"\"\n user_ = get_info_token()['User']\n code = 400\n try:\n old_pass = request.get_json()['oldpass'].encode('utf-8').strip()\n new_pass1 = request.get_json()['newpass1'].encode('utf-8').strip()\n new_pass2 = request.get_json()['newpass2'].encode('utf-8').strip()\n if old_pass == '' or old_pass.strip() == '':\n message = 'You must enter your old password'\n raise MyException(\"error\")\n if new_pass1 == '' or new_pass1.strip() == '' or new_pass2 == '' or new_pass2.strip() == '':\n message = 'You must enter your new password twice'\n raise MyException(\"error\")\n all_ok, message = verify_new_pass(user_, old_pass, new_pass1, new_pass2)\n if all_ok:\n code = 200\n if updatepass(user_, new_pass1):\n ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n # notification parameters: platform name, username, ip, datetime, contact email\n now = datetime.datetime.now()\n email_ = get_email(user_)\n if email_ != '':\n parameters = [c.get_platform_name(), user_, ip, now.strftime(\"%Y-%m-%d %H:%M\"), c.get_email_contact(), c.get_email_contact()]\n response_bool, response = email_to_send(parameters, 'pass_changed', 'Your password has been changed', email_)\n message = 'The password was successfully updated, and an email was sent. (' + email_ + ')'\n else:\n message = 'The password was successfully updated'\n insert_general_record('changepass', {\n \"user_\": \"updated\"\n }, user_)\n else:\n message = 'Could not update the password'\n except Exception as e:\n print e\n code = 500\n message = 'Something went wrong'\n return jsonify({'code': code,\n 'message': message})\n\[email protected]('/recoverpassbyemail', methods=['POST'])\ndef recoverpassbyemail():\n \"\"\"\n Receives a user or email to define a new password\n Return: sends an email with a link to define a new password\n \"\"\"\n code = 400\n try:\n email = request.get_json()['user_email'].encode('utf-8').strip()\n if email == '' or email.strip() == '' or email == None:\n message = 'You must enter an email or your user to recover your password.'\n raise MyException(\"error\")\n resp_bool, user_email_ = user_email_exist(email)\n if not resp_bool:\n message = 'The user or email entered is not linked to any account.'\n else:\n have_valid_token, token = get_recovery_token(user_email_[0])\n if have_valid_token:\n message = 'You already have a request registered in our system; remember that this request is valid for only 4 hours. Please check your email'\n else:\n code = 200\n response, token = insert_token(user_email_[0])\n ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n now = datetime.datetime.now()\n parameters = [c.get_platform_name(), user_email_[0], c.get_api_host() + '/recovery/' + token, ip, now]\n response_bool, response = email_to_send(parameters, 'password_recovery', 'Reset your password', user_email_[1])\n if response_bool:\n message = 'An email was sent to restore your password'\n else:\n message = 'An error was generated in the server, the email could not be sent correctly. Please communicate with the administrator'\n username_ = get_nick_by_email(email.strip())\n insert_general_record('recoverpassbyemail', {\n \"message\": message}, username_)\n except MyException as e:\n return jsonify({'code': code,\n 'message': message})\n except Exception as e:\n print e\n code = 500\n message = 'Something went wrong'\n return jsonify({'code': code,\n 'message': message})
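\n# --- illustrative note, not part of the app: the recovery lifecycle implied by the\n# route above and by models/users.py is: (1) insert_token() stores a 30-character\n# random token with expire = NOW() + 4 HOUR and used = 0; (2) the email links to\n# c.get_api_host() + '/recovery/' + token; (3) POST /newpassword/<token> below burns\n# the token via updatestatus_recovery_token() and sets the password via updatepass().\n# Hypothetical client calls against a local server:\n#\n# import requests\n# requests.post('http://localhost:5000/recoverpassbyemail', json={'user_email': '[email protected]'})\n# # then, with the token taken from the emailed link:\n# requests.post('http://localhost:5000/newpassword/<token>',\n# json={'email': '[email protected]', 'pass1': 'N3wpass!', 'pass2': 'N3wpass!'})\n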
\[email protected]('/newpassword/<token>' , methods=['POST'])\ndef newpassword_bytoken(token):\n \"\"\"\n Receives a token in the URI; with this method the user can change the password, provided an email was first requested with the 'recoverpassbyemail' method\n Return: a message indicating whether the operation was successful\n \"\"\"\n try:\n code = 400\n email = request.get_json()['email'].encode('utf-8').strip()\n pass1 = request.get_json()['pass1'].encode('utf-8').strip()\n pass2 = request.get_json()['pass2'].encode('utf-8').strip()\n if not (pass1 == pass2):\n message = 'Passwords entered do not match'\n elif email == '' or email.strip() == '' or email == None:\n message = 'You must enter an email.'\n raise MyException(\"error\")\n elif pass1 == '' or pass1.strip() == '' or pass1 == None:\n message = 'You must enter a password.'\n raise MyException(\"error\")\n else:\n all_ok, msg = verify_pass_conditions(pass1)\n if all_ok:\n resp_bool, user_email_ = user_email_exist(email)\n if resp_bool:\n token_bool, token_db = get_recovery_token(user_email_[0])\n # the token from the URI must match the pending token stored for this account\n if token_bool and str(token_db) == str(token):\n updatestatus_recovery_token(user_email_[0], token_db)\n updatepass(user_email_[0], pass1)\n ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n now = datetime.datetime.now()\n parameters = [c.get_platform_name(), user_email_[0], ip, now.strftime(\"%Y-%m-%d %H:%M\"), c.get_email_contact(), c.get_email_contact()]\n response_bool, response = email_to_send(parameters, 'pass_changed', 'Your password has been changed', email)\n code = 200\n message = 'The password was successfully updated'\n else:\n code = 400\n message = 'The provided email is not linked with the token'\n username_ = get_nick_by_email(email.strip())\n insert_general_record('newpassword/[token]',\n {\"message\": message,\n \"token\": token,\n \"code\": code}, username_)\n else:\n code = 400\n message = 'The provided email is not linked with the token'\n else:\n message = msg\n except MyException as e:\n return jsonify({'code': code,\n 'message': message})\n except Exception as e:\n print e\n code = 500\n message = 'Something went wrong'\n return jsonify({'code': code,\n 'message': message})\n \n############################## END USER URIS #############################\n\n############################## COMMON URIS #############################\n
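\n# --- illustrative sketch, not part of the app: the routes in this section all share\n# one shape -- decode the token, branch on data['Rol'] ('Professor' vs 'Student'),\n# call the matching model helper, log with insert_general_record(), and return a\n# {'data', 'code'} payload. Condensed skeleton (example_query_* are hypothetical):\n#\n# @app.route('/example')\n# @token_required\n# def example_():\n# data = get_info_token()\n# if data['Rol'] == 'Professor':\n# payload, code = example_query_professor(data['User']), 200\n# elif data['Rol'] == 'Student':\n# payload, code = example_query_student(data['User']), 200\n# else:\n# payload, code = 'Forbidden', 403\n# insert_general_record('example', {'code': code}, data['User'])\n# return jsonify({'data': payload, 'code': code})\n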
\n\[email protected]('/getallcategories', methods=['GET'])\n@token_required\ndef getallcategories_():\n \"\"\"\n Returns all defined categories\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n categories = []\n code = 200\n if rol_ == 'Professor':\n try:\n categories = get_all_categories_profesor()\n code = 200\n except Exception as e:\n print e\n code = 500\n elif rol_ == 'Student':\n try:\n categories = get_all_categories_student(user_)\n code = 200\n except Exception as e:\n print e\n code = 500\n insert_general_record('getallcategories', \n {'categories': categories,\n 'code': code}\n , user_)\n return jsonify({'categories': categories, 'code': code})\n\[email protected]('/getclassbyid/<numid>', methods=['GET'])\n@token_required\ndef getclasesbyid_(numid):\n \"\"\"\n Returns the class with the given id [number]\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n data_response = ''\n code = 403\n if rol_ == 'Professor':\n try:\n bool_, data_response = getclassbyid_(data['User'], numid)\n if bool_:\n code = 200\n else:\n data_response = 'Forbidden'\n code = 403\n except Exception as e:\n print e\n data_response = 'Internal Error'\n code = 500\n elif rol_ == 'Student':\n try:\n bool_, data_response = getclassbyid_(data['User'], numid)\n if bool_:\n code = 200\n else:\n data_response = 'Forbidden'\n code = 403\n except Exception as e:\n print e\n code = 500\n insert_general_record('getclassbyid/[id]', \n {'data': data_response,\n 'code': code}\n , user_)\n return jsonify({'data': data_response, 'code': code})\n\[email protected]('/getallchallengesinclass/<idclass>', methods=['GET'])\n@token_required\ndef getallchallengesinclass_(idclass):\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n data_response = []\n code = 400\n if rol_ == 'Professor':\n data_response = get_all_challenges_in_class(idclass)\n code = 200\n elif rol_ == 'Student':\n challenges_ = get_all_challenges_in_class(idclass)\n finalized_ = []\n initialized_ = []\n no_init = []\n for x in challenges_:\n response = get_status_challenge_by_id(user_, x[0])\n if response == 'finalized':\n finalized_.append([x])\n elif response == 'init':\n initialized_.append([x])\n else:\n no_init.append([x])\n code = 200\n insert_general_record('getallchallengesinclass/[id]', \n {'Finalized': len(finalized_), \n 'initialized': len(initialized_), \n 'no_init': len(no_init), \n 'code': code}\n , user_)\n return jsonify({'Finalized': finalized_, 'initialized': initialized_, 'no_init': no_init, 'code': code})\n else:\n data_response = 'Forbidden'\n code = 403\n insert_general_record('getallchallengesinclass/[id]', \n {'challenges': len(data_response), 'code': code}\n , user_)\n return jsonify({'challenges': data_response, 'code': code})\n\n\[email protected]('/getchallegebyid/<numid>', methods=['GET'])\n@token_required\ndef getchallegebyid_(numid):\n \"\"\"\n Returns the challenge with the given id\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n data_response = ''\n code = 403\n if rol_ == 'Professor':\n try:\n bool_, data_response = get_challenge_by_id_p(data['User'], numid)\n if bool_:\n code = 200\n else:\n data_response = 'Forbidden'\n code = 403\n except Exception as e:\n print e\n data_response = 'Internal Error'\n code = 500\n elif rol_ == 'Student':\n try:\n bool_, data_response = get_challenge_by_id_p(data['User'], numid)\n if bool_:\n code = 200\n response = get_status_challenge_by_id(user_, numid)\n return jsonify({'data': data_response, 'status': response, 'code': code})\n else:\n data_response = 'Forbidden'\n code = 403\n except Exception as e:\n print e\n code = 500\n insert_general_record('getchallegebyid/[id]', \n {'data': data_response,\n 'code': code}\n , user_)\n return jsonify({'data': data_response, 'code': code})\n\n############################## END COMMON URIS #############################\n
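\n# --- illustrative note, not part of the app: get_status_challenge_by_id(user, id) is\n# assumed to return 'finalized', 'init', or anything else for not-yet-started; the\n# status routes below just partition challenge lists on that value, e.g.:\n#\n# buckets = {'finalized': [], 'init': [], 'other': []}\n# for ch in challenges:\n# status = get_status_challenge_by_id(user_, ch[0])\n# buckets.get(status, buckets['other']).append(ch)\n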
\n############################## CHALLENGES URIS #############################\[email protected]('/getallchallenges_status', methods=['GET'])\n@token_required\ndef getallchallenges_status_():\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n message = 'You are not allowed to access this page'\n code = 403\n finalized_challenge = []\n no_finalized_challenge = []\n no_init_challenge = []\n discard_id = []\n if rol_ == 'Student':\n all_challenges = get_all_challenges_student(user_)\n finalized_challenge = get_all_challenges_student_finalized(user_)\n for x in finalized_challenge:\n discard_id.append(x[0])\n no_finalized_challenge = get_all_challenges_student_no_finalized(user_)\n for x in no_finalized_challenge:\n discard_id.append(x[0])\n for x in all_challenges:\n if x[0] not in discard_id:\n no_init_challenge.append(x)\n message = 'ok'\n code = 200\n insert_general_record('getallchallenges_status', \n {\"code\": code}\n , user_)\n return jsonify({'message': message,\n 'finalized_challenge': finalized_challenge,\n 'no_finalized_challenge': no_finalized_challenge,\n 'no_init_challenge': no_init_challenge,\n 'code': code})\n\[email protected]('/getallchallenges', methods=['GET'])\n@token_required\ndef getallchallenges_():\n \"\"\"\n Returns all challenges visible to the caller\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n challenges = []\n code = 500\n if rol_ == 'Professor':\n try:\n challenges = get_all_challenges_p(user_)\n code = 200\n except Exception as e:\n print e\n elif rol_ == 'Administrador':\n # Administrador - pending: administrators see the professor listing for now\n try:\n challenges = get_all_challenges_p(user_)\n code = 200\n except Exception as e:\n print e\n elif rol_ == 'Student':\n challenges = get_all_challenges_student(user_)\n code = 200\n else:\n challenges = []\n code = 403\n insert_general_record('getallchallenges', \n {\"code\": code}\n , user_)\n return jsonify({'challenges': challenges, 'code': code})\n\[email protected]('/getallchallenges/<category_>', methods=['GET'])\n@token_required\ndef getallchallenges_by_cat(category_):\n \"\"\"\n Returns all challenges in the given category visible to the caller\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n challenges = []\n code = 500\n if rol_ == 'Professor':\n try:\n challenges = get_all_challenges_p_by_cat(user_, category_)\n code = 200\n except Exception as e:\n print e\n elif rol_ == 'Administrador':\n # Administrador - pending: same listing as professors for now\n try:\n challenges = get_all_challenges_p_by_cat(user_, category_)\n code = 200\n except Exception as e:\n print e\n else:\n challenges = get_all_challenges_student_by_cat(user_, category_)\n code = 200\n insert_general_record('getallchallenges/[category]', \n {\"code\": code, \"category\": category_}\n , user_)\n return jsonify({'challenges': challenges, 'code': code})\n\n############################## END CHALLENGES URIS #############################\n\n############################## ONLY PROFESSOR #############################\[email protected]('/getallnickstudents', methods=['GET'])\n@token_required\ndef getallnickstudents_():\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n if (data['Rol'] == 'Professor'):\n data_ = get_all_nicks_tudents()\n code = 200\n insert_general_record('getallnickstudents', \n {\"code\": code}\n , data['User'])\n return jsonify({'data': data_, 'code': code})\n else:\n insert_general_record('getallnickstudents', \n {\"code\": code}\n , data['User'])\n return jsonify({'message': message, 'code': code})\n\[email protected]('/getallcountries', methods=['GET'])\n@token_required\ndef get_all_countries_():\n data_ = get_all_countries()\n code = 200\n data = get_info_token()\n insert_general_record('getallcountries', \n {\"code\": code,\n \"countries\": len(data_)}\n , data['User'])\n return jsonify({'data': data_, 'code': code})\n\[email protected]('/getstudentinclass/<id_class>', methods=['GET'])\n@token_required\ndef getstudentinclass_(id_class):\n data = 
get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n users = []\n if (data['Rol'] == 'Professor'):\n users = get_all_students_in_class_by_id(id_class,data['User'])\n message = 'ok'\n code = 200\n insert_general_record('getstudentinclass/[id]', \n {\"code\": code,\n \"users\": len(users)}\n ,data['User'])\n return jsonify({'message': message, 'users': users,'code': code})\n\[email protected]('/getstatusresponses/by_challenge/<id_challenge>', methods=['GET'])\n@token_required\ndef getstatusresponses_by_cha_(id_challenge):\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n users = []\n rol_ = data['Rol']\n user_ = data['User']\n waiting_evaluation_challenge = []\n noinit_challenge = []\n in_challenges = []\n evaluated_challenge = []\n if (rol_ == 'Professor'):\n students = get_all_students_by_id_challenge(id_challenge)\n # without_init\n for x in students:\n print x[0]\n bool_, info = student_init_it(x[0],id_challenge)\n if not bool_:\n noinit_challenge.append(x[0])\n else:\n if info[7] == \"0\":\n in_challenges.append(info)\n else:\n #\n id_last_challenge_activity = info[0]\n bool_, marks_ = it_challenge_has_been_eva(info[0], user_)\n if bool_:\n evaluated_challenge.append([info[0],info[1],info[2],info[3],info[4],info[5],info[6],info[7], marks_])\n else:\n waiting_evaluation_challenge.append(info)\n message = 'ok'\n code = 200\n insert_general_record('getstatusresponses/by_challenge/[id]', \n {'message': message,\n 'waiting_marks': len(waiting_evaluation_challenge),\n 'without_init': len(noinit_challenge),\n 'users_in_challenge': len(in_challenges),\n 'users_evaluated': len(evaluated_challenge),\n 'code': code}\n ,user_)\n return jsonify({\n 'message': message,\n 'waiting_marks': waiting_evaluation_challenge,\n 'without_init': noinit_challenge,\n 'users_in_challenge': in_challenges,\n 'users_evaluated': evaluated_challenge,\n 'code': code})\n\[email protected]('/evaluate_student', methods=['POST'])\n@token_required\ndef evaluate_student_():\n try:\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403 \n id_response_ = request.get_json()['idresponse'].encode('utf-8').strip()\n marks_ = request.get_json()['marks'].encode('utf-8').strip()\n evaluator_ = data['User']\n if (data['Rol'] == 'Professor'):\n bool_finalized, challenge_id = challenge_last_activity_finalized(id_response_)\n if not bool_finalized:\n insert_general_record('evaluate_student', \n {\"id_response\": id_response_,\n \"marks\": marks_,\n \"message\": \"The student's challenge has not yet been finalized\", \"code\": 400}\n ,evaluator_)\n return jsonify({\"message\": \"The student's challenge has not yet been finalized\", \"code\": 400})\n bool_permissions = have_permisssions_student_resp(challenge_id, evaluator_)\n if not bool_permissions:\n insert_general_record('evaluate_student', \n {\"id_response\": id_response_,\n \"marks\": marks_,\n \"message\": \"You don't have the permission enough to evaluate the student in this challenge\", \"code\": 400}\n ,evaluator_)\n return jsonify({\"message\": \"You don't have the permission enough to evaluate the student in this challenge\", \"code\": 400})\n bool_has_been_evaluate, marks = it_challenge_has_been_eva(id_response_, evaluator_)\n if bool_has_been_evaluate:\n insert_general_record('evaluate_student', \n {\"id_response\": id_response_,\n \"marks\": marks_,\n \"message\": \"You have already evaluated this student in this challenge\", \"marks\": marks , \"code\": 400}\n 
,evaluator_)\n return jsonify({\"message\": \"You have already evaluated this student in this challenge\", \"marks\": marks , \"code\": 400})\n else:\n bool_all_ok = insert_new_mark_challenge_last_activity(id_response_, marks_, evaluator_)\n if bool_all_ok:\n message = 'Marks entered into the system'\n code = 200\n else:\n message = 'Something went wrong'\n code = 400\n except Exception as e:\n print e\n code = 500\n message = 'Internal Error'\n insert_general_record('evaluate_student', \n {'message': message, 'code': code}\n ,evaluator_)\n return jsonify({'message': message, 'code': code})\n\n\[email protected]('/createstudents', methods=['POST'])\n@token_required\ndef createstudents_():\n try:\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n inals_students = []\n filter_invalid_email = []\n filter_used_email = []\n filter_used_user = []\n finals_students = []\n filter_invalid_email = []\n filter_invalid_country = []\n filter_used_email = []\n filter_used_user = []\n filter_error_info = []\n if (data['Rol'] == 'Professor'):\n students = request.get_json()['newstudents']\n if isinstance(students, list):\n code = 400\n message = 'Bad request'\n for x in range(0,len(students)):\n nick_ = str(students[x]['nick'].encode('utf-8').strip()).lower()\n email_ = str(students[x]['email'].encode('utf-8').strip()).lower()\n sex_ = str(students[x]['sex'].encode('utf-8').strip()).lower()\n school = str(students[x]['school']).lower()\n class_ = str(students[x]['class']).lower()\n birth_year_ = str(students[x]['birth_year']).lower()\n birth_month_ = str(students[x]['birth_month']).lower()\n birth_date_ = str(students[x]['birth_date']).lower()\n country_ = str(students[x]['country'].encode('utf-8').strip()).lower()\n if not validemail(email_):\n filter_invalid_email.append(email_)\n elif user_exist_tocreate_user(nick_):\n filter_used_user.append(nick_)\n elif email_exist_tocreate_user(email_):\n filter_used_email.append(email_)\n elif not country_exist_tocreate_user(country_):\n filter_invalid_country.append(country_)\n else:\n bool_ , pass_ =create_user(nick_, email_, sex_, school, class_, birth_year_, birth_month_, birth_date_, country_)\n if bool_: \n parameters = [c.get_platform_name(), c.get_platform_name(), c.get_web_url(), nick_ , pass_]\n email_to_send(parameters,'new_account','Your new account is ready to use', email_)\n finals_students.append([nick_,email_])\n code = 200\n message = 'ok'\n else:\n filter_error_info.append([nick_,email_])\n \n else:\n message = 'The data entered does not have the proper format'\n code = 400\n except Exception as e:\n code = 500\n message = 'Internal Error'\n print e\n insert_general_record('createstudents', \n {'studentscreated': len(finals_students),\n 'info_error': len(filter_error_info),\n 'invalid_emails': len(filter_invalid_email),\n 'invalid_country': len(filter_invalid_country),\n 'used_emails': len(filter_used_email),\n 'used_usernames': len(filter_used_user),\n 'message': message,\n 'code': code}\n ,data['User'])\n return jsonify(\n {'studentscreated': finals_students,\n 'info_error':filter_error_info,\n 'invalid_emails': filter_invalid_email,\n 'invalid_country': filter_invalid_country,\n 'used_emails': filter_used_email,\n 'used_usernames': filter_used_user,\n 'message': message,\n 'code': code})\n \n\n\[email protected]('/challenge/new', methods=['POST'])\n@token_required\ndef new_challenge_professor():\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n title_ = 
''\n summary_ = ''\n description_ = ''\n aims_ = ''\n photo_ = ''\n category_ = ''\n user_ = ''\n id_challenge_ = -1\n if (data['Rol'] == 'Professor'):\n try:\n title_ = str(request.get_json()['title'].encode('utf-8').strip())\n summary_ = str(request.get_json()['summary'].encode('utf-8').strip())\n description_ = str(request.get_json()['description'].encode('utf-8').strip())\n aims_ = str(request.get_json()['aims'].encode('utf-8').strip())\n exist_photo = False\n try:\n exist_photo = True\n photo_ = str(request.get_json()['photo'].encode('utf-8').strip())\n except Exception as e:\n print e\n photo_ = \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATAAAAE1CAYAAABgNrxAAAAgAElEQVR4nO2dW5Nj13Xf1977XHBtkrIV2W5ZpElL7NFcOeS0bJerkvK4kiqV44ipPCVVynNKfsyL/QHiLyA/p6QPQDKx4rJLGiVxOZVoSPE+7NaVosimKFkWyUZ343LO3ju19gU4QOOgcdAHDRz0+rGG04PuBg4OcP5Ye+21/otprYEgCKKKcHrVCIKoKiRgBEFUFhIwgiAqCwkYQRCVhQSMIIjKQgJGEERlIQEjCKKykIARBFFZSMAIgqgsJGAEQVQWEjCCICoLCRhBEJWFBIwgiMpCAkYQRGUhASMIorKQgBEEUVlIwAiCqCwkYARBVBYSMIIgKgsJGEEQlYUEjCCIykICRhBEZSEBIwiispCAEQRRWUjACIKoLCRgBEFUFhIwgiAqCwkYQRCVhQSMIIjKQgJGEERlIQEjCKKykIARBFFZSMAIgqgsJGAEQVQWEjCCICoLCRhBEJWFBIwgiMpCAkYQRGUhASMIorKQgBEEUVkCeukujv/70itfDkR4UK/X71/beaJzWZ43QSwLprWmk3tB/PXffus9JWEbgHWEYIfAAFpx9EIQioM4ivfr9fq3rl27QsJGEHNCAnaB/Lf/8c1DzoM2PqJSEjhn+AKAVAq00sA4A8HFQRAEB4EQB1EU7IchRmy1e7duXt2/LOeJIOaFBOyC+M7Lrz578N7PngvDGnAuQGsFjAEw8zUKmgaFQmZeD/uaCG2FDQA6KHZciAPBxWEYif04EvthFGHUdv/6tSsHm38GCeI0lAO7ILr9wZ0giMy+if3MYCBlCjJJgQMzQiY4h/EPFAZBwFHo2niz1GpnkAxgkMJut2tFD8XtnXfeO+Scdxjj0GjU7oVhsFev11586tb1+xt8SgmCIrCL4ht/922T/9LaCBIEQQBpOgChATgfbQZnXw/JTh8cfp8pDQJFjzHQGMUxZraTNWOQygSMQjJ2YG5nzOTUwjDca281XxBCHNRqtfvXrj5JuTai8pCAXRDf+Nv/+Z7WKGBWsFBcMA/GJs4/3m7+BgaKWcHyrxH+nhE7t9TEnwEY/YzSCoQw0gYa/0Mdc/eL/8bH03556nNtQXAQR+F+FAf7QRjuP01RG1EhaAl5QTAmOlImJvqyOS/MgXETNWUZyZke/sOLGoqUlPL0TzPzQ8BAgJq4fXR/DEQYmcfG/JsC2O4l6TYMEjg66VnBAw3vvvcLFMED3DxAcWvE+l4URfs2avs8RW3EWkER2AXw6msPdt557+f3OWdt/2gmgmJT1ogZsq/NWT97HiaPxT8uimzAXMymdQdFMgyCvTiO91HchGAH+PXTt2/cW9+zT2wyFIFdAN1u967Ssi1YaB7M7zaeJUpli9bkY05+bXJqGfFCJAjzNxfMbCT0U73bT7u7HLTJu0mtOgfvf4BL0sMoDPdFEBzU6+H9MA734zDepx1SYpmQgF0AJyfdu6bOKyMQ2cR9HtOiomXhBcsfG+bS8G8tARSMHzMuQcEcnwahdBtzb0pBu9tPtlk/gc5x58sal8icw0/eea/jc21C8INWs/aCCIODp2/dpFwbcW5oCXkB/M3ffnOvn7KdMAhc7ms82pnEJ+WzEZP/nWWQzbH5Y/KPx4UwebehwBkRwySeNqk3/D53v48/M3p+o9q2USaOgZYD4IybTYQwsnk2xngnisR+o1G/h7k3itqIeSEBuwD++zf+7hB41MZiVH+BCycMWfC1EIwdYISCX/f7g13zbRQCsJX65uLHHczM72VFLvt6otSoBZ5eVjg1h4yocSdMyhwTHkve72d/3u642mVzZAp3R0W7+B9zEofPkgF0GGOHQogOln5wHnRarfCFOI72bt64Qd0IxBgkYEvm9df3t3/63vvfUYxtT4uf/PkXjEPS78KnPvXrf/b7v3/nr7I/8+Ct77ePj0/+uNvt3U3T9LfSRG6nqdzWWm4ppdv28meuzMLmrIaipu0S0IgGP70kxd/J5uQmozEMuMzttpDWfd9GWHlmJud9T03+vi8R0fY8daIo2guj6CAMgv1mTdzDjQSK2i4nJGBL5v7915792S9+8RwX09ONNgoBCLCkQg46jz7+2Pa1K5/bYgwO0lS1g4BvZX781EX65gMUt+Nnu93u7mCQ7PT7ya6J1xSYHU/BmS2xwGgoCMbEwZRlYC0auPoyZuvPrGDYmMhHWf73/K9j/uuimLbEHQosmCi2w33UFoR7YRS+X4vj+/VG7d6N658nYdtgSMCWzP/+++989cOPDr8ioiBblDWE+/YhXCamSedP//Rfbkml24KzWTVX2/4L7Cbi7LSwId99+fW7x8cnd/v9/i5GbDLVbSnTbYlLuSAw+SuTl8LaMOWEwZWVoejhsaUY+Whw0Rjz33UR2CIL1OL46NAXAGdzhIGLIM0frdxC1CTk8ElBIIIO1rUFAda2RQdBEB40GgHm2vZvXL9G4lZxSMCWzLfu/cNzJ93es0yI2Q8kFUZL+1/84h9dkQDbYkq0dRZKqTbno4hNKWV6JCd/DZe13V7vvW63+1cnxyd3cQdxlD/TWwBYr8asYAXh2BLTC6792ekCVvZ7arT7qU9FYShanAnr7OGWwOjuEWAfKX7t8m3SbqeanVGAFGyDPD8c7pAG/P0wCA/a7ebXb1y/SsJWEaiMYsmkSbotuMiNVfDiMgl9UNCIG3t42yLiBfZCR7HqZP499v2B1ChUW9dv7GBU9ek0VYdROC5wL7702rNY9jEY9HekVO3BoHclUbJtjtO1Mvl2pNA0py+fyWWjFyorqMztdo52SvH2VGt8viAENyEqQ0kLvABzwOeWat1OBsk2dsfb6K4H//TLX/35T955z9S1YZSGXQjW0ii+d/sparNaNygCWzJ//Y1vHgoRtuW09a
MTsDAMIRn04dcefuQv/vAPn/nLiz7GRKXtgIst31uZSnUYiHFhe/mNvd1er3en3+tfOemiwA12cBM1KybMNZgzzqDUgo+8O8ONBSZd15XrDWWu19RVb3AUMA0ZqyJrYcRyIkWh2djGB3N9pFjXZnZJuejgcjSK4r04il7E6K1Wg/u3bt2iHdIVQAK2RF566bW7H3zw8+c5j9s65ypUzBobyiTpPPrYb1+5eW1n3ZYvJt8mlT7My8thrg03EXq9/p1+v7crFWyZ3JrbSPDlH3bHc3K3c3Q/eSIvcrsSdKao1jaw+xZSLz5Tf29UlgbZdnf7Lz6sY/PHhuLHua91s4+ZLfxlIFHYDnATAQUNI7cwDEzerV6P7t24Qbm2ZUECtkT+4e//33/58KMP/xyXLypHwFJcPGJ+BhT8u3/zJ0xp3fYWOGuOETYl9SEX04/3jTf2t3u9/i5GbL1u704q023Ogg7m6rRW2ygGw40BzFflPF+u885ePgym7pmc/XsTu67zocHqmc8V8lHeDmTH6qTNt6FPW4w7pPXo/s0b5LJ7XkjAlsiLL77yrJaqHUTx11KVwLSFlb9oA8H/+JmnNqMpGgWKcb7FciI3rGuzEVvX7I6iaNu13/T7Cxg70JqZzQlXBNu20Re089eXCx/7lmteB/d47bN+h3He0cr+nNbDTgtjMKkk9pD6aA03StQW5t+USrYY18b5Qwibb8OoDfOYDz/U/OrVq1fJ+WMOSMCWDJYuiIBt5T2KzJSDuktx05cbdkkq1aEQp3dICaIIJGAEsUQ0JNsMwuGHklIDU+qiND/UeswNZGuYrtP6kBVIIzx4sGci0atXdy7dBwIJGEFUnP/17X/42q8++ujLQRR1QhEcRHG8F8fRi3EUGXuj209d3djyD6oDI4iKI0TQiaIYgjBqS6l2ut3ezvFJ91n3rDrvH7x/CK5OEMs/ms3GPXT+uHnzSuU3EUjACKLiaMYPpSlbUc5enGe3NtrSbHpgMz7AoN/f6fR6z+p/0vDjd98zQ184Z7iJcBDXai/Wa/H9KI73b1/fqUTURgJGEBtOvnmmNiP7cFe33+9v93q93Y9AfwV3y3/27rs41Qr7SA9R0GpxdN91JezfuL4+zh8kYARxaWFDG6Ysrgh4Gxv8Eymh1+3sdhj7sv053XnnnXfB9pGGB7U4fjGOo/tob/T07WsXXgZEAkYQl5RJ11+P6TxgwbAlTASjtgQN0Ga2YLedJOn2YJDuQucYo7aDb7z/Taw6RgcQ4/wR16IXMXpDY8qnbnx+Kfk2EjCC2HDyKg1GumW7B7yYGXMB67E0NLA03QbWV8n6xGFjfxDa71g7o21gtse0n6rtbnKCwyCe9TZN77/7wUEcBvsg2GEYiffjKLj/e1/Y/fp5zzwJGEFsOLNnKTg/OD1+m2nsGtalKbvUZLavFUuvTUeElFOdfIUIjJWRx06TF9u9QbJtfNuOUwgD8SUAqK6Avfbgh9vd4+O7xv9dyi0mxKFWaotxdph3voUIS00eaq230H8d7LTsDvMDYhnkHsOCj2TdEhiDa09+lqrPiVJhWm0xhUs3PoyU7I6kzW9JFzFBZvAKuIEsqErcGVYqM9Hd/BQI8GaXo4Z5GHZYZRrZp1woWc+24W1CmaguiiJIBhI+8YlHSnFdWZmA/eyDnz93dNTZDYPQDLfAsNWqeX5hbbZy+dT3FjqK7Nxqa6nMjbGfGnvRzou1Y7GfXj/4wY/dJ5x9VL3gkedRspHNjOPTS3ksBhnXV846zBoTlvrBhQno5Y0JHmH6IcX0R0ILbOy7xA9QN/TEfB0Ewfv/4p//wZ8VeRwp03Yy6BlPNDsSL7A+aODey064mHO05e5aQzVSqbIiBiMHDmsQLDPiNJknm/6cZkV65tpG7zZ3XTUazXtzOA+fycoEzNj8YqgZBMN1t7VamXVBlz/oFaaeeL6wJE7DOhyMQmwY85Yv7WEuGF2qyENGuIa3MGyENk3bO2U+N5lne1G6m6yCJJ1+jjBbxDLvP/+4YagL11/9iz/6w//4xht7f9HrD3bRQrzX695J03RbabWFJRJRWN+XUu7IJDUiYgQOp0OZ2jA2fA96HzQTwWk5bEyHnCnx04av5InY0LgzSc3j3rxxZV/bvthqCpg2Xf8ji+DhmK0ZUdYscVtkZmLWqvjU8ZX4Pp4WUo8oV8HKnh2pVLnnPJ/TAuYtt9QcQ4CLPlIe5XbW6eHMzNPHkBmBx9hwyadzRtWdxfXrV1CY7wsBz0/+6He/+8bdbq+/2+/17iRpui2l3B4kgy2zEgyC9vA1ZmCEDaM3dK2FCXF1Hyaj57DA649DZKIgOLD3pw7Zgs/XszIB41wcDh083Vo9axJ3EUybywhLGSKrxy6bZQ2ohaVM8J51f2U/Vs4kcpdELot16f4dRj64dGPu34s/zYPM2IXh0Bep1OHTT1/H+qxTNVqvvb63c3R8tCdT9Zf9wWA3GQy2pcQ/ACl6tvmPFWNIaecO4B8fZOTlv/JwMxcgjmITZXJ2fjeS1UVgbqhpdlDDZAg6+oBi7nvlHkNehbKGst/leOxqLKdQxhJy6iGWPlBj1gGWGxmNJ4czt6IDaokCNvucl/vBxXI+lLUZTuxdYEe5UA4813qpAMOcoci8x6WSQ28zs6F0Y6cjgH16moX4S68+uNsfDHYG/cEVtBIfDAZXBkna9qsksxTFnBpwm287w0GSuaHOggeAczxLeI6GFQqYGr5Q2cnSAKPoZ2Tja9fPMCtCm7HUyQPDWchOkIbRcSwlGT52jO7NO2PJfBbDT/DMBVB2bOcvsknBxTfxueUrczoUO71hMHwsVu6rMStKWOwDZdb98bEPZjuezru1KuBu6exmeS7hFRwhuJiMeMy/7e5l1ohSw1O3ru6rVN8Pg9NJ9u+8+PKXe93+LgocDlY+6nZ3a2EMOHnLzhk9/RxwpxM3EVKVdBqNWmkV+yuNwLKCMYrAhj9hR/tpZcZkCS0gmnHJ6EVyYBPDZv29m4GvpSaoT4fbZVySXDOz9OCZfIoqeYHk72100Y8mfvNznqPsa2aGnU2co/M+k0mhGv+gzPmdBR6H5fyW3dkT5lXxH5K2jAHGhgaD2wU3u4iaHS5wCKUwmmrFTBGFqcB3rrToVwYub/WFO7e/nq3h+va3/8/X+v3ky+YzOu99rTSIgMNApW23pN0uw7xz7QpZ3ZayG39lh6pGYWRuS+xPlP6Y2SQleCHjZ8xxXCH+E30sg+c+BHjJx82Gjze6zezRmi3x4pd7VvL4xO2sxOXv1A8IPassxP/eYo82Df+RYv6v5Fiu184D4Lkiu044c8UOZGYGOLZt7k10kuTYTHuCgOcOcE6SdFoUeC7WshLf1Kyw8SbTJEkgjOKSH8cn7WFs6aqXUCJQ6huTsVNrHXMhuKGuZRKZJVDmMfxFhlHqAk9ptASdnEJU7rKd2ZzAqYuJLTzuYxbTPzTslCTjew9pmo5tGk17P3gv/ZIPbpmYCOro+OhLpjQjCGBglpBTcIFJXIvM7FOl5GEZH7ZrKWD+UwprxPCiQfHCr+WgV
2rhp79g9ETBJi5uxczkdTGMFpac7548C9hzpjMV12UhGR/uaoyVVPDZJRZ5jPI/9v/+c8I+TFnn/HRy3J8jcNFjmeSfcWafmBMn/37O220vf/f7YsDzarJ4Kr/Azu9ANhoNk//iJUViaydgvirf14XhGz6O470nnnj8P7dqwSta61rZj5kqWQOF92uK+OrzLDWKoJWuKaVKO26lVY0B64GVAHO/TsBqSqp6aQduHsudF3d+sixSsuHrn8xx+/NsW1nqqqTXljHo4fnWGmqZ9EANRjV5pZ4jmU4/blxSJUo//PHhx3+Er08cx2MCla2xGm1c8ZXlwBbh5Vfe2NWgt8w1O6NSAPsm8VpGJ1ipYFvwcobXrGUODCdVDwYDI2b4B1/4zz2+/TdrcHgEUYgPftl5+K239j78+PDjseiripHWNNDhlTHeZq4FLy8fiJtLAef7cRTdFxzKKBUxlF3Ic264YNBLTHBhIjGOyyKV9tbtOAliHjikPZ0OhjuOGHFKs/0yEjO/KeNXHUXBgtTX39jbXsULctzpfVlzYXJfsyoBTLqGabh27cmO0qq0KJPsdAhiiaRp+ojZgTTRSQD4kYwN3hpLVqWGc5QBDvngg59/rdc92f3pT981yzJ0SvUTwNEG+vZT15bmbz/Ansth1UB+LyQWvXIOdrhICRX4HhIwglgio/o8ADSmUK5diGtXOlLCQ6MxAg8iEEGIfY7QS9LtXr+zC9D5CgZ+BwcHHWzdC8NgH51Srbd9sB9F0d6tm+dzSpUy3dZucwZ3WvOXxmj3U64dFpCAEcRy4Zx3x2VKD3fkyvI8wQLUJJWQahvpBOiUytioe0JKbAFq9/rJdrc7sLe51p4fv/NTiMPwQMl061//yb8qlJvCZaupZwtsOcSsvJ4poYij/VTDdsDKmz5PAkYQS2RUGjHqNjE7xqy8kmwUJxGIoacetvNkl3PGssp0buixXU/uyky0UtuLVA8eH598CTi0TYmT22XMy+FJpWFrq/310vuZy707giBmU/7uI0Zg2EspQYFiGq3pgQfM5NqwxnaQ9CFNE7dLaEsdTE6OYQ5LQaIUNJrtUzY8Z4GN3uB6llHAZkVgAeedmzc/v69LTOADRWAEsVxGXnAj7/gleLa1TXO9Hjn9gt/d1ADoepzFlHMYn3oNdRGCVAOIAni/6OOenHTvRkEMSTIeffnnNywZMa4V0ghXUGICHygCI4hNwFlCe1NIq5f2FtcKlv0D7nbTkI+CqhXglKCiJyJN0h1sB7JejLmtUeZYRCiWMgyXBIwgLisuQsPlX6vVfh7tdIqcCfRps0vH8duHpgimdIIbe+pms/HCMs7yxi4h33pr79//4y9/9R8YE70ym4RFwD5cRhU1Y6yXsXspvXDXOmvy8u/XenV1wXl2mWO3LTKltU4JIT7Knpux879IO5PZGZwOzzlHpl5L6RrnrPfk5373uQWfylpizA2vXim0tHvl1Td29dBolA0Lc7PWWL4lEHdIW20rkM6ypzQ2VsA6ne4f/OrDwy+i51eZ3gNm+NQCTczzwPiouZwvwzaoZBcG3Fmy1jrOkyzjVKFLPEcSfNmBz/CMzs0i/mf5lUr5TfzK+X7FcbS3MQKG7iVaQeQsnotwjCMRubAfYMYx9/SAklFngYZbN6/t425p2U9hYwUMvaUwwchEYKcMl3fHS/TMHOGbnMuk7ONmYvwYWbYWqERbssm7WsVMAWYm9JgnvbTHvmhM+5LU0GjWXiz60JjA915x6pQp6fjXQRAuzSJohY6sULoaZ0mVrkmpAE1Xy3y7K2dbUxbjMwDW39wui+nvY8sYJHKa7GNMG/G1fNiYi/DGoNAsIdwr+nQGSboDZjTbaAkJLi8mnM+XcZNBkQnt/c9qNVqUlQmYG+R5apjH1H9D8XyK6Z3lyvjolys4sDTf8qo5FFyEcHlWfW4CJowljABdKI/oXRoEc06sGMnxkQx677vZo/fOeAyAziJnB8cUShhAq9V4wdlGzx0pDSRsx5EwXn3oFoOlFJprSNIUwiA2bUW45FdJH1rNuonwys5/wbruQk56Ji0jqU0QF4GfpeibtoeevxMut9Pe+/OiQbcXkT58aDQWvH7tygE4z/t5ePn17+2CHs952cp+Djis2j4n66icagXNZr1wkey8VKKMYlO8k4jLhzVPtOUKZ3HR73McpsucgaIsUCF/dHT8Jcgcr7XKtstIE1G6in/AGZJSwTNP37wnC5ZozMvaCRiJFbFJTE6znoYe71EsvsxacCWPO5BhGBo3iiIV8r1eb9dbg492HUeW46bXE6NOqYYW4su6qisRgXGWX7dDEOuM1lOsuN3fKjPY2bPQEtIJSOHfUwqajWbhGY39/mDH5+7YRC0YuPme3LldxHHNCOQy8l9AlfgEsVwmBUrnbH6cq0fyHOFNs9l4Xqpi9VnoAQYT08P8H3C706bFSCuo1+ulDbGdxsoELLszmG1yHVqOZGY1LrI7g7+RuInPBLEqsLKfZSYTGVubKUJ1nl1IJdXWrDnp2YG+2WsMY8Bbt67sF7lEXn9jfxujqex9gW/c1nbjH1uHOE4oUgnU6uF9KZeT/wKKwAhi88l67/tJXzC2rJtfOLvd7l2A0zWck8OhfUd5s9m4JwQvbYjHJCRgBHEJ8NGS969HO50osgWmRfJTx8cnd/O+x4bT4e1Ufca5LdFYIiRgBLHhTObXjJBJifmpwi1E/X5/d/ZPjIRScOFLM5YmYiRgBLHhTA7QVbb+C+qN+r2iDdZSyvaszQb/LSypqNdrS03gAwkYQWw+KF5+YhBa3EilsGL+oF5vfIux+YfMvvlgv621yv15L5LYg4zgFO5ln9yNFTBG6kwsAcZY5WoSfcO1YMKIC5qIoLvG9c9/tlBtVq/X39XAciM27/pq9gi47tRq8X2pYKkDdzdXwBjrCWUnsRDEefFlP5yVbwpZDvnvc800cCGMspgxaFi7xZQRL6XmbyHq95NdPsMnyefasIgVG8XRA0wsWWEoSCGIDcd7dmHPYhCG5t+1Wt2YGGIz97zPHj3AZhXNjrVNub/TkqcQTUICRhCbDrNlrj76QodbzE9JWSyB3+t1d+fxQ0MRC50HWNlTiCYhASOIDYeBndLNnPmk6YFsNu5xPr+FDtjl5swdyCy1Wu3Foi1Ki0ACRhAbDhooYuN4wIWvz+pcv7ZzoAos71565dW7ZoDujJwyFrBakVTQbDe/IgoK5CKQgBHEpjPRQiQEd4Wl829w9bpdtNBpW6+v2TCtO41afesits9WJmB26ObpBu5ZHvFFGCTJb5I8E2VT3N58wi7HiMjIgibr5uDG8xfOGc0qRjXXVSohDkMYDLoQMmwl0ibyEgUS+P1E7/RSCTwM839IKogEPo+kffXzn+uA1ktN4MNK3SjmUPLzPQDZUBDVwnjll2zoiXcn0ObZNXQrl/8qej+DXn9oYjgLFOE4ivdhUXPGgqxMwJTW7WUOhcA6FO5HrRPEmjLF0LBw1DJbKNiwfQinBaVKdtrt9teLTuHuJf2hiWHuI+H3pcQEfuE5k4uyughM
6a2LnGpDEGXAoNiAGQ0w95JzWSPbFHpz4cUujAC1b928ul/0ymMKl5x8ZloHC1ixTQkr8M95yHOzwizRFFdKbScgl/M68rLnwhKXGJ+zLVtjxsw7zzUTIv/37GhC+xxCEZgEfpHHefGlV561U+PZmXlp/CmM8Ioc+XlY6RLSf72MQR6MFZvfRxCrhi2YNxJnXD9CcJMDw4buyOWnZqwET3F8fHTX20xgM3ge1muMda5dfXLpuS/P6nYhGetkh9ear1Hljc+2AK2Z/dt88hU/TBymIMlSmigJ7XcTC4b1Sqpadtw+c+PHILMLOdyJt49RuPizr9W2mtULqe0uZ6ITqNVCs7wLRIEhtgO1Y69HNrP0wgqYMBGe0stt4vastIzi1G1sfHy7OWEqYzJUgMXmtBBEuWR9uPKWX+ddgZgNsVnfV9rucCoFjWZxi5s0lds4tPYs336sLIjjaM896tJLKIAKWQli8zH2zu5ZPn375r2k4A6kTNKd8WEg0zElGq3mC2BdOy5kGUkCRhAbji2j0MNEf5GA76XvvnYXq+9NHm3m/Elmxqi1Wo3nL2r5CCRgBLH5oIBhgWkUFZ/CfXzSfdbmoJlrRZqufsY0UQi4dnXnwhL4QAJGEJuPETAlodksPoW7e3JyJwgDI4CW6REYRmdxEO2Ds9O5KEjACKLiMICZUY8VlMWGbGACf7i5NlyKTn8QP6YNYPk9kJ5NtpQGgdu6tBVJlEjRZm5bAJpNfmszk7HMt6XQcJhfnYXTshXEgh9EcbCnQc+Vn0pSW87BIIBUcwAe2FYhNu5s4cGnF0XCRGCCL78H0kMRGEEUoGgr0bqgtN66df3a/rztSihSr72+t4NTiFC4hOuntJ5fox1JXwKCCfxGs3lP62I7nOeFBIwg5oQPI6qqMaqtnDf2i0Le6Z50d1Ml28yZu2AejHMBkwXo2nFrr50AABzOSURBVI0jeuap6/cY40s3McxCAkYQhaiegCkzxMM2WDNgc0/JPul277JM/6MXLD7lHARBcKG7jx4SMIKYBwwy7Miwyi0hcenXarWeTwp61Pf7/d3QTTEaoY09dTb60iaBH7sEPswtkGVAAkYQc5Bx7Krc6WKcdRqN2r2woEd9mqbbQRAYAcz2bPp/gx/ZZgQs2u8PLjb/Betmp1MmSumateqlbm7i/BgnnQUKnMwgXG2+MAlwb2tTtgGLn77tk+zgduLNNcDg8OaNq/tF7u/lV97c1TyA/qDvriFlRrIxxUC754JOCcadQibQboVfj6OLaR/KsrkRGJklEpeMab2Kk3Mm5qXXH9zRAO1JL4XRrqN307CTiO7cvnVPLeCkcV5WKGDLjYzsNGJNOkasFF1yzVdRcLkXhKHPS82dn+r2+7tajeq9cAdyrJbNLSPxtsCZJHK2/DFqk2x0DkznVQ0TxAWBvnRjU7eW9LDDMonMJ7b5Gi106vV7SVrQA7/X3wXQYzZAo0iOgcTn43oshbDTjZS6GAudLJTEJ4glYoVLGbv0i2KswBQAWq3mC0EwX32WH/Yhk2Q7m7jPJu0h4wJrphDFtkQDB99e9HuJBIwglohtPWLDdMYyUhr+Ip6aAwMNT926er/IFMMHD97YlqDaXIjJiUlD51hM3nN3/1hjJguWaJTFGgrYZJCN63Agf3viUrBIFJMYS+lxZ1cvPAGzyzvO58t/4eN3u8kOd+Wq0wwMcUKR8783Lq/NZuOromCJRlmsbqwajCv7tK/NyeMkYMR6oBaYFu+XjtyVTozt6OmJdhw7mqxwHqmvrCc+d2PPsss+EQVzJ+5TLU0UdXSUPMuAn8p9De8TfF+knf310ENbc7colQ0tIQmi4nBbbza2hPQmhoGwQzbmGRbC3DT7/qB/Z5YgMcHNjEiZSsBlZr3RxIjkwhP4QAJGEJuA71GEsSJWFLNGAx0i5puC79NkaZLMtNzxpSFokhhHEXzqk48wisAIglgIX2yqsol2l2x/6KH2VwHY1ny5tUzTNp8tDbizqqSCVqMJA6naFzXEY5JgFQ9KEER5cHBOqcz6danh1FoG16880bHR19kVaCEXnVdefbCrlNoSIoC8WZPZ+ZitrdZK2/UoAiOITWBKwp0Pp3zPv0PY63XvYC1YXvfRcBiv64dsNBr475XsQMKmC5i3cCOIc7+XnCNp0b5Czcz+o5k0jxOy8U/ZbXRmB1Jwd4wAgWLAtYKtZvy8Pfb5W4hOTvp3g6AGMu+ywSG5UkNodlUVtNsNmGVnvWw2u5XI/J/cKIjNhme8C7zZIP7fj1ErwqA/uIL3Mc20EJwzKwplKhO8f7TRWWmMsMECNq/7N0HMgXblCpyvXU0i9iRqpWzi3iTzFebEOmhiWPS+kkwL0TSsRQ+DJE1xyhE8/HCbKX3xPZCeDZ5KBO5ziJaQRHkwptdOwDiwjvfqtwtWm6NyHmBzT8l+7fUHOwp0GwUxL4EPvjhXaWi3t1Ao26vogfRsdhLfPDuKw4jzY+qceH5ksi64WlQQXBS2dj45Ob7r7XPy6sZQvHzVf6tldiBXlsCHTV9CEkQV8Y4Q82LzUu5SNuUU6ETMzbKuiMXN8fHJXcGFdZrIsaLyfvhChFBrFBqRuRRWJ2B6tCU73kVfjvBYL6TivWsEkQezOaDusk9Q4SWZspX22lxT+PcAmi07hbvIffV66a4IApAgZ9Z2pWkKQSigXotGh1BQdMtigyMwEi7iMsHM5ayZBqlS4wE2r8WzNzuUUm9L29QNOkcZ3AIT4jhCH7Dh7aqAXU+ZUCErQWwIJmoySz9TYPqdeS2eh8WvuHsppV0V5S4h7YCSZrMOrUadaa3NEjUIVpPIX52AXUCKyvgVUSqMuARIbYtsU6VACA7NVmvuJ42i9NL91571F2XW3mf6L2hoNlu4xsGSi5XtQAJFYARRfTCJz7Wb2agVxHENmmhxMwfoARYJ1jnqnjwXcFtTP6sODJTtt2w2G2tx3i6BgFEIRmw2LNOrjX/V63V45OEWk3PsQKI54S9++aFOkoHx9oIzTBsxWhNBCPVGfex2KS9pEn+eXULGqjfOnSBg+P4eH81vmfbBir5e8F8XOXEmatI4bIN34jj8t0rBtphzB7Lb68FgkJriVzxe9Pk6dWSZSoE4CuBTn/wEky7/hQhxyXJg3mQtO1/OdNBzNnabMmt6UVjAtIaaUgxWtDlCbBimIXuBp6RlWpe4Rce1aX7WZrqGNt2GWf8ufN9rhSPK+NxvWeUGaWAeyozYwDmOTB3eeebW82fYeY1gfOvRT/8m6/YHwHloashA8OE1OJxu5LcZzRLV3LYdrDj/Beu4hMwGZPbk0RKQuFQ8XPTJYvuP8ao3YljsdxnAwcuvvLnrfcS815fO/Bu/FwSBvR4Z7zQazS+sywuy9jmwiyoeJIgqYy179HApN08FvtLKDfE4+hLmv3An06+Egoyv2PjgEQ1PPXXz/rqcKtqFJIjK4gyjnH00Rky1Wm3uIbOc2WG3/V5/1/dAZj31JyeEZQfcrgsVEDCyxSEIJM3Z6TPZKW1tdFqt1gtKz+dA4VebaZqanzcCZdJibGx
MGgqjz4mFYbRnb5Mrs9DJsr4CNrSuNSeUdiGJS08wa6eP2Y/6ZrPx3LwW9X6ZKZVs486jmVPJGTpZmEp87/3loy7Mg8VxbEwSORcrT+DDqpu5p8JHjpJYlGcHbELhtnfGcBiuMuE1QZTDmi2f3DKRm/yXAin78NDDD6EgOScKOYzYpjVbo831y6/v70rNtsMgBKmlyX3JQWqKy5TCpu7A9DlyLmAw6EOrXXsB5ozwLgLKgRHERqBAhAHEtWjYPpeNkiZzYihugWCdk5P+d+Sw79HnuUYBhh66xtheyd2nbjy/TjpOAkYQGwDmqJrNJtSiGL3Bzmzi5pxvdY5O9PHx0bTvGQXDqEu51iEjZDDcmVyL/BeQgBFE9cFeSCyWbTYa0GrWmZzLo57BSbcH/cEAC8WBT6l89bVhfiITFzaiW6ekDAkYQWwAmANrtuyU7IBNT7Bn82Baq0NsB8Lp2kKMmrhhSnuf74hp1OvfApOmXn0FvocEjCA2AM4ENBpNCObcgmSMd958sNdW2rYIqYwjoa/58stH9xu4w/nCYEXOq3mQgBHE3OjCY9X0FD0p66LzERXDYbaCQbtVn/nzPpHveyh7vf6udol7X6Sa/WNuw1YiI18SXV6fD6iQ1bHk8+BLL2aNhyKIucFdOKUgDMOPivyaYqymuO3p9bt5550Y74ULE/Hmb6U7cYARUmzLjyaiJDVQmXIKLK2wTq0n3eQuZ6MK/KyRIXMHiktTbkYRpXD9ymc787q8XhQb7YmPRXgUYhJlsg5DYiZHmWmt2jhkttFoMtd8PZajGg4sUrKdLa3odk/uTkveD+9XaZMfk2mKE7gLT/m+CDb2+qZpRMTmMh7DSSmh2W6byGtaD6Rf7DAnfH4V2O8PrvCJpu3x68Y6UaRSYgW+6bEsMqbtIthkAXPV+9RJSWwO6HyqXZmET7uji4QdMju9/mtkoeivBe1nRrZnDY2wS0plPMAajfp/gkVGvi2ZDY/AaKgHsTmgeKFdDu4ggh2iZp5bHEad1owhHqOgiplp3WiA+NJ3X7vLOevwKY4T4Hclh0NCBEZ4a2lptcECBrO3ZAiiYkx+GPuIKq7V9mq1/HbhaR/iJyfduwAsUxeWXTrawlWjj8pU5N9o1utiHc/WygSMAXSmbdtmK39B24bSxTzxtXOUXMbRE8ScJLLGJZYjuMGzwCGd2IMczWVEb3mR5t3x5PLNv7ejkO2Hocjd2VQZBZOgTSN2Mkh3mBeqzC4kc42QYRg7UZOgdcof/53fzj2uVUJ+YARRUbwuNRq1e60W7kDC1AR7NrryF/wgGVyZZU7ohQ1/N4qjwbqeodUN9QA4VdHLnbsk+ASi/8YCiSyyoSbKhLn/1pEvfOHO13FITt6Q2ezUbP8ckjTZFiLIfTbeDx//rtdrJ+v6ZlrrCIwbQ0O28Fg1Pq+zG0HMybpZKnvY/AWmB6+8+uYuqNnPBcsnTDO3ifAax2Uea5lQnSdBVJRJW5tsBf40A0PP8fHRzznn7Vm1kl7cOOfPNJtNisAWwQ8cXuRTb10/KYmKwqxX/CrBZSI4cbJzVRc7mF6vXxNBgF74uT+DTd5KYzI/HDz5uSdIwOZBuTW6UDqzM1O8gZYgSkfb1pqiHR4+/aF8Mt0ZAy7yAesfG3cjJ6vmsUI+u0s5q+C0n8pIw2xBFngdggbB9FrPhs7P4q0Io6h6tCHshgtQQp5YOau+kqcJlF0q6rmHbLz9k/cCpTS3IeUMEdV2Q60W19Y6eFg7ASMIYn6KtvYcHR21pJSvCsHPnPOIS8h2u7VWrUOTUBKfIC4RR0cd6yGWsc6Zhvm+KaGor23+C0jACOJy0T3p1Sanbk+DMY05stv1emOtl5AkYARxiej1+7V5NhCktPmv33ns02vZQuQhASOIS4RSylzzZmj0rPyX8QCL1n73f+0ETDGApISjIjsdokxSkCC5sZYptCM+aVhgCxtX89K8+eB7W4wFZiK3PR6bA5s0MzQurVxDrR6v/e4/7UISxCUBC1ilkq+ix7RM5cijf8qnvB2U21jrBD7QEpIg5qX6ofzR8TH6ThuDQhtt5djvKHP7M61W+/TY7jWDBIwg5sQtAyvbFdLv9Wp4yY97f01DQxAE6aOf+a21rsIHEjCCuDwkSRJg9IWpr2wb0aSg2Snc6798BBIwgrgc/OCHb9ekVEGAFjkZ1+Op+S+toFZf7xYiz+oETGesdDMmhtmY1TTP2snBhf3ttUJPfE6W0kQ5mJRR8ctlIOXDqemH1sNmbq7GJm2YL5XSZ7b2nIfjo5OGBvESWqsyEdiBz2x07fldSDsnkkGrWVvrFiIPRWAEsUSU1rX5EknD0f5LyTuddPtNdMEwGqr1qU0JL55O0J5pNNa7hciztgJmTiPVcBFEKRwfH7fshG4GUsmhYMLECgiH5KLQ/e4Tv7O2PvhZ1jsCG3knkZ0OsVGMLxVd9AUMl3BLicB6vV6NMzG0WTePz0c7kj4CM/mvKK7MTuvaChhzhmvOE38Njogglsuy3us/fvvdQLtBtfiHs4yVjhpPEmPeudGqxg4kUA6MINaLZeTAOp3OFufsZfCOsDhxO03NxS8zPZEmkQ96rYd4TLJ2AsbGdmeGa3OylCY2Fv+Wt0n2BbY6z+Do+KQNTLidRmkCMe4S+vZvbf429WFa7TabtbWvwPescC7k9PoG7dfk+J8aJRfLun+CWAjfiF2QUYHoaHloc06j+/Fvbw3FPffnoXPSb4EQoLh2+Tac5h1Zu3Zhj0VoDsIsL5W68tnHKrOEpGZugthwZJpyE2XZbUiz0whDccW/uXFfVUpCHNUqsfvooRwYQWw4SZJEAKcLx8EtX32UmEq59hbSk5CAEcQGs7f3/RaaGE4uTScr8M0fpdFCpzIJfCABI4jNptM5bHHO78NE5OVrv/CPsc+xmX1oNpuVSeADCRhBbDb9fr/OXcGqJ/u1LWzF/BgDwcStOIooB7YOaK1rZl+HimCJEuBKQaBl5Yqqu11Vm/V9KW2juVQJBCFPn3jiURIwgtg4mClAqNyzStN0ZqXBsMxDWxPDizuycthYAbPFr2ysMJYgLhPf+96PGkrJIC9qzBoZKjPEtlapHUi4DBEY9VESZcIYVKYr5PjkpKGUejnv+8PdR9dSVLUdSKAlJEHMB4PqmQqcnPSbs9oHhruSWoNg/JmHth766OKOrhw2V8AYkKEYcanpD3q1s1rq/A4l5yJ97LHttR/iMcnGthJhY6oQzGwPg66mkJWfvXO9pVOiiTJThdOM8twDD/tbF7m/SfCerF1zeQg9frygR1+iTzkr+KGolKpbO2k1vEOt/d+jPJQvJM0O2zgvvW6/Fhj76JzzqgECxiGVCURxWKndR89GCtg//uLDWjoY/Eav1wcRh+VGYuqiPqTwmEW5d+m88jDs9s+CZ/5fHszPFnQKwEaPstBDzVgGlXjBm0dSMFZ6M5qfaIzrIWCiUA5MKW3KGOxGn3KN2yxTVMptL6JW9kyxXL0pxI9+8m6QDJIojuugcu7S1K7i00pTqG81Kun4spEC9sl/9kjv8Sce/7
N0kPwmCNYr05nCvyHLwtarTUNjjU6pj2WxQ1Kkf1znEVX2c8q0qizhOQwfBxuTHyn/fof3X1NK1YbDOAAgjMOfLXRnM39Ew0gzyxHk7nG3AaDvm8gv5y5N5MfsMrJeq1cugQ+bvIR8cueJ59bgMIhLzrhtzmhNmv16+H0wKY9SIrBOp/PQyMonX0RVKvGBb7Xa6z+Fexq0C0kQywSXhlqP+YJlGS0l2dBCvQxOTk4aOMQWZuyceg98zoXaefKJytWAAQkYQSwXBTDnEtrV+ZcUgeEQD4YurGfkbDG9UsUKfE8lBIwspYnKsqJOkCRNA4zm5sjAQRRVcwcS1lXAzHRgbae04NZyGIYfrsFhEURhOOfmw9dXvXsw8pE+kQ5mAi4wpc2u4Hn5/g/eqSnFONpIyxkShoeTKgnth1ofV/WVpSUkQWwYR52jFgDcx/7Gszz2OfDbzXp1xqhNQgJGEBvGx4eHj2SH1eahTQIfoKoJfCABI4jNA3cgsQIf2GwzA4zQsIWoyieABIwgNgycQsR8HnlWBKY0xLVqTSGahMaqEcQGsf+9HzXAVZups3ZAOUCj0azs8hFIwAhiuWA7UrZh2w6W5aBsZ/upx+Zwvl7Iw8OjLc7Fy5wHoEENq/CzU4g8OAfy4a1GpXf4aQlJEEvkosvAer1e3deu+qr+rGhhiZIpU8K2Jc6eqTdiisAIglgP+v2+qfzXOZGXFS+7AxkG4eCxxx6tnAdYForACGKDwCEe2AOJCXo781FN8X7DKUQS4jiudAIfSMAIYnP4wQ9/VFNScYyyTASmR73c3nlVSgm+wLVWiyvfokcCRhAbwuHh4ZbS+lWeyb1Zs8Tx5SMSBEFlPcCykIARxFLRGRtqNrSV5lN6FLV1r1j4mjw+7jdtj+XIRdY6wWpXlc+NU67xwGfsVq1RowhsUbJ+6SOvJILYNDRWuxsffBQQe8mhqGHjtnvPazu2AbPp6hyXwUlfNTQP7JwArjPCZdx9Ae11lGsvUjrln3/yiUqaGGahCIwgloix157z7rHsgZ/DUlrKhHMzYYhnlpBs+LeUKWCLkcl/xdWPvmClAkYBF3EJ0JnlnGXCVmeiUGzRAVo/fPu9KEmSKHtb1gGWczt9G3cokzSFaAN2IGFVAvbgrX2SL+JSoJWuTxbcW9HKXgJj4+4WqsvqdDotBfql8ceA4ei20SQkZirwG/Va5RP4sCoB01q3V/G4BHHR2NH9eo6KfOYGzPKFBKzX7Tb9ZkHWgx/FajR/kg9bmVrtVuXzX7CyJaTWtIIkLgV2rJyeNVtjiJ8ZWZS33347ODo6agkuMveVbSPSzjrH3hYIcbvZqK6JYZaVtBIppbjOmeDsq4dDEZq/cUuYkyc+UVEYQ0tpnRlqq83AYozKFFMgnGAx/D7apxds5n7rre+3fvzjd54ALl6d3Mn3OS+bidOY5QfzT6bVY5/+jUq3EHnWahcSxStwzaZuaKl5Eeb6+CKIS4hUOpBKvXrGlWX+zwUDqdRGVOB71qqZ26/Zlfva9HTpEKuGP1qDwyOItYNzpoRzl8jD5L+0rcpP0z60Wo9sRP4LViVgGvTU8Z0ME57MpiLNstJYJkmMxJY2np4gLg3aLmVbrVZnU57yWkVgGHnhEhLHTelUm456yIymIgjiNJNFGZPYHUjngS+C263WZuxAwrrlwLAKOVUKlJQm3MWGUxybomRKERhRSRjnp4pVLxrlAgEUsCiKBo9+5rc2IoEPq4rAGO636NNVyIpps4xkxi0Sho2w73/wy698fHTyBzJNHs6d05nzESS46JU52Ztz3s2rlg64KDVSHA5FzTweh3J3ZBXoWiiCD4GXe7+mpglYz9Uj9fxzEKzkaNolIzgw87r4VpxFK9rzH4bNfh8x5poa3T85A5mq2tHRyVNhYAvklbNKNe9783PnP0jTY4m7mpg/ZjoTkYxaiLjJLWtI1QCCIK70FKJJVrqEPDW3TsOwcx+T+XiDCEL4uHP8xQ8PO188c0jBFBhjJdeczZjyMscg9yLw4dtx8n7Xe1dW+217mHLoJcPZxSwimBlRNmPKtf+5sduY6T0MgtDsqI8KTEd6d34U9kACE1Yk5bACXw2buUMegR1ShDuQ0UalY9bcUtp4gVgf7zVZ7a5mOVC1MpLZO2IXRbmv1ez7mropBaO6RoCR8wo3eV41dahHUbByPwit24UpD2cjUffP39aDBSC5gK2thz4+94OuESvbhVzXE3Ieyr44q2ox5D/5875XJrPOUZnnjy0Y6WWf7zJeT6VS3h/0gAUxCB6ajS8FGcHkNtRL0wSSZPB7zXZzYxL4ULWhHou8+ct+02RdLSfxn7RVYxmikneeyn6sWfc367UqyqKvrbdyzh4nLidNYVYJkXW73Tz65K9/4vFeT9ZSKQMpFZdKBlrpl0x+UDHgge1qiaN48Pijn6Yc2KpY94hkseNbJIJY7a7WPORd8BcZpZYplose97RjwI0GVdJr+OhnPqMe/cxn3s7e9qMf/yQ66hz9Wq/fqw8GafTh4cnDaZoE7WZj48qRaKxaQWZdFItdMLN+Z/pFM+txxvZE9OT3Lu4DIO+xzjpH039tlsjn39ciL8eMPH3+65G5efz3tRGqrKkgDDd7Ju+rPLF94vHH0OvrV6Xd4RqzOgHTesqLpk0ychFm7RAVJ2txPZqr533F81hEH8oWxNHxaXc+/deL2XbnH8Oir9NZUeXp/Tw9I9m9iEgJzXK3hPTMc5TzYDOOITAbUf7nRj+I6zhslZOmwZoZy2csg0jzD42Yen5XAjMFquAvNjt5wN4+7NifHzYcllAeo/eaP0ZjCVf6ySozTwNjgsPM+eUsY2630Dma8TulRnS+7EI7/yr7T+U+NEp9dbEu6ozDKIJyAmsMCSea5GSe5jl/LqkGwEVodgmTfhe03MwNrmWxMjsdobDI1H0iu6K+sYuvILzk5VH2fTfS1MUlLHdXruTEf/Ys+FPiFjBQ7oe7Lr1a1Edatk7KHzx3NU3lPY6clTdb5BVm2fLRifvODTjROodDiteBdS8wdjpxEGyE1fNFsRIBi6Owx7i6zRhLs0sKtuAbSGvNU6lK/eRCa1+8X8iOaVeaM87UIqWx+QunspPaJrgw9ipYfa0zS7Kya+lQe8sLIPXUxL8e14dSUDDLveF8r4ea8LzP1UqpTNscaDQr0C4i1LtKSQ4FPcEuM2zVfVpEufzk3QNzuTM9zNtZET7lwz4feTkrjKITmQYcFvNwn4q7dKVWHJTmrgiUn92uXAwpZTCjTm1huTTnW2mutBqe8zyLaHxeI4tn/LC0z/POM7c/KO2JXgJIwAiCqCyUMCQIorKQgBEEUVlIwAiCqCwkYARBVBYSMIIgKgsJGEEQlYUEjCCIykICRhBEZSEBIwiispCAEQRRWUjACIKoLCRgBEFUFhIwgiAqCwkYQRCVhQSMIIjKQgJGEERlIQEjCKKykIARBFFZSMAIgqgsJGAEQVQWEjCCICoLCRhBEJWFBIwgiMpCAkYQRGUhASMIorKQgBEEUVlIwAiCqCwkYARBVBYSMIIgK
gsJGEEQlYUEjCCIykICRhBEZSEBIwiispCAEQRRWUjACIKoLCRgBEFUFhIwgiAqCwkYQRCVhQSMIIjKQgJGEERlIQEjCKKykIARBFFZSMAIgqgsJGAEQVQTAPj/Q19FvsGIkkEAAAAASUVORK5CYII=\"\n \n category_ = str(request.get_json()['category'].encode('utf-8').strip())\n user_ = data['User']\n _photo_ = convert_and_save_image(photo_, str(user_),'desafio')\n aims_ = eval(aims_)\n _aims = ''\n length_aims = len(aims_)\n c = 0\n for x in aims_:\n if c + 1 == length_aims:\n _aims = _aims + str(x.encode('utf-8').strip()).split(\"'value': u'\")[1].split(\"'}\")[0]\n else:\n _aims = _aims + str(x.encode('utf-8').strip()).split(\"'value': u'\")[1].split(\"'}\")[0] + ','\n c = c + 1\n code = 200\n bool_, _token_ = new_challenge_p(title_, summary_, description_, _aims, _photo_, user_ ,category_)\n if bool_:\n id_challenge_ =get_id_challenge_by_token(_token_)\n new_permission(id_challenge_, user_)\n message = 'The class was correctly entered'\n else:\n message = 'Something went wrong'\n code = 400\n except Exception as e:\n message = 'Internal error'\n code = 500\n if 'title' in e:\n message = 'You have not entered a title'\n code = 400\n elif 'summary' in e:\n message = 'You have not entered a summary'\n code = 400\n elif 'description' in e:\n message = 'You have not entered a description'\n code = 400\n elif 'aims' in e:\n message = 'You have not entered a aims'\n code = 400\n elif 'category' in e:\n message = 'You have not entered a category'\n code = 400\n print e\n insert_general_record('challenge/new', \n {'data': message, 'code': code, 'idchallenge': id_challenge_}\n ,data['User']) \n return jsonify({'data': message, 'code': code})\n\[email protected]('/class/new', methods=['POST'])\n@token_required\ndef new_class_professor():\n \"\"\"\n Receive [school identificador class year]\n Return: message with token (Only if the information entered is correct)\n \"\"\"\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n if (data['Rol'] == 'Professor'):\n try:\n code = 400\n school_ = str(request.get_json()['school'].encode('utf-8').strip())\n identificator_ = str(request.get_json()['identificator'].encode('utf-8').strip())\n class_ = str(request.get_json()['class'].encode('utf-8').strip())\n year_ = str(request.get_json()['year'].encode('utf-8').strip())\n FK_owner_nick = str(data['User'].encode('utf-8').strip())\n result_bool, result_text = new_class(school_, identificator_, class_, year_, FK_owner_nick)\n message = result_text\n if result_bool:\n code = 200\n else:\n code = 400\n except:\n message = 'Internal error'\n code = 500\n insert_general_record('class/new', \n {'data': message, 'code': code}\n ,data['User']) \n return jsonify({'data': message, 'code': code})\n\[email protected]('/get_all_classes', methods=['GET'])\n@token_required\ndef get_all_classes_p():\n \"\"\"\n Method to get all data about the Professor's classes\n Return: [id,school,identificator,class, year, status]\n \"\"\"\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n classes = []\n if (data['Rol'] == 'Professor'):\n classes = get_all_classes_professor(str(data['User'].encode('utf-8').strip()))\n message = 'Ok'\n code = 200\n insert_general_record('get_all_classes', \n {'message': message, 'classes': len(classes),'code': code}\n ,data['User'])\n return jsonify({'message': message, 'classes': classes,'code': code})\n\[email protected]('/class/add_student', methods=['POST'])\n@token_required\ndef add_to_class_p():\n \"\"\"\n Receive a user (student) to add a specific class\n Return: 
message with token (Only if the information entered is correct)\n \"\"\"\n \n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n student = \"-\"\n class_ = \"-\"\n if (data['Rol'] == 'Professor'):\n try:\n student = str(request.get_json()['student'].encode('utf-8').strip())\n class_ = str(request.get_json()['class'].encode('utf-8').strip())\n code = 400\n ismyclass_bool = it_s_my_class_professor(data['User'],class_)\n if ismyclass_bool:\n resp_bool, resp_text = add_student_to_class_professor(class_,student)\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 400\n else:\n message = 'You are not allowed to do something in this class'\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/add_student', \n {'data': message, 'code': code, 'student':student, 'class': class_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\[email protected]('/class/remove_student', methods=['POST'])\n@token_required\ndef remove_to_class_p():\n \"\"\"\n Receive a user (student) to add a specific class\n Return: message with token (Only if the information entered is correct)\n \"\"\"\n \n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n student = \"-\"\n class_ = \"-\"\n if (data['Rol'] == 'Professor'):\n try:\n student = str(request.get_json()['student'].encode('utf-8').strip())\n class_ = str(request.get_json()['class'].encode('utf-8').strip())\n code = 400\n ismyclass_bool = it_s_my_class_professor(data['User'],class_)\n if ismyclass_bool:\n message = \"it's my class\"\n resp_bool, resp_text = remove_student_to_class_professor(class_,student)\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 400\n else:\n message = 'You are not allowed to do something in this class'\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/remove_student', \n {'data': message, 'code': code, 'student':student, 'class': class_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\n\[email protected]('/class/add_challenge', methods=['POST'])\n@token_required\ndef add_challenge_to_class_p_():\n \"\"\"\n Receive a user (student) to add a specific class\n Return: message\n \"\"\"\n \n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n idchallenge_ = -1\n idclass_ = -1\n if (data['Rol'] == 'Professor'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n idclass_ = str(request.get_json()['idclass'].encode('utf-8').strip())\n code = 400\n ismyclass_bool = it_s_my_class_professor(data['User'],idclass_)\n if ismyclass_bool:\n message = \"it's my class\"\n resp_bool, resp_text = add_challenge_to_class_professor(idclass_,idchallenge_)\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 500\n else:\n message = 'You are not allowed to do something in this class'\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/add_challenge', \n {'data': message, 'code': code, 'idchallenge':idchallenge_, 'idclass':idclass_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\[email protected]('/class/remove_challenge', methods=['POST'])\n@token_required\ndef remove_challenge_to_class_p_():\n \"\"\"\n Receive a user (student) to remove in a specific class\n Return: message\n \"\"\"\n \n data = get_info_token()\n message = 'You are not allowed to access this 
page'\n code = 403\n idchallenge_ = -1\n idclass_ = -1\n if (data['Rol'] == 'Professor'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n idclass_ = str(request.get_json()['idclass'].encode('utf-8').strip())\n code = 400\n ismyclass_bool = it_s_my_class_professor(data['User'],idclass_)\n if ismyclass_bool:\n message = \"it's my class\"\n resp_bool, resp_text = remove_challenge_to_class_professor(idclass_,idchallenge_)\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 500\n else:\n message = 'You are not allowed to do something in this class'\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/remove_challenge', \n {'data': message, 'code': code, 'idchallenge': idchallenge_, 'idclass': idclass_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\[email protected]('/class/edit_info', methods=['POST'])\n@token_required\ndef edit_info_p():\n \"\"\"\n Receive a class's data of a specific id number (INT) and info to edit\n Return: message\n \"\"\"\n \n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n id_class_ = -1\n if (data['Rol'] == 'Professor'):\n try:\n id_class_ = str(request.get_json()['id_number'].encode('utf-8').strip())\n school_ = str(request.get_json()['school'].encode('utf-8').strip())\n identificator_ = str(request.get_json()['identificator'].encode('utf-8').strip())\n class_ = str(request.get_json()['class'].encode('utf-8').strip())\n year_ = str(request.get_json()['year'].encode('utf-8').strip())\n code = 400\n resp_bool , resp_text = edit_class_info_professor(id_class_,school_,identificator_,class_,year_, data['User'])\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 403\n message = 'You are not allowed to access this page'\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/edit_info', \n {'data': message, 'code': code, 'idclass': id_class_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\[email protected]('/class/edit_challenge', methods=['POST'])\n@token_required\ndef edit_challenge_to_class_p_():\n\n \"\"\"\n Receive a challenge's data of a specific id number (INT)\n Return: message\n \"\"\"\n \n\n\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n idchallenge_ = -1\n if (data['Rol'] == 'Professor'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n if (itwasintializedbyanystudent(idchallenge_)):\n return jsonify({'data': \"The challenge cannot edit , because was init by one or more student \", 'code': 403})\n title_ = str(request.get_json()['title'].encode('utf-8').strip())\n #photourl_ = str(request.get_json()['photourl'])\n summary_ = str(request.get_json()['summary'].encode('utf-8').strip())\n description_ = str(request.get_json()['description'].encode('utf-8').strip())\n aim_ = str(request.get_json()['aim'].encode('utf-8').strip())\n fk_category = str(request.get_json()['category'].encode('utf-8').strip())\n code = 400\n resp_bool , resp_text = edit_challenge_professor(idchallenge_,title_,summary_,description_,aim_,fk_category, data['User'])\n #resp_bool , resp_text = edit_challenge_professor(idchallenge_,title_,photourl_,summary_,description_,aim_,fk_category, data['User'])\n message = resp_text\n if resp_bool:\n code = 200\n else:\n code = 500\n except Exception as e:\n print e\n message = 'Internal error'\n code = 500\n insert_general_record('class/edit_challenge', 
\n {'data': message, 'code': code, 'idchallenge': idchallenge_}\n ,data['User'])\n return jsonify({'data': message, 'code': code})\n\n############################## END ONLY PROFESSOR #############################\n\n############################## ONLY STUDENT #############################\[email protected]('/search_bing/<id_challenge>/<query>' , methods=['GET'])\n@token_required\ndef search_by_bing(id_challenge,query):\n \"\"\"\n Receive a query by uri , with this method the user can search information using de Bing's engine\n Return: the results with [Title,description,Url]\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n if rol_ == 'Student':\n code = 500\n try:\n challenge = get_all_challenges_student(user_)\n challenge_allowed = False\n for x in challenge:\n if str(x[0]) == str(id_challenge):\n challenge_allowed = True\n if not challenge_allowed:\n insert_general_record('search_bing/idchallenge/query', \n {'message': 'Challenge not allowed', 'code': 403 , 'query': query}\n ,user_)\n return jsonify({'message': 'Challenge not allowed', 'code': 403})\n bool_, finalized = challenge_is_initialized(id_challenge,user_)\n #Finalized -> 0:No , 1:Yes\n if int(finalized[0]) == 1:\n insert_general_record('search_bing/idchallenge/query', \n {'message': 'The challenge is already finished', 'code': 403 , 'query': query}\n ,user_)\n return jsonify({'message': 'The challenge is already finished', 'code': 403})\n code = 200\n #with open(\"/var/www/api_gonsa2/bingjson.json\") as json_data:\n # message = json.load(json_data)\n response = search_term_bing(query)\n response_json = json.dumps(response, )\n message = response\n date_request = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n log_student_queries(user_,id_challenge,query,date_request)\n log_id_number = int(get_id_number_query(user_,id_challenge,query,date_request)[0])\n array_results = json.loads(response_json)['webPages']['value']\n position = 1\n for result in array_results:\n insert_result_query(log_id_number, position, result['language'], result['name'], result['snippet'], result['url'])\n position = position + 1\n try:\n array_related_searchs = []\n position = 1\n for related_search_text in array_related_searchs:\n insert_related_search_query(log_id_number,position, related_search_text['text'])\n position = position + 1\n except Exception as e:\n print e\n print '*'\n \n \n except Exception as e:\n print e\n message = str(e)\n code = 500\n if '400 ' in str(e):\n message = 'Query parameters is missing or not valid (Bing)'\n code = 400\n elif '401 ' in str(e):\n message = 'Unauthorized (Bing)'\n code = 401\n elif '403 ' in str(e):\n message = 'Forbidden (Bing)'\n code = 403\n elif '410 ' in str(e):\n message = 'Used HTTP instead of the HTTPS protocol (Bing)'\n code = 410\n elif '429 ' in str(e):\n message = 'Exceeded their queries per second quota (Bing)'\n code = 429\n else:\n message = 'Only students allowed'\n code = 403\n insert_general_record('search_bing/idchallenge/query', \n {'message': message, 'code': code , 'query': query}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\[email protected]('/library/add', methods=['POST'])\n@token_required\ndef library_add_resource_():\n \"\"\"\n Receive a challenge's data of a specific id number (INT)\n Return: message\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n message = 'You are not allowed to access this page'\n code = 500\n if (rol_ == 'Student'):\n try:\n idchallenge_ = 
str(request.get_json()['idchallenge'])\n text_ = str(request.get_json()['text'].encode('utf-8').strip())\n url = str(request.get_json()['url'].encode('utf-8').strip())\n # Check permission over challenge\n challenge = get_all_challenges_student(user_)\n challenge_allowed = False\n for x in challenge:\n if str(x[0]) == str(idchallenge_):\n challenge_allowed = True\n if not challenge_allowed:\n insert_general_record('library/add', \n {'message': 'Challenge not allowed', 'code': 403}\n ,user_)\n return jsonify({'message': 'Challenge not allowed', 'code': 403})\n # END Check permission over challenge\n # Check if exist a similar resource\n response = check_is_in_text_library(user_,idchallenge_, text_, url)\n if response:\n insert_general_record('library/add', \n {'message': 'The resource was previously registered', 'code': 400}\n ,user_)\n return jsonify({'message': 'The resource was previously registered', 'code': 400})\n\n # END Check if exist a similar resource\n date_request = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n bool_ = add_text_library(user_,idchallenge_, text_, url, date_request, 0)\n if bool_:\n code = 200\n message = 'The resource has been successfully registered'\n else:\n code = 400\n message = 'Something went wrong'\n except Exception as e:\n print e\n code = 500\n else:\n message = 'Only students allowed'\n code = 403\n insert_general_record('library/add', \n {'message': message, 'code': code}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\n\[email protected]('/library/accion', methods=['POST'])\n@token_required\ndef library_remove_resource_():\n \"\"\"\n Receive a challenge's data of a specific id number (INT)\n Return: message\n \"\"\"\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n message = 'You are not allowed to access this page'\n code = 500\n action_ = '-'\n if (rol_ == 'Student'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n text_ = str(request.get_json()['text'].encode('utf-8').strip())\n url = str(request.get_json()['url'].encode('utf-8').strip())\n action_ = str(request.get_json()['action'].encode('utf-8').strip())\n if 'block' == action_.lower():\n number_accion = 0\n elif 'hide' == action_.lower():\n number_accion = 1\n elif 'remove' == action_.lower():\n number_accion = 2\n else:\n insert_general_record('library/accion', \n {'message': 'Action not entered or not allowed', 'code': 400 , 'action': action_.lower()}\n ,user_)\n return jsonify({'message': 'Action not entered or not allowed', 'code': 400})\n # Check permission over challenge\n challenge = get_all_challenges_student(user_)\n challenge_allowed = False\n for x in challenge:\n if str(x[0]) == str(idchallenge_):\n challenge_allowed = True\n if not challenge_allowed:\n insert_general_record('library/accion', \n {'message': 'Challenge not allowed', 'code': 403 , 'action': action_.lower()}\n ,user_)\n return jsonify({'message': 'Challenge not allowed', 'code': 403})\n # END Check permission over challenge\n # Check if exist a similar resource\n response = check_is_in_text_library(user_,idchallenge_, text_, url)\n if response:\n bool_ = update_text_library(user_,idchallenge_, text_, url, number_accion)\n if bool_:\n code = 200\n message = 'The resource has been successfully modified'\n insert_general_record('library/accion', \n {'url': url, 'text': text_ , 'idchallenge': idchallenge_, 'code': 200 , 'action': action_.lower()},user_)\n return jsonify({'message': message, 'code': code})\n else:\n code = 400\n message = 'Something went 
wrong'\n except Exception as e:\n print e\n code = 500\n else:\n message = 'Only students allowed'\n code = 403\n insert_general_record('library/accion', \n {'message': message, 'code': code , 'action': action_.lower()}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\n\[email protected]('/getallmylibrary/<id_challenge>', methods=['GET'])\n@token_required\ndef getallmylibrary_(id_challenge):\n message = 'You are not allowed to access this page'\n code = 403\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n if (rol_ == 'Student'):\n message = getallmylibrary_by_challenge(user_,id_challenge)\n code = 200\n insert_general_record('getallmylibrary/[id]', \n {'message': message, 'code': code, 'idchallenge': id_challenge}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\n\[email protected]('/finish_challenge', methods=['POST'])\n@token_required\ndef close_challenge_():\n message = 'You are not allowed to access this page'\n code = 403\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n idchallenge_ = -1\n solution = '-'\n if (rol_ == 'Student'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n solution = str(request.get_json()['solution'].encode('utf-8').strip())\n # Check permission over challenge\n challenge = get_all_challenges_student(user_)\n challenge_allowed = False\n for x in challenge:\n if str(x[0]) == str(idchallenge_):\n challenge_allowed = True\n if not challenge_allowed:\n return jsonify({'message': 'Challenge not allowed', 'code': 400})\n # END Check permission over challenge\n # Check if the challenge is finished\n _bool , _sent_date = check_challenge_is_finished(idchallenge_, user_)\n if _bool:\n insert_general_record('finish_challenge', \n {'message': 'The final solution of the challenge has already been sent at ' + str(_sent_date), 'code': 403 , 'idchallenge':idchallenge_, 'response':solution}\n ,user_)\n return jsonify({'message': 'The final solution of the challenge has already been sent at ' + str(_sent_date), 'code': 403})\n date_request = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n bool_ , message = finish_challenge(idchallenge_, user_, solution, date_request)\n if bool_:\n code = 200\n else:\n code = 400\n #\n except Exception as e:\n raise e\n insert_general_record('finish_challenge', \n {'message': message, 'code': code , 'idchallenge':idchallenge_, 'response':solution}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\n\[email protected]('/new_response', methods=['POST'])\n@token_required\ndef new_response_():\n message = 'You are not allowed to access this page'\n code = 403\n data = get_info_token()\n user_ = data['User']\n rol_ = data['Rol']\n idchallenge_ = -1\n solution = '-'\n last_response = '-'\n if (rol_ == 'Student'):\n try:\n idchallenge_ = str(request.get_json()['idchallenge'])\n solution = str(request.get_json()['solution'].encode('utf-8').strip())\n # Check permission over challenge\n challenge = get_all_challenges_student(user_)\n challenge_allowed = False\n for x in challenge:\n if str(x[0]) == str(idchallenge_):\n challenge_allowed = True\n if not challenge_allowed:\n return jsonify({'message': 'Challenge not allowed', 'code': 400})\n # END Check permission over challenge\n # Check if the challenge is finished\n _bool , _sent_date = check_challenge_is_finished(idchallenge_, user_)\n if _bool:\n insert_general_record('new_response', \n {'message': 'The final solution of the challenge has already been sent at ' + str(_sent_date), 'code': 
403 , 'idchallenge':idchallenge_, 'response':solution}\n ,user_)\n return jsonify({'message': 'The final solution of the challenge has already been sent at ' + str(_sent_date), 'code': 403})\n last_response = str(Get_last_response_challenge(idchallenge_, user_)[0])\n bool_ = new_response(solution, idchallenge_, user_)\n if bool_:\n code = 200\n message = 'The response was entered successfully'\n else:\n code = 400\n message = 'Data entered with errors'\n #\n except Exception as e:\n raise e\n \n insert_general_record('new_response', \n {'message': message, 'code': code , 'idchallenge':idchallenge_, 'prev_response':last_response , 'response':solution}\n ,user_)\n return jsonify({'message': message, 'code': code})\n\[email protected]('/reg_event', methods=['POST'])\n@token_required\ndef reg_event_():\n user_ = get_info_token()['User']\n try:\n data_ = str(request.get_json()['data'].encode('utf-8').strip())\n insert_general_record('event_registered', data_,user_)\n return jsonify({'message': 'ok', 'code': 200})\n except Exception as e:\n print 'eee'\n print e\n return jsonify({'message': 'Bad request', 'code': 401})\n \n\n############################## END ONLY STUDENT #############################\n\n############################## ONLY ADMINISTRADOR #############################\[email protected]('/a_getallusers', methods=['GET'])\n@token_required\ndef a_getallusers():\n data = get_info_token()\n message = 'You are not allowed to access this page'\n code = 403\n users = []\n if (data['Rol'] == 'Administrador'):\n users = a_get_all_users()\n message = 'Ok'\n code = 200\n insert_general_record('a_getallusers', \n {'message': message, 'users': len(users),'code': code}\n ,data['User'])\n return jsonify({'message': message, 'users': users,'code': code})\n############################## END ONLY ADMINISTRADOR #############################\n\n\nif __name__ == '__main__':\n app.run(debug=c.get_api_debug(),\n host=c.get_api_host(), port=c.get_api_port())\n" }, { "alpha_fraction": 0.6315740942955017, "alphanum_fraction": 0.6332378387451172, "avg_line_length": 37.64285659790039, "blob_id": "fa0bae95ca977f4ee35c1333a1f72c8bc93ec902", "content_id": "aadb42a4ad596d4361d88eedcbafa4a67fbb8dd0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10819, "license_type": "permissive", "max_line_length": 320, "num_lines": 280, "path": "/models/challenges.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport mysql.connector\nfrom datetime import datetime\nfrom settings.config import Database, Config\n\n\ndef itwasintializedbyanystudent(idchallenge):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT FK_challenge_id_number FROM Challenge_last_activity WHERE FK_challenge_id_number = %s\"\n data = (idchallenge,)\n cursor.execute(query,data)\n for (FK_challenge_id_number) in cursor:\n return True\n cursor.close()\n cnx.close()\n return False\n\ndef get_all_challenges_p(nick):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category FROM all_challenges WHERE owner_fk_nick = %s\"\n data = (nick,)\n cursor.execute(query,data)\n r = []\n for 
(id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category) in cursor:\n r.append([id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category])\n cursor.close()\n cnx.close()\n return r\n\ndef get_all_challenges_in_class(idclass_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT ac.id_number, ac.title , ac.photourl, ac.summary, ac.description, ac.aim, ac.created, ac.last_edit, ac.owner_fk_nick, ac.fk_category FROM class_challenges as cc INNER JOIN all_challenges as ac WHERE cc.FK_class_id_number = %s AND cc.FK_challenge_id_number = ac.id_number order by ac.created DESC\"\n data = (idclass_,)\n cursor.execute(query, data)\n r = []\n for (id_number, title, photourl, summary, description, aim, created, last_edit, owner_fk_nick, fk_category) in cursor:\n temp = [id_number, title, photourl, summary, description, aim, created, last_edit, owner_fk_nick, fk_category]\n r.append(temp)\n cursor.close()\n cnx.close()\n return r\n except Exception as e:\n print e\n\ndef get_challenge_by_id_p(nick, id_number):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category FROM all_challenges WHERE owner_fk_nick = %s AND id_number=%s\"\n data = (nick, id_number,)\n cursor.execute(query,data)\n r = []\n for (id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category) in cursor:\n r.append([id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category])\n return True , r\n cursor.close()\n cnx.close()\n return False, r\n\ndef get_all_challenges_p_by_cat(nick, category):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category FROM all_challenges WHERE owner_fk_nick = %s AND fk_category = %s\"\n data = (nick,category,)\n cursor.execute(query,data)\n r = []\n for (id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category) in cursor:\n r.append([id_number,token_challenge,title,photourl,summary,description,aim,created,last_edit,fk_category])\n cursor.close()\n cnx.close()\n return r\n\ndef challenge_is_initialized(id_challenge_, nick_student_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT finalized FROM Challenge_last_activity WHERE FK_challenge_id_number = %s AND FK_student_nick = %s\"\n data = (id_challenge_, nick_student_,)\n cursor.execute(query,data)\n for (finalized) in cursor:\n if int(finalized[0]) == 0:\n add_interaction(id_challenge_, nick_student_)\n return True , finalized\n # if finalized -> 0: NO | 1: YES\n cursor.close()\n cnx.close()\n challenge_init(id_challenge_, nick_student_)\n return False, (u'0',)\n\n\ndef challenge_init(id_challenge_, nick_student_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"INSERT INTO Challenge_last_activity(FK_student_nick ,FK_challenge_id_number ,last_response, number_of_interaction, init_date, finalized) VALUES (%s, %s, %s, %s, 
%s,%s);\"\n data = (nick_student_,id_challenge_, '', 1, now, 0)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef Get_last_response_challenge(id_challenge_, nick_student_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT last_response FROM Challenge_last_activity WHERE FK_challenge_id_number = %s AND FK_student_nick = %s\"\n data = (id_challenge_, nick_student_,)\n cursor.execute(query,data)\n for (last_response) in cursor:\n return last_response\n cursor.close()\n cnx.close()\n return (u'-',)\n\n\ndef new_response(response_ ,id_challenge_, nick_student_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE Challenge_last_activity SET number_of_interaction = number_of_interaction + 1, last_response=%s WHERE FK_challenge_id_number =%s AND FK_student_nick =%s AND finalized = 0\"\n data = (response_,id_challenge_, nick_student_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef check_challenge_is_finished(id_challenge_, nick_student_):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT finalized, end_date FROM Challenge_last_activity WHERE FK_challenge_id_number = %s AND FK_student_nick = %s\"\n data = (id_challenge_, nick_student_,)\n cursor.execute(query,data)\n for (finalized, end_date) in cursor:\n if int(finalized[0]) == 1:\n return True , end_date\n return False , '-'\n # if finalized -> 0: NO | 1: YES\n cursor.close()\n cnx.close()\n return False , '-'\n\n\ndef finish_challenge(id_challenge_, nick_student_, last_response, date_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE Challenge_last_activity SET last_response =%s, finalized = %s, end_date=%s WHERE FK_challenge_id_number =%s AND FK_student_nick =%s;\"\n data = (last_response, '1', date_, id_challenge_, nick_student_,)\n cursor.execute(query, data)\n cnx.commit()\n row = cursor.rowcount\n cnx.close()\n if row >= 1:\n add_interaction(id_challenge_, nick_student_)\n return True , 'The challenge has been finished'\n else:\n return True , 'No changes detected in the challenge'\n except Exception as e:\n print e\n return False, 'Something went wrong'\n\ndef add_interaction(id_challenge_, nick_student_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE Challenge_last_activity SET number_of_interaction = number_of_interaction + 1 WHERE FK_student_nick = %s AND FK_challenge_id_number=%s\" \n data = (nick_student_,id_challenge_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\n\ndef log_student_queries(nick_student_, id_challenge_ , query_, now_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"INSERT INTO Students_query(FK_student_nick ,FK_challenge_id_number ,query, date_executed ) VALUES (%s, %s, %s, %s);\"\n data = 
(nick_student_, id_challenge_, query_, now_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef get_id_number_query(nick_student_, id_challenge_ , query_, now):\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number FROM Students_query WHERE FK_student_nick=%s AND FK_challenge_id_number =%s AND query =%s AND date_executed =%s;\"\n data = (nick_student_, id_challenge_ , query_, now,)\n cursor.execute(query,data)\n for (id_number) in cursor:\n return id_number\n cursor.close()\n cnx.close()\n return [0]\n\ndef insert_result_query(FK_id_number_student_query, position_, lang_, title_, snippet_, url_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"INSERT INTO Students_queries_results(FK_student_query_id_number ,position , lang, title, snippet, url ) VALUES (%s, %s, %s, %s, %s, %s);\"\n data = (FK_id_number_student_query, position_, lang_, title_, snippet_, url_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False\n\ndef insert_related_search_query(FK_id_number_student_query, position_, query_text):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"INSERT INTO Students_related_search(FK_student_query_id_number ,position , textquery ) VALUES (%s, %s, %s);\"\n data = (FK_id_number_student_query, position_, query_text,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True \n except Exception as e:\n print e\n return False" }, { "alpha_fraction": 0.6476621627807617, "alphanum_fraction": 0.650377094745636, "avg_line_length": 33.894737243652344, "blob_id": "4878ed6c18608fd3b5938c28bd7a3f2714cee81b", "content_id": "45bb6a8c9fd3de2214990d1546840c7021f35694", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3315, "license_type": "permissive", "max_line_length": 84, "num_lines": 95, "path": "/functions/email_sender.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport smtplib\nimport os.path\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nfrom email.MIMEImage import MIMEImage\nfrom settings.config import Config, emailconfig\n\ndef root_dir(): # pragma: no cover\n return os.path.abspath(os.path.dirname(__file__))\n\ndef type_of_emails(request_type):\n\temail_content = ''\n\tn_parameters_need = -1\n\tif request_type == 'new_account':\n\t\t# 5 parameters\n\t\t# Platform name, Platform name, Url to login, User, Password\n\t\tn_parameters_need = 5\n\t\tfound = True\n\t\twith open(os.path.join(root_dir(), 'emails/new_account.html')) as file:\n\t\t\temail_content = file.read().replace('\\n', '')\n\telif request_type == 'pass_changed':\n\t\t# 6 parameters\n\t\t# Platform name, name , username, ip, datetime, email contact\n\t\tn_parameters_need = 6\n\t\tfound = True\n\t\twith open(os.path.join(root_dir(), 'emails/pass_changed.html')) as file:\n\t\t\temail_content = file.read().replace('\\n', '')\n\telif request_type == 'password_recovery':\n\t\t# 5 parameters\n\t\t# Platform name, username, url+token, ip, datetime\n\t\tn_parameters_need = 5\n\t\tfound = 
True\n\t\twith open(os.path.join(root_dir(), 'emails/password_recovery.html')) as file:\n\t\t\temail_content = file.read().replace('\n', '')\n\telse:\n\t\tfound = False\n\treturn email_content, found, n_parameters_need\n\n\n\n\ndef email_to_send(parameters_, type_email, Subject, destine_email):\n try:\n \temail_conf = emailconfig()\n \tconfig = Config()\n strFrom = email_conf.get_user()\n strTo = destine_email\n\n msgRoot = MIMEMultipart('related')\n msgRoot['Subject'] = Subject + ' - ' + config.get_platform_name()\n msgRoot['From'] = strFrom\n msgRoot['To'] = strTo\n msgRoot.preamble = 'This is a multi-part message in MIME format.'\n\n msgAlternative = MIMEMultipart('alternative')\n msgRoot.attach(msgAlternative)\n\n msgText = MIMEText('This is the alternative plain text message.')\n msgAlternative.attach(msgText)\n\n # We reference the image in the IMG SRC attribute by the ID we give it below\n email_content, found, n_parameters = type_of_emails(type_email)\n print len(parameters_)\n if not found:\n \treturn [False, 'Type of email not found']\n if len(parameters_) != n_parameters:\n \treturn [False, 'The number of parameters entered are not valid']\n final_email = email_content.format(*parameters_)\n msgText = MIMEText(final_email, 'html')\n msgAlternative.attach(msgText)\n\n # This example assumes the image is in the current directory\n fp = file(os.path.join(root_dir(), 'emails/logo.png'), 'rb')\n msgImage = MIMEImage(fp.read(), _subtype=\"png\")\n\n fp.close()\n\n # Define the image's ID as referenced above\n msgImage.add_header('Content-ID', '<image1>')\n msgRoot.attach(msgImage)\n\n # Send the email (this example assumes SMTP authentication is required)\n smtp = smtplib.SMTP_SSL(email_conf.get_url(), email_conf.get_port())\n smtp.ehlo()\n smtp.login(strFrom, email_conf.get_pass())\n smtp.sendmail(strFrom, strTo, msgRoot.as_string())\n\n smtp.quit()\n print \n return [True, 'sent******']\n except Exception as e:\n \tprint e\n \treturn [False, 'NOT SENT']\n" }, { "alpha_fraction": 0.674265444278717, "alphanum_fraction": 0.7117527723312378, "avg_line_length": 33.440677642822266, "blob_id": "dd229df6f96c96593f931d8c1dddff3edd2bd87d", "content_id": "549814b612440c8a796c9cc499011ab51d30180b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1974, "license_type": "permissive", "max_line_length": 120, "num_lines": 59, "path": "/functions/general_functions.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import bcrypt\nimport hashlib, binascii\nimport random, string\nimport base64\nimport os \nUPLOAD_FOLDER = '/var/www/api_gonsa2/static/uploads/'\n\ndef random_salt():\n return bcrypt.gensalt()\n\ndef encrypt_pass(passw , salt):\n\tdk = hashlib.pbkdf2_hmac('sha256', passw, salt, 9503)\n\treturn binascii.hexlify(dk)\n\ndef get_random(number):\n return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(number))\n\ndef get_random_num(number):\n return ''.join(random.choice(string.digits) for x in range(number))\n\ndef get_random_str(number):\n return ''.join(random.choice(string.ascii_letters) for x in range(number))\n\ndef convert_and_save_image(b64_string, user_, class_):\n\ttry:\n\n\t\timagenbase64 = b64_string[b64_string.find(\",\")+1:]\n\t\timage_url = get_random(10)\n\t\tif not os.path.exists(UPLOAD_FOLDER +class_):\n\t\t\tos.makedirs(UPLOAD_FOLDER +class_)\n\t\tif not os.path.exists(UPLOAD_FOLDER + str(class_+'/'+user_)):\n\t\t\tos.makedirs(UPLOAD_FOLDER 
+str(class_+'/'+user_))\n\n\t\twith open(UPLOAD_FOLDER+ str(class_+'/'+user_+'/'+image_url)+\".png\", \"wb+\") as fh:\n\t\t\tfh.write(base64.decodestring(imagenbase64.encode('ascii')))\n\t\treturn str('static/uploads/'+class_+'/'+user_+'/'+image_url)+\".png\"\n\texcept Exception as e:\n\t\tprint e\n \n\ndef search_term_bing(query):\n\timport requests\n\timport json\n\t\n\tsubscription_key = 'b3d290afca044f65bc0601f2fbf5df04'\n\t#Key1: b3d290afca044f65bc0601f2fbf5df04\n\t#Key2: cd7124bd63674919b13d8702f656124e\n\tassert subscription_key\n\tsearch_url = \"https://api.cognitive.microsoft.com/bing/v7.0/search\"\n\t#https://api.cognitive.microsoft.com/bing/v7.0/search[?q][&count][&offset][&mkt][&safesearch]\n\t#https://dev.cognitive.microsoft.com/docs/services/f40197291cd14401b93a478716e818bf/operations/56b4447dcf5ff8098cef380d\n\theaders = {\"Ocp-Apim-Subscription-Key\" : subscription_key}\n\tparams = {\"q\": query, \"count\":50, \"textDecorations\":True, \"textFormat\":\"HTML\"}\n\tresponse = requests.get(search_url, headers=headers, params=params)\n\tresponse.raise_for_status()\n\treturn response.json()\n\nclass MyException(Exception):\n pass\n\n" }, { "alpha_fraction": 0.5603745579719543, "alphanum_fraction": 0.5677673816680908, "avg_line_length": 21.55555534362793, "blob_id": "3a985921ec015d4a7ed6404638ec5acd70e3cf73", "content_id": "8ffc1cc883b809d89adad646891a22ade7c7ca06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2029, "license_type": "permissive", "max_line_length": 60, "num_lines": 90, "path": "/settings/config.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import datetime\n\nclass emailconfig():\n\n def __init__(self):\n ####### email settings ########\n self.emailuser = 'XXXXXXXXXXXXXXXXX'\n self.emailpass = 'YYYYYYYYYYYYYYYYY'\n self.emailserurl = 'smtp.gmail.com'\n self.emailserport = 465\n \n\n def get_user(self):\n return self.emailuser\n\n def get_pass(self):\n return self.emailpass\n\n def get_url(self):\n return self.emailserurl\n\n def get_port(self):\n return self.emailserport\n\n\nclass Database:\n\n config = None\n\n def __init__(self):\n # To connect BD\n self.user = 'XXXXXXXXXXXXXXXXX'\n self.password = 'XXXXXXXXXXXXXXXXX'\n self.host = 'localhost'\n self.database_name = 'XXXXXXXXXXXXXXXXX'\n\n # DB\n self.config = {'user': self.user,\n 'password': self.password,\n 'host': self.host,\n 'database': self.database_name}\n\n\nclass Config:\n\n def __init__(self):\n # JWT Settings\n self.api_jwt_key = 'XXXXXXXXXXXXXXXXX'\n self.api_jwt_time = datetime.timedelta(minutes=1800)\n\n # General Settings\n self.platform_name = 'GonSA2'\n\n self.api_host = 'http://tera.uach.cl'\n self.api_port = 8080\n self.api_debug = True\n\n self.web_url = 'http://tera.uach.cl'\n\n self.email_to_contact = '[email protected]'\n\n #How along the token to recovery password is valid\n self.token_recovery_time = 24*7 # hours\n\n def get_jwt_key(self):\n return self.api_jwt_key\n\n def get_jwt_time(self):\n return self.api_jwt_time\n\n def get_platform_name(self):\n return self.platform_name\n\n def get_api_host(self):\n return self.api_host\n\n def get_api_port(self):\n return self.api_port\n\n def get_api_debug(self):\n return self.api_debug\n\n def get_web_url(self):\n return self.web_url\n\n def get_email_contact(self):\n return self.email_to_contact\n\n def get_token_exp(self):\n return self.token_recovery_time" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 
0.7647058963775635, "avg_line_length": 16, "blob_id": "8608d661c7975971f712c05dfada7ac4ef1d2a9d", "content_id": "d35a3e66f9932c74febafa756d1134704647430c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "permissive", "max_line_length": 16, "num_lines": 2, "path": "/README.md", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "# gonsa2_backend\n# gonsa2_backend\n" }, { "alpha_fraction": 0.6634944677352905, "alphanum_fraction": 0.6718690395355225, "avg_line_length": 28.200000762939453, "blob_id": "f65577670320e9d5b240aeaa09222f59f3e23623", "content_id": "8ddebff593e10d9dae4d49d46eeca037b9df6ece", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2627, "license_type": "permissive", "max_line_length": 100, "num_lines": 90, "path": "/functions/Verifiers.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import re\nfrom models.users import login_user\n\nclass config_pass:\n\n\tdef __init__(self):\n\t\tself.min_length = 6\n\t\tself.need_number = False\n\t\tself.need_uppercase = False\n\t\tself.need_lowercase = False\n\t\tself.need_symbol = False\n\n\tdef get_min_length(self):\n\t\treturn self.min_length\n\n\tdef get_need_number(self, pass_):\n\t\tif self.need_number:\n\t\t\treturn (re.search(r\"\\d\", pass_) is None)\n\t\treturn False\n\n\tdef get_need_uppercase(self, pass_):\n\t\tif self.need_uppercase:\n\t\t\treturn (re.search(r\"[A-Z]\", pass_) is None)\n\t\treturn False \n\t\n\tdef get_need_lowercase(self, pass_):\n\t\tif self.need_lowercase:\n\t\t\treturn (re.search(r\"[a-z]\", pass_) is None)\n\t\treturn False\n\n\tdef get_need_symbol(self, pass_):\n\t\tif self.need_symbol:\n\t\t\treturn (re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', pass_) is None)\n\t\treturn False\n\n\ndef verify_pass_conditions(pass_):\n\tc = config_pass()\n\tif (len(pass_) < c.get_min_length()):\n\t\t# Checking the length\n\t\treturn False , \"The new password contains less than 6 characters\"\n\telif c.get_need_number(pass_):\n\t\t# Searching for digits\n\t\treturn False , \"Must contain at least one number\"\n\telif c.get_need_symbol(pass_):\n\t\t# Searching for symbols\n\t\treturn False , \"Must contain at least one symbol\"\n\telif c.get_need_lowercase(pass_):\n\t\t# Searching for lowercase\n\t\treturn False , \"Must contain at least one lowercase\"\n\telif c.get_need_uppercase(pass_):\n\t\t# Searching for uppercase\n\t\treturn False , \"Must contain at least one uppercase\"\n\telse:\n\t\t# All ok\n\t\treturn True, \"\"\n\ndef verify_new_pass(user_, old_pass, new_pass1, new_pass2):\n\tc = config_pass()\n\tif (new_pass1 != new_pass2):\n\t\t# Checking the new passwords\n\t\treturn False , \"The new passwords doesn't match\"\n\telif not (login_user(user_, old_pass)):\n\t\t# Checking password \n\t\treturn False , \"The old password is not correct\"\n\telif (len(new_pass1) < c.get_min_length()):\n\t\t# Checking the length\n\t\treturn False , \"The new password contains less than 6 characters\"\n\telif c.get_need_number(new_pass1):\n\t\t# Searching for digits\n\t\treturn False , \"Must contain at least one number\"\n\telif c.get_need_symbol(new_pass1):\n\t\t# Searching for symbols\n\t\treturn False , \"Must contain at least one symbol\"\n\telif c.get_need_lowercase(new_pass1):\n\t\t# Searching for lowercase\n\t\treturn False , \"Must contain at least one lowercase\"\n\telif 
c.get_need_uppercase(new_pass1):\n\t\t# Searching for uppercase\n\t\treturn False , \"Must contain at least one uppercase\"\n\telse:\n\t\t# All ok\n\t\treturn True, \"\"\n\ndef validemail(email):\n    match = re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$', email)\n    if match == None:\n        return False\n    else:\n        return True" }, { "alpha_fraction": 0.6020824313163757, "alphanum_fraction": 0.6029087901115417, "avg_line_length": 36.50413131713867, "blob_id": "a08a207c6046cd6e1c69a794bfec54537c6d68bc", "content_id": "274960d452af825e13d6b1e54dac6a309bdf1a01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18152, "license_type": "permissive", "max_line_length": 334, "num_lines": 484, "path": "/models/only_professor.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\nfrom datetime import datetime\nfrom functions.general_functions import *\n\ndef new_class(school_, identificator_, class_, year_, _FK_owner_nick):\n    try:\n        c = Config()\n        database_ = Database()\n        config = database_.config\n        cnx = mysql.connector.connect(**config)\n        cursor = cnx.cursor()\n        now = datetime.now()\n        query = \"INSERT INTO class_list(school, identificator, class, year, status, d_creation,FK_owner_nick) VALUES (%s, %s, %s,%s, %s, %s, %s);\"\n        data = (school_, identificator_, class_, year_,'active', now, str(_FK_owner_nick),)\n        cursor.execute(query, data)\n        cnx.commit()\n        cnx.close()\n        return True , 'The class has been successfully entered'\n    except Exception as e:\n        print e\n        return False, 'Something went wrong'\n\ndef get_id_challenge_by_token(token_):\n    try:\n        database_ = Database()\n        config = database_.config\n        cnx = mysql.connector.connect(**config)\n        cursor = cnx.cursor()\n        query = \"SELECT id_number, token_challenge FROM all_challenges WHERE token_challenge = %s;\"\n        data = (token_,)\n        cursor.execute(query, data)\n        for (id_number, token_challenge) in cursor:\n            return int(id_number)\n        cursor.close()\n        cnx.close()\n        return 0\n    except Exception as e:\n        print e\n        return 0\n\ndef new_permission(id_challenge_, nick_user):\n    try:\n        c = Config()\n        database_ = Database()\n        config = database_.config\n        cnx = mysql.connector.connect(**config)\n        cursor = cnx.cursor()\n        now = datetime.now()\n        query = \"INSERT INTO evaluation_permissions(FK_all_challenges_id,FK_nick_evaluator) VALUES (%s, %s);\"\n        data = (id_challenge_, nick_user,)\n        cursor.execute(query, data)\n        cnx.commit()\n        cnx.close()\n    except Exception as e:\n        print e\n\ndef new_challenge_p(title_, summary_, description_, aims_, photo_, owner_ ,category_):\n    try:\n        c = Config()\n        database_ = Database()\n        config = database_.config\n        cnx = 
mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n token_ = str(get_random_num(4)) + '-' + str(get_random_str(4))\n query = \"INSERT INTO all_challenges(title,token_challenge, photourl, summary, description, aim, created, owner_fk_nick, fk_category) VALUES (%s, %s, %s, %s, %s,%s, %s, %s, %s);\"\n data = (str(title_), str(token_), str(photo_), str(summary_), str(description_), str(aims_), now, str(owner_), str(category_),)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True , token_\n except Exception as e:\n print e\n return False, token_\n\ndef get_all_classes_professor(_FK_owner_nick):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT id_number, school, identificator, class AS class_, year, status FROM class_list WHERE FK_owner_nick = %s ORDER BY d_creation DESC;\"\n\t\tdata = (_FK_owner_nick,)\n\t\tcursor.execute(query, data)\n\t\tr = []\n\t\tfor (id_number, school, identificator, class_, year, status) in cursor:\n\t\t\ttemp = [id_number, school, identificator, class_, year, status]\n\t\t\tr.append(temp)\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn r\n\texcept Exception as e:\n\t\tprint e\n\ndef get_all_students_in_class_by_id(id_,_FK_owner_nick):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT u.nick as nick , u.email as email, u.sex as sex, u.school as school, u.class as classs, u.fk_country as country FROM users as u INNER JOIN classes as c INNER JOIN class_list as cl WHERE c.FK_student_nick = u.nick AND c.FK_class_id_number = %s AND cl.id_number = c.FK_class_id_number AND cl.FK_owner_nick = %s;\"\n data = (id_,_FK_owner_nick,)\n cursor.execute(query, data)\n r = []\n for (nick, email, sex, school, classs, country) in cursor:\n temp = [nick, email, sex, school, classs, country]\n r.append(temp)\n cursor.close()\n cnx.close()\n return r\n except Exception as e:\n print e\n\ndef it_s_my_class_professor(_FK_owner_nick, id_number_):\n\ttry:\n\t\tdatabase_ = Database()\n\t\tconfig = database_.config\n\t\tcnx = mysql.connector.connect(**config)\n\t\tcursor = cnx.cursor()\n\t\tquery = \"SELECT status FROM class_list WHERE FK_owner_nick = %s AND id_number = %s ORDER BY d_creation DESC;\"\n\t\tdata = (_FK_owner_nick,id_number_,)\n\t\tcursor.execute(query, data)\n\t\tfor (status) in cursor:\n\t\t\treturn True\n\t\tcursor.close()\n\t\tcnx.close()\n\t\treturn False\n\texcept Exception as e:\n\t\tprint e\n\t\treturn False\n\ndef add_student_to_class_professor(FK_class_id_number_, FK_student_nick_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"INSERT INTO classes(FK_class_id_number, FK_student_nick) VALUES (%s, %s);\"\n data = (FK_class_id_number_, FK_student_nick_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n # FALTA - enlazar estudiantes al desafio\n return True , 'The student has been successfully entered in the class'\n except Exception as e:\n if ('Duplicate entry' in str(e)) and ('for key' in str(e)):\n \treturn False, 'The student was previously registered in this class'\n else:\n \treturn False, 'Something went wrong'\n\ndef add_challenge_to_class_professor(FK_class_id_number_, FK_challenge_id_number_ ):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx 
= mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"INSERT INTO class_challenges(FK_class_id_number, FK_challenge_id_number ) VALUES (%s, %s);\"\n data = (FK_class_id_number_, FK_challenge_id_number_,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n # pendiente - enlazar estudiantes al desafio\n return True , 'The challenge has been successfully entered in the class'\n except Exception as e:\n if ('Duplicate entry' in str(e)) and ('for key' in str(e)):\n \treturn False, 'The challenge was previously registered in this class'\n else:\n \treturn False, 'Something went wrong'\n\ndef remove_student_to_class_professor(FK_class_id_number_, FK_student_nick_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"DELETE FROM classes WHERE FK_class_id_number=%s AND FK_student_nick = %s;\"\n data = (FK_class_id_number_, FK_student_nick_,)\n cursor.execute(query, data)\n row = cursor.rowcount\n cnx.commit()\n cnx.close()\n # pendiente - remover estudiantes al desafio\n if row >= 1:\n \treturn True , 'The student has been unlinked successfully of this class'\n else:\n \treturn True , 'No student was unlinked'\n except Exception as e:\n \treturn False, 'Something went wrong'\t\n\ndef remove_challenge_to_class_professor(FK_class_id_number_, FK_challenge_id_number_):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"DELETE FROM class_challenges WHERE FK_class_id_number=%s AND FK_challenge_id_number = %s;\"\n data = (FK_class_id_number_, FK_challenge_id_number_,)\n cursor.execute(query, data)\n row = cursor.rowcount\n cnx.commit()\n cnx.close()\n # pendiente - remover estudiantes al desafio\n if row >= 1:\n \treturn True , 'The challenge has been delete successfully of this class'\n else:\n \treturn True , 'No challenge was eliminated'\n except Exception as e:\n \treturn False, 'Something went wrong'\n\ndef edit_challenge_professor(id_number_challenge, title, summary, description, aim, category , FK_owner_nick):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE all_challenges SET title =%s, summary =%s, description =%s, aim =%s, last_edit=%s, fk_category =%s WHERE id_number =%s AND owner_fk_nick =%s;\"\n data = (title, summary, description, aim, now, category, id_number_challenge, FK_owner_nick,)\n cursor.execute(query, data)\n cnx.commit()\n row = cursor.rowcount\n cnx.close()\n # pendiente - remover estudiantes al desafio\n if row >= 1:\n \treturn True , 'The challenge has been edited successfully'\n else:\n \treturn True , 'No changes detected in the challenge'\n except Exception as e:\n \tprint e\n \treturn False, 'Something went wrong'\n\n\ndef edit_class_info_professor(id_class_, school_, identificator_, class_, year_, FK_owner_nick):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"UPDATE class_list SET school =%s, identificator =%s, class =%s, year =%s, d_last_modification =%s WHERE id_number =%s AND FK_owner_nick =%s;\"\n data = (school_, identificator_, class_, year_, now, id_class_, FK_owner_nick,)\n cursor.execute(query, data)\n 
cnx.commit()\n row = cursor.rowcount\n cnx.close()\n if row >= 1:\n return True , 'The class information, has been edited successfully'\n else:\n return True , 'No changes detected in the class'\n except Exception as e:\n print e\n return False, 'Something went wrong'\n\n\ndef getclassbyid_(_FK_owner_nick, id_number_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number, school, identificator, class as clss, year, d_creation, d_last_modification, FK_owner_nick FROM class_list WHERE FK_owner_nick = %s AND id_number = %s \"\n data = (_FK_owner_nick,id_number_,)\n cursor.execute(query, data)\n for (id_number, school, identificator, clss, year, d_creation, d_last_modification, FK_owner_nick) in cursor:\n return True , [id_number, school, identificator, clss, year, d_creation, d_last_modification, FK_owner_nick]\n cursor.close()\n cnx.close()\n return False, []\n except Exception as e:\n print e\n return False, []\n\n\ndef user_exist_tocreate_user(nick_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT nick FROM users WHERE nick = %s \"\n data = (nick_,)\n cursor.execute(query, data)\n for (nick) in cursor:\n return True\n cursor.close()\n cnx.close()\n return False\n except Exception as e:\n print e\n return True\n\ndef email_exist_tocreate_user(email_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT email FROM users WHERE email = %s \"\n data = (email_,)\n cursor.execute(query, data)\n for (email) in cursor:\n return True\n cursor.close()\n cnx.close()\n return False\n except Exception as e:\n print e\n return True\n\ndef country_exist_tocreate_user(country_):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT country FROM countries WHERE country = %s \"\n data = (country_,)\n cursor.execute(query, data)\n for (country) in cursor:\n return True\n cursor.close()\n cnx.close()\n return False\n except Exception as e:\n print e\n return False\n\ndef challenge_last_activity_finalized(id_response):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT FK_challenge_id_number, number_of_interaction FROM Challenge_last_activity WHERE id_number = %s AND finalized = %s \"\n data = (id_response, '1',)\n cursor.execute(query, data)\n for (FK_challenge_id_number, number_of_interaction) in cursor:\n return True , FK_challenge_id_number\n cursor.close()\n cnx.close()\n return False , 0\n except Exception as e:\n print e\n return False, 0\n\ndef have_permisssions_student_resp(id_challenge_ , FK_nick_evaluator):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT FK_all_challenges_id FROM evaluation_permissions WHERE FK_all_challenges_id = %s AND FK_nick_evaluator = %s \"\n data = (id_challenge_ , FK_nick_evaluator,)\n cursor.execute(query, data)\n for (FK_all_challenges_id) in cursor:\n return True \n cursor.close()\n cnx.close()\n return False \n except Exception as e:\n print e\n return False\n\ndef it_challenge_has_been_eva(id_last_activity , FK_nick_evaluator):\n try:\n database_ = Database()\n config = database_.config\n cnx = 
mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT marks, FK_nick_evaluator FROM evaluation_marks WHERE FK_challenge_last_activity_id = %s AND FK_nick_evaluator = %s \"\n data = (id_last_activity , FK_nick_evaluator,)\n cursor.execute(query, data)\n for (marks, FK_nick_evaluator) in cursor:\n return True , marks\n cursor.close()\n cnx.close()\n return False , -1\n except Exception as e:\n print e\n return False, -1\n\ndef insert_new_mark_challenge_last_activity(id_last_activity, marks, nick_evaluator):\n try:\n c = Config()\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n now = datetime.now()\n query = \"INSERT INTO evaluation_marks(FK_challenge_last_activity_id,marks,FK_nick_evaluator) VALUES (%s, %s, %s);\"\n data = (id_last_activity, marks, nick_evaluator,)\n cursor.execute(query, data)\n cnx.commit()\n cnx.close()\n return True\n except Exception as e:\n return False\n print e\n\ndef get_all_students_by_id_challenge(id_challenge):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT classes.FK_student_nick FROM class_challenges as cc INNER JOIN classes WHERE classes.FK_class_id_number = cc.FK_class_id_number AND cc.FK_challenge_id_number = %s \"\n data = (id_challenge,)\n cursor.execute(query, data)\n r = []\n for (FK_student_nick) in cursor:\n r.append(FK_student_nick)\n cursor.close()\n cnx.close()\n return r\n except Exception as e:\n print e\n return []\n\ndef get_all_nicks_tudents():\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT nick, email FROM users WHERE fk_roluser = %s \"\n data = ('Student',)\n cursor.execute(query, data)\n r = []\n for (nick, email) in cursor:\n r.append([nick])\n cursor.close()\n cnx.close()\n return r\n except Exception as e:\n print e\n return []\n\ndef student_init_it(student_nick,id_challenge):\n try:\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT id_number, FK_student_nick, FK_challenge_id_number, last_response, number_of_interaction, init_date, end_date, finalized FROM Challenge_last_activity WHERE FK_student_nick = %s AND FK_challenge_id_number = %s \"\n data = (student_nick,id_challenge,)\n cursor.execute(query, data)\n for (id_number, FK_student_nick, FK_challenge_id_number, last_response, number_of_interaction, init_date, end_date, finalized) in cursor:\n return True , [id_number, FK_student_nick, FK_challenge_id_number, last_response, number_of_interaction, init_date, end_date, finalized]\n cursor.close()\n cnx.close()\n return False, []\n except Exception as e:\n print e\n return False, []\n" }, { "alpha_fraction": 0.4554707407951355, "alphanum_fraction": 0.6921119689941406, "avg_line_length": 15.375, "blob_id": "a70bbb706a1bd1a982837c3b9fb98704e70c3df5", "content_id": "dbb02b52768a627509686f8c9eedca5439c2983a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 393, "license_type": "permissive", "max_line_length": 23, "num_lines": 24, "path": "/requirements.txt", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": 
"asn1crypto==0.24.0\ncertifi==2018.10.15\ncffi==1.11.5\nClick==7.0\ncryptography==1.7.2\nenum34==1.1.6\nFlask==1.0.2\nFlask-Cors==3.0.7\nidna==2.7\nipaddress==1.0.22\nitsdangerous==1.1.0\nJinja2==2.10.3\njwt==0.5.2\nMarkupSafe==1.1.0\nmysql-connector==2.1.6\npipenv==2018.11.14\npyasn1==0.4.4\npycparser==2.19\nPyJWT==1.6.4\nsix==1.11.0\ntyping==3.5.3.0\nvirtualenv==16.1.0\nvirtualenv-clone==0.4.0\nWerkzeug==0.16.0\n" }, { "alpha_fraction": 0.6647564172744751, "alphanum_fraction": 0.6676217913627625, "avg_line_length": 40.117645263671875, "blob_id": "f5d2f12a8c88d5aa54614482fef8d4343e474de3", "content_id": "5472cdf73420a1e4014ec22cd33e327aac0c4d5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "permissive", "max_line_length": 146, "num_lines": 17, "path": "/models/only_administrador.py", "repo_name": "fvergaracl/gonsa2_backend", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom settings.config import Database, Config\n\ndef a_get_all_users():\n database_ = Database()\n config = database_.config\n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor()\n query = \"SELECT nick, email, sex, school, class as classs, fk_roluser, birth_year, birth_month, birth_day, fk_country FROM users WHERE 1 = 1;\"\n cursor.execute(query,)\n r = []\n for (nick, email, sex, school, classs, fk_roluser, birth_year, birth_month, birth_day, fk_country) in cursor:\n temp = [nick, email, sex, school, classs, fk_roluser, birth_year, birth_month, birth_day, fk_country]\n r.append(temp)\n cursor.close()\n cnx.close()\n return r" } ]
15
seabay/reco_backup
https://github.com/seabay/reco_backup
9bda256dea7f9d1d588ce23c9912d98dc2115e24
9bc2e8f8b80b8723aa9b56d95499e17f7ac47a80
2b6e042a32a7073fccb56d5e96e782f10cd15875
refs/heads/master
2020-06-26T00:01:37.840524
2019-08-19T14:54:33
2019-08-19T14:54:33
199,461,098
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6216099858283997, "alphanum_fraction": 0.6388291120529175, "avg_line_length": 54.75199890136719, "blob_id": "d5ef94e8529e157c72dd7174402bef8b24c214d9", "content_id": "9cc6e52b413a613f0c5547a17aa3bd1eac0a2da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6969, "license_type": "no_license", "max_line_length": 200, "num_lines": 125, "path": "/models/simple_cnn_model.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import numpy as np\nimport builtins\nimport tensorflow as tf\n\n\nclass RecoCNN():\n \n def __init__(self, max_transaction_history = 50, max_product_click_history = 50, max_promotion_click_history = 50,\n category_size = 100, single_categorical_features = None, numeric_features_size = 10,\n hidden_layer1_size = 256, hidden_layer2_size = 128, hidden_layer3_size = 64, activation='relu',\n input_embedding_size = 64, multi_gpu_model=False):\n \n self.max_transaction_history = max_transaction_history\n self.max_product_click_history = max_product_click_history\n self.max_promotion_click_history = max_promotion_click_history\n self.category_size = category_size\n self.hidden_layer1_size = hidden_layer1_size\n self.hidden_layer2_size = hidden_layer2_size\n self.hidden_layer3_size = hidden_layer3_size\n self.single_categorical_features = single_categorical_features\n self.numeric_features_size = numeric_features_size\n self.activation = activation\n self.input_embedding_size = input_embedding_size\n self.multi_gpu_model = multi_gpu_model\n \n self.category_embeddings = tf.keras.layers.Embedding(output_dim=self.input_embedding_size, \n input_dim = self.category_size, mask_zero=False, name='category_embeddings')\n self.filter_sizes = [2,3,4,5]\n self.num_filters = 512\n \n self.dropout = 0.2\n\n self.model = None\n self.build()\n \n \n def build(self):\n seq_layer, seq_embed, singles = self.create_input()\n flatten = self.cnn_seq_encode(seq_embed)\n flatten = tf.keras.layers.Dropout(self.dropout)(flatten)\n merge_input = self.merge_seq_single(flatten, singles)\n v = tf.keras.layers.Dense(512, activation = self.activation)(merge_input)\n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer1_size, activation = self.activation)(v)\n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer2_size, activation = self.activation)(v)\n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer3_size, activation = self.activation, name='user_embedding')(v)\n v = tf.keras.layers.LayerNormalization()(v)\n output = tf.keras.layers.Dense(self.category_size, activation ='softmax', name='softmax_layer')(v)\n self.model = tf.keras.models.Model(inputs = seq_layer + [s[0] for s in singles], outputs = [output]) \n\n if self.multi_gpu_model:\n \n try:\n self.model = tf.keras.utils.multi_gpu_model(self.model, gpus=8, cpu_relocation=True)\n print(\"Training using multiple GPUs..\")\n except:\n print(\"Training using single GPU or CPU..\")\n \n\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(lr=0.001), metrics=['accuracy']) \n \n def merge_seq_single(self, flatten, singles):\n cat_ = [flatten]\n cat_ += [s[1] for s in singles]\n return tf.keras.layers.concatenate(cat_, axis=1)\n \n def cnn_seq_encode(self, seq_embed):\n \n cat_embedding = tf.keras.layers.concatenate(seq_embed, axis=1)\n cat_embedding = tf.keras.layers.Reshape((self.max_transaction_history*2, 
self.input_embedding_size,1))(cat_embedding)\n conv_0 = tf.keras.layers.Conv2D(self.num_filters, kernel_size=(self.filter_sizes[0], self.input_embedding_size), padding='valid', kernel_initializer='normal', activation='relu')(cat_embedding)\n conv_1 = tf.keras.layers.Conv2D(self.num_filters, kernel_size=(self.filter_sizes[1], self.input_embedding_size), padding='valid', kernel_initializer='normal', activation='relu')(cat_embedding)\n conv_2 = tf.keras.layers.Conv2D(self.num_filters, kernel_size=(self.filter_sizes[2], self.input_embedding_size), padding='valid', kernel_initializer='normal', activation='relu')(cat_embedding)\n conv_3 = tf.keras.layers.Conv2D(self.num_filters, kernel_size=(self.filter_sizes[3], self.input_embedding_size), padding='valid', kernel_initializer='normal', activation='relu')(cat_embedding)\n\n maxpool_0 = tf.keras.layers.MaxPool2D(pool_size=(self.max_transaction_history*2 - self.filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)\n maxpool_1 = tf.keras.layers.MaxPool2D(pool_size=(self.max_transaction_history*2 - self.filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)\n maxpool_2 = tf.keras.layers.MaxPool2D(pool_size=(self.max_transaction_history*2 - self.filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)\n maxpool_3 = tf.keras.layers.MaxPool2D(pool_size=(self.max_transaction_history*2 - self.filter_sizes[3] + 1, 1), strides=(1,1), padding='valid')(conv_3)\n\n concatenated_tensor = tf.keras.layers.concatenate([maxpool_0, maxpool_1, maxpool_2, maxpool_3])\n flatten = tf.keras.layers.Flatten()(concatenated_tensor)\n \n return flatten\n \n def create_input(self):\n \n transaction_cols = [x for x in range(self.max_transaction_history)]\n promotion_click_cols = [x for x in range(self.max_promotion_click_history)]\n seq_category_cols = [transaction_cols, promotion_click_cols]\n \n seqs = []\n for i, grp in enumerate(seq_category_cols):\n seqs.append(self.seq_categorical_input('seq_categorical_' + str(i), len(grp)))\n\n singles = []\n if self.single_categorical_features:\n for col in self.single_categorical_features:\n singles.append(self.singe_categorical_input(str(col), self.single_categorical_features[col][0],\n self.single_categorical_features[col][1]))\n inp_layer = [s[0] for s in seqs]\n inp_embed = [s[1] for s in seqs]\n \n return inp_layer, inp_embed, singles\n \n \n def seq_categorical_input(self, name, max_history):\n \n seq = tf.keras.layers.Input(shape=(max_history,), dtype='int32', name=name)\n input_embeddings = self.category_embeddings(seq)\n return seq, input_embeddings \n\n \n def singe_categorical_input(self, name, unique_size, embedding_size):\n single = tf.keras.layers.Input(shape=(1,), dtype='int32', name=name)\n embeddings = tf.keras.layers.Embedding(output_dim = embedding_size, input_dim = unique_size, \n input_length=1, name=name + '_embedding')(single)\n embeddings = tf.keras.layers.Flatten(name = 'flatten_' + name)(embeddings)\n return single, embeddings\n \n def continous_inputs(self, size=None, name='numeric'):\n inp = tf.keras.layers.Input(shape=(size,), dtype='float32', name=name)\n return inp, inp\n" }, { "alpha_fraction": 0.7547445297241211, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 49.74074172973633, "blob_id": "3d4ec5b57ffeb8464a84faa410aec3ac92ffe2d3", "content_id": "7a899cec1bae03c3c390922b1e67fb4af6b1719d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1370, "license_type": "no_license", "max_line_length": 171, 
"num_lines": 27, "path": "/experiments/estimator_exp.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append('..')\n\nimport tensorflow as tf\n\nfrom models import dnn_estimator_model as dnn\n\nfrom configs import configs\nfrom utils import exp_data_util\nfrom data_loader import tfrecords_saver, tfrecords_loader\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n\n## create experiment numpy data\nfeatures, labels = exp_data_util.create_data(data_size=configs.test_data_size, max_transaction_history=configs.max_transaction_history, \\\n max_promotion_click_history=configs.max_promotion_click_history, numeric_size=configs.numeric_size, category_size=configs.category_size)\n\n## save as tfrecord file, running once is ok\ntfrecords_saver.save_as_tfrecords('../data/train/tf.tfrecord',features, labels)\n\nif __name__ == '__main__':\n model = dnn.RecoEstimator(configs.max_transaction_history, configs.max_product_click_history, configs.max_promotion_click_history, category_size=configs.category_size,\n numeric_features_size = configs.numeric_size, input_embedding_size = configs.input_embedding_size,\n single_categorical_features = configs.single_category_cols).model\n\n model.train(input_fn=tfrecords_loader.tfrecords_loader('../data/train').load_func, steps=4000)\n model.evaluate(input_fn=tfrecords_loader.tfrecords_loader('../data/train').load_func, steps=configs.test_data_size//configs.batch_size)\n" }, { "alpha_fraction": 0.7407833933830261, "alphanum_fraction": 0.7442396283149719, "avg_line_length": 44.68421173095703, "blob_id": "ebcc809b31062354c7da57f1aaffe418e3ae26a7", "content_id": "500968678aed398c802d977428dce99968fe6460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1736, "license_type": "no_license", "max_line_length": 165, "num_lines": 38, "path": "/experiments/tfrecords_data_exp.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append('..')\n\nimport tensorflow as tf\n\nfrom models import simple_dnn_model as dnn\nfrom models import simple_cnn_model as cnn\n\nfrom configs import configs\nfrom utils import exp_data_util\nfrom data_loader import tfrecords_saver, tfrecords_loader\n\ntf.compat.v1.logging.set_verbosity(tf.logging.INFO)\n\n## create experiment numpy data\nfeatures, labels = exp_data_util.create_data(data_size=configs.test_data_size, max_transaction_history=configs.max_transaction_history, \\\n max_promotion_click_history=configs.max_promotion_click_history, numeric_size=configs.numeric_size, category_size=configs.category_size)\n\n## save as tfrecord file, running once is ok\ntfrecords_saver.save_as_tfrecords('../data/train/tf.tfrecord',features, labels)\n\n## load tfrecords file\nds = tfrecords_loader.tfrecords_loader('../data/train').load()\n#iter = ds.make_one_shot_iterator()\n\nif __name__ == '__main__':\n model = dnn.RecoDNN(configs.max_transaction_history, configs.max_product_click_history, configs.max_promotion_click_history, category_size=configs.category_size,\n numeric_features_size = configs.numeric_size, input_embedding_size = configs.input_embedding_size,\n single_categorical_features = configs.single_category_cols).model\n\n model.summary()\n\n tf.keras.utils.plot_model(model, to_file='../figures/model.png', show_shapes=True, show_layer_names=True)\n\n #model_est=tf.keras.estimator.model_to_estimator(keras_model=model, model_dir=\"kkt\")\n #train_input = 
lambda:tfrecords_loader.tfrecords_loader('../data/train').load_func()\n #model_est.train(input_fn=train_input, steps=300)\n model.fit(ds, epochs=50, steps_per_epoch=int(configs.test_data_size//configs.batch_size))\n" }, { "alpha_fraction": 0.6152275800704956, "alphanum_fraction": 0.6244131326675415, "avg_line_length": 54.011234283447266, "blob_id": "06bb81574c44d7ff20248a9d91e88e1b24a2f8e2", "content_id": "197fa4c6788a98c45dc3273118a3aa7e10102fb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4899, "license_type": "no_license", "max_line_length": 301, "num_lines": 89, "path": "/data_loader/tfrecords_loader.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append('..')\n\nimport os\nimport tensorflow as tf\nfrom configs import configs\n\nclass tfrecords_loader():\n\n def __init__(self, path):\n self.path = path\n self.files = self.getListOfFiles(self.path)\n print('loading files: ', self.files)\n\n def parse_function(self, example_proto):\n features = {\"txSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"clickSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"genderIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"is_email_verifiedIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"age\":tf.io.FixedLenFeature([1], tf.int64),\n \"cityIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"label\":tf.io.FixedLenFeature([1], tf.int64)\n }\n parsed_features = tf.io.parse_single_example(example_proto, features)\n return (parsed_features[\"txSeq\"], parsed_features[\"clickSeq\"], parsed_features[\"genderIndex\"], parsed_features[\"is_email_verifiedIndex\"], parsed_features[\"age\"], parsed_features[\"cityIndex\"]), parsed_features[\"label\"]\n \n #return (parsed_features[\"txSeq\"], parsed_features[\"clickSeq\"]), parsed_features[\"label\"]\n\n def parse_function_as_dict(self, example_proto):\n features = {\"txSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"clickSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"genderIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"is_email_verifiedIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"age\":tf.io.FixedLenFeature([1], tf.int64),\n \"cityIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"label\":tf.io.FixedLenFeature([1], tf.int64)\n }\n parsed_features = tf.io.parse_single_example(example_proto, features)\n return {'txSeq':parsed_features[\"txSeq\"], 'clickSeq':parsed_features[\"clickSeq\"], 'genderIndex':parsed_features[\"genderIndex\"], 'is_email_verifiedIndex':parsed_features[\"is_email_verifiedIndex\"], 'age':parsed_features[\"age\"], 'cityIndex':parsed_features[\"cityIndex\"]}, parsed_features[\"label\"]\n \n\n def load(self):\n padded_shapes = (([configs.max_transaction_history], [configs.max_promotion_click_history], [None],[None],[None], [None]), [None])\n #padded_shapes = (([configs.max_transaction_history], [configs.max_promotion_click_history]), [None])\n ds = tf.data.TFRecordDataset(self.files, buffer_size=configs.buffer_size, num_parallel_reads=os.cpu_count())\n ds = ds.map(self.parse_function, num_parallel_calls=os.cpu_count())\n ds = ds.shuffle(buffer_size=configs.buffer_size)\n ds = ds.padded_batch(configs.batch_size, padded_shapes)\n ds = ds.prefetch(2)\n ds = ds.repeat()\n return ds\n\n def load_func(self):\n #padded_shapes = (([configs.max_transaction_history], [configs.max_promotion_click_history], [None],[None],[None], [None]), 
[None])\n #padded_shapes = (([configs.max_transaction_history], [configs.max_promotion_click_history]), [None])\n padded_shapes = ({'txSeq':[configs.max_transaction_history], 'clickSeq':[configs.max_promotion_click_history], 'genderIndex':[None], 'is_email_verifiedIndex':[None],\n 'age':[None], 'cityIndex':[None]}, [None])\n ds = tf.data.TFRecordDataset(self.files,buffer_size=configs.buffer_size, num_parallel_reads=os.cpu_count())\n ds = ds.map(self.parse_function_as_dict, num_parallel_calls=os.cpu_count())\n ds = ds.shuffle(buffer_size=configs.buffer_size)\n ds = ds.padded_batch(configs.batch_size, padded_shapes)\n ds = ds.prefetch(1)\n ds = ds.repeat()\n #iterator = ds.make_one_shot_iterator()\n #features, labels = iterator.get_next()\n #return features, labels\n return ds\n\n def load2(self):\n padded_shapes = (([configs.max_transaction_history], [configs.max_promotion_click_history], [None],[None],[None], [None]), [None])\n ds = (tf.data.Dataset.from_tensor_slices(self.files).interleave(lambda x: tf.data.TFRecordDataset(x).map(self.parse_function, num_parallel_calls=tf.data.experimental.AUTOTUNE),cycle_length=4, block_length=32))\n ds = ds.padded_batch(configs.batch_size, padded_shapes)\n ds = ds.prefetch(8)\n return ds\n\n\n def getListOfFiles(self, dirName):\n \n listOfFile = os.listdir(dirName)\n allFiles = list()\n \n for entry in listOfFile:\n fullPath = os.path.join(dirName, entry)\n if os.path.isdir(fullPath):\n allFiles = allFiles + self.getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n \n return allFiles\n\n\n\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 6.333333492279053, "blob_id": "7a473747534ec72622bfe8619b7b08a8155c9159", "content_id": "339ede0a0a10bec0952d3b8aa34cb44cba991d94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22, "license_type": "no_license", "max_line_length": 13, "num_lines": 3, "path": "/README.md", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "# reco_backup\n\nbackup\n" }, { "alpha_fraction": 0.662162184715271, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 19, "blob_id": "3512e1ae1f4e9ba3b1a74596089870805b74218e", "content_id": "c5ed4e250c5d485d960b0605777c98f840b046eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/configs/configs.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "\nmax_transaction_history = 20\nmax_product_click_history = 20\nmax_promotion_click_history = 20\ninput_embedding_size = 64\ncategory_size = 100\nnumeric_size = 1\n\nbatch_size=32\nepochs=200\nnum_parallel_calls=8\ndata_size=100000\n\n" }, { "alpha_fraction": 0.5389673113822937, "alphanum_fraction": 0.5586926937103271, "avg_line_length": 35.94285583496094, "blob_id": "07c3b9db89707cd944ad3d65ad3cfa4350c38b87", "content_id": "e0be2a61074e9665ed3445be4650510ae9bafe97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5171, "license_type": "no_license", "max_line_length": 135, "num_lines": 140, "path": "/data_loader/data_generator.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import numpy as np\nimport keras\nfrom itertools import islice\n\nclass DataGenerator(keras.utils.Sequence):\n def __init__(self, path, row_size, batch_size=32, n_classes=10, \n 
seq_category_pos=None, categorical_pos=None, numeric_pos=None, shuffle=True):\n 'Initialization'\n self.path = path\n self.batch_size = batch_size\n self.row_size = row_size\n self.n_classes = n_classes\n self.seq_category_pos=seq_category_pos\n self.categorical_pos=categorical_pos\n self.numeric_pos=numeric_pos\n self.shuffle = shuffle\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(self.row_size / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n pos = (index*self.batch_size, min(self.row_size, (index+1)*self.batch_size))\n X, y = self.__data_generation(pos)\n return X, y\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(self.row_size) ### bugs here\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self, pos):\n \n fts1 = []\n fts2 = []\n fts3 = []\n fts4 = []\n fts5 = []\n fts6 = []\n fts7 = []\n labels = []\n\n with open(self.path) as f:\n for line in islice(f, pos[0], pos[1]):\n line = line.strip().split(\" \")\n label = line[-1]\n \n # update our corresponding batches lists\n fts1.append(np.array([int(float(x)) for x in line[0:self.seq_category_pos[0]]], dtype=\"uint32\"))\n fts2.append(np.array([int(float(x)) for x in line[self.seq_category_pos[0]:self.seq_category_pos[1]]], dtype=\"uint32\"))\n fts3.append(np.array([int(float(x)) for x in line[self.seq_category_pos[1]:self.seq_category_pos[2]]], dtype=\"uint32\"))\n fts4.append(np.array(int(float(line[self.categorical_pos[0]])), dtype=\"uint32\"))\n fts5.append(np.array(int(float(line[self.categorical_pos[1]])), dtype=\"uint32\"))\n fts6.append(np.array(int(float(line[self.categorical_pos[2]])), dtype=\"uint32\"))\n fts7.append(np.array(line[self.numeric_pos[0]:-1]))\n \n labels.append(int(float(label)))\n \n \n # one-hot encode the labels\n labels = keras.utils.to_categorical(labels, num_classes=self.n_classes)\n \n # yield the batch to the calling function\n return ([np.array(fts1), np.array(fts2), np.array(fts3), np.array(fts4), np.array(fts5), \n np.array(fts6), np.array(fts7)], labels)\n\n\n\nimport threading\n\nclass threadsafe_iter:\n \"\"\"Takes an iterator/generator and makes it thread-safe by\n serializing call to the `next` method of given iterator/generator.\n \"\"\"\n def __init__(self, it):\n self.it = it\n self.lock = threading.Lock()\n\n def __iter__(self):\n return self\n\n def __next__(self): # Py3\n return next(self.it)\n \ndef threadsafe_generator(f):\n \"\"\"A decorator that takes a generator function and makes it thread-safe.\n \"\"\"\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g\n\n\n@threadsafe_generator\ndef data_generator(inputPath, batch_size, seq_category_pos=None, categorical_pos=None, numeric_pos=None, mode=\"train\"):\n \n f = open(inputPath, \"r\")\n \n while True:\n \n fts1 = []\n fts2 = []\n fts3 = []\n fts4 = []\n fts5 = []\n fts6 = []\n fts7 = []\n labels = []\n \n # keep looping until we reach our batch size\n while len(labels) < batch_size:\n line = f.readline()\n if line == \"\":\n f.seek(0)\n line = f.readline()\n if mode == \"eval\":\n break\n \n line = line.strip().split(\" \")\n label = line[-1]\n \n # update our corresponding batches lists\n fts1.append(np.array([int(float(x)) for x in line[0:seq_category_pos[0]]], dtype=\"uint32\"))\n fts2.append(np.array([int(float(x)) for x in line[seq_category_pos[0]:seq_category_pos[1]]], dtype=\"uint32\"))\n fts3.append(np.array([int(float(x)) for x in 
line[seq_category_pos[1]:seq_category_pos[2]]], dtype=\"uint32\"))\n fts4.append(np.array(int(float(line[categorical_pos[0]])), dtype=\"uint32\"))\n fts5.append(np.array(int(float(line[categorical_pos[1]])), dtype=\"uint32\"))\n fts6.append(np.array(int(float(line[categorical_pos[2]])), dtype=\"uint32\"))\n fts7.append(np.array(line[numeric_pos[0]:-1]))\n labels.append(int(float(label)))\n \n \n # one-hot encode the labels\n labels = keras.utils.to_categorical(labels, num_classes=100)\n \n # yield the batch to the calling function\n yield ([np.array(fts1), np.array(fts2), np.array(fts3), np.array(fts4), np.array(fts5), \n np.array(fts6), np.array(fts7)], labels)" }, { "alpha_fraction": 0.6372472643852234, "alphanum_fraction": 0.6543545722961426, "avg_line_length": 51.51020431518555, "blob_id": "46f96169aaa092930457c3692af3be591eb652f5", "content_id": "e003d291c7e692b59955f681c3fe6d1afe4a7e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2572, "license_type": "no_license", "max_line_length": 152, "num_lines": 49, "path": "/models/dnn_estimator_model.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\nclass RecoEstimator():\n \n def __init__(self, max_transaction_history = 20, max_product_click_history = 20, max_promotion_click_history = 20,\n category_size = 100, single_categorical_features = None, numeric_features_size = 10,\n hidden_layer1_size = 1024, hidden_layer2_size = 512, hidden_layer3_size = 256, activation='relu',\n input_embedding_size = 128):\n \n self.max_transaction_history = max_transaction_history\n self.max_product_click_history = max_product_click_history\n self.max_promotion_click_history = max_promotion_click_history\n self.category_size = category_size\n self.hidden_layer1_size = hidden_layer1_size\n self.hidden_layer2_size = hidden_layer2_size\n self.hidden_layer3_size = hidden_layer3_size\n self.single_categorical_features = single_categorical_features\n self.numeric_features_size = numeric_features_size\n self.activation = activation\n self.input_embedding_size = input_embedding_size\n self.model = None\n self.build()\n \n \n def build(self):\n seqs = self.create_input() \n self.model = tf.estimator.DNNClassifier(feature_columns=seqs,n_classes=self.category_size,\n hidden_units=[self.hidden_layer1_size, self.hidden_layer2_size, self.hidden_layer3_size],\n optimizer=tf.compat.v1.train.AdamOptimizer(0.0005))\n def create_input(self):\n seqs=[]\n seqs.append(self.seq_categorical_input('txSeq'))\n seqs.append(self.seq_categorical_input('clickSeq'))\n \n if self.single_categorical_features:\n for col in self.single_categorical_features:\n seqs.append(self.single_categorical_input(str(col), self.single_categorical_features[col][0], self.single_categorical_features[col][1]))\n\n return seqs\n \n def seq_categorical_input(self, name):\n seq_input = tf.feature_column.categorical_column_with_identity(name, self.category_size)\n seq_emb = tf.feature_column.embedding_column(categorical_column=seq_input, dimension=self.input_embedding_size)\n return seq_emb\n \n def single_categorical_input(self, name, value_size, embedding_size):\n single_input = tf.feature_column.categorical_column_with_identity(name, value_size)\n single_emb = tf.feature_column.embedding_column(categorical_column=single_input, dimension=embedding_size)\n return single_emb" }, { "alpha_fraction": 0.6138527989387512, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 51.3636360168457, 
"blob_id": "7d4b0c510a735ffb01599732171b6fd550b5e354", "content_id": "208a317dfa62e887bae25f8da70751c0b8ef5aca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 225, "num_lines": 22, "path": "/data_loader/data_verify.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "\nimport tensorflow as tf\ntf.enable_eager_execution()\n\ndef parse_function(example_proto):\n features = {\"txSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"clickSeq\":tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n \"genderIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"is_email_verifiedIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"age\":tf.io.FixedLenFeature([1], tf.int64),\n \"cityIndex\":tf.io.FixedLenFeature([1], tf.int64),\n \"label\":tf.io.FixedLenFeature([1], tf.int64)\n }\n parsed_features = tf.io.parse_single_example(example_proto, features)\n return (parsed_features[\"txSeq\"], parsed_features[\"clickSeq\"], parsed_features[\"genderIndex\"], parsed_features[\"is_email_verifiedIndex\"], parsed_features[\"age\"], parsed_features[\"cityIndex\"]), parsed_features[\"label\"]\n\n\ntfrecord_dataset = tf.data.TFRecordDataset(['../../data//validation/split_9/part-r-00000'])\n\nparsed_dataset = tfrecord_dataset.map(parse_function)\n\nfor row in parsed_dataset:\n print(row[1])\n\n\n" }, { "alpha_fraction": 0.5936639308929443, "alphanum_fraction": 0.6097337007522583, "avg_line_length": 35.923728942871094, "blob_id": "2fff79aaa832d614fce3c2285e524d3e7f633996", "content_id": "d8b370ba13ba2475e9346e15c0d8eb12b50f919e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4358, "license_type": "no_license", "max_line_length": 107, "num_lines": 118, "path": "/models/losses.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\ndef focal_loss(gamma=2., alpha=4.):\n\n gamma = float(gamma)\n alpha = float(alpha)\n\n def focal_loss_fixed(y_true, y_pred):\n \"\"\"Focal loss for multi-classification\n FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)\n Notice: y_pred is probability after softmax\n gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper\n d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)\n Focal Loss for Dense Object Detection\n https://arxiv.org/abs/1708.02002\n\n Arguments:\n y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]\n y_pred {tensor} -- model's output, shape of [batch_size, num_cls]\n\n Keyword Arguments:\n gamma {float} -- (default: {2.0})\n alpha {float} -- (default: {4.0})\n\n Returns:\n [tensor] -- loss.\n \"\"\"\n epsilon = 1.e-9\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n\n model_out = tf.add(y_pred, epsilon)\n ce = tf.multiply(y_true, -tf.log(model_out))\n weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))\n fl = tf.multiply(alpha, tf.multiply(weight, ce))\n reduced_fl = tf.reduce_max(fl, axis=1)\n return tf.reduce_mean(reduced_fl)\n return focal_loss_fixed\n\n\n\ndef categorical_focal_loss(gamma=2.0, alpha=0.25):\n \"\"\"\n Implementation of Focal Loss from the paper in multiclass classification\n Formula:\n loss = -alpha*((1-p)^gamma)*log(p)\n Parameters:\n alpha -- the same as wighting factor in balanced cross entropy\n gamma -- focusing parameter for modulating factor (1-p)\n Default value:\n gamma -- 2.0 as mentioned in the 
paper\n alpha -- 0.25 as mentioned in the paper\n \"\"\"\n def focal_loss(y_true, y_pred):\n # Define epsilon so that the backpropagation will not result in NaN\n # for 0 divisor case\n epsilon = tf.keras.backend.epsilon()\n # Add the epsilon to prediction value\n #y_pred = y_pred + epsilon\n # Clip the prediction value\n y_pred = tf.keras.backend.clip(y_pred, epsilon, 1.0-epsilon)\n # Calculate cross entropy\n cross_entropy = -y_true*tf.keras.backend.log(y_pred)\n # Calculate weight that consists of modulating factor and weighting factor\n weight = alpha * y_true * tf.keras.backend.pow((1-y_pred), gamma)\n # Calculate focal loss\n loss = weight * cross_entropy\n # Sum the losses in mini_batch\n loss = tf.keras.backend.sum(loss, axis=1)\n return loss\n \n return focal_loss\n\n\n\ndef _categorical_focal_loss(gamma=2., alpha=.25):\n \"\"\"\n Softmax version of focal loss.\n m\n FL = ∑ -alpha * (1 - p_o,c)^gamma * y_o,c * log(p_o,c)\n c=1\n where m = number of classes, c = class and o = observation\n Parameters:\n alpha -- the same as weighing factor in balanced cross entropy\n gamma -- focusing parameter for modulating factor (1-p)\n Default value:\n gamma -- 2.0 as mentioned in the paper\n alpha -- 0.25 as mentioned in the paper\n References:\n Official paper: https://arxiv.org/pdf/1708.02002.pdf\n https://www.tensorflow.org/api_docs/python/tf/keras/backend/categorical_crossentropy\n Usage:\n model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)], metrics=[\"accuracy\"], optimizer=adam)\n \"\"\"\n def categorical_focal_loss_fixed(y_true, y_pred):\n \"\"\"\n :param y_true: A tensor of the same shape as `y_pred`\n :param y_pred: A tensor resulting from a softmax\n :return: Output tensor.\n \"\"\"\n\n # Scale predictions so that the class probas of each sample sum to 1\n y_pred /= tf.keras.backend.sum(y_pred, axis=-1, keepdims=True)\n\n # Clip the prediction value to prevent NaN's and Inf's\n epsilon = tf.keras.backend.epsilon()\n y_pred = tf.keras.backend.clip(y_pred, epsilon, 1. 
- epsilon)\n\n # Calculate Cross Entropy\n cross_entropy = -y_true * tf.keras.backend.log(y_pred)\n\n # Calculate Focal Loss\n loss = alpha * tf.keras.backend.pow(1 - y_pred, gamma) * cross_entropy\n\n # Sum the losses in mini_batch\n return tf.keras.backend.sum(loss, axis=1)\n\n return categorical_focal_loss_fixed" }, { "alpha_fraction": 0.6374756693840027, "alphanum_fraction": 0.6469280123710632, "avg_line_length": 48.95833206176758, "blob_id": "be455c3675f3a96b8f7229678ba7add2c7439f62", "content_id": "63a0176501a00d682984dd3f8aabe9ce2bfa3daf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7196, "license_type": "no_license", "max_line_length": 192, "num_lines": 144, "path": "/models/simple_dnn_model.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import numpy as np\nimport builtins\nimport tensorflow as tf\n\nimport models.losses as losses\n\nclass RecoDNN():\n \n def __init__(self, max_transaction_history = 20, max_product_click_history = 20, max_promotion_click_history = 20,\n category_size = 100, single_categorical_features = None, numeric_features_size = 1,\n hidden_layer1_size = 256, hidden_layer2_size = 128, hidden_layer3_size = 64, activation='relu',\n input_embedding_size = 64, seq_pooling_mode='cat', multi_gpu_model=False):\n \n self.max_transaction_history = max_transaction_history\n self.max_product_click_history = max_product_click_history\n self.max_promotion_click_history = max_promotion_click_history\n self.category_size = category_size\n self.hidden_layer1_size = hidden_layer1_size\n self.hidden_layer2_size = hidden_layer2_size\n self.hidden_layer3_size = hidden_layer3_size\n self.single_categorical_features = single_categorical_features\n self.numeric_features_size = numeric_features_size\n self.activation = activation\n self.input_embedding_size = input_embedding_size\n self.seq_pooling_mode = seq_pooling_mode\n self.multi_gpu_model = multi_gpu_model\n \n self.category_embeddings = tf.keras.layers.Embedding(input_dim = self.category_size, output_dim=self.input_embedding_size, input_length=20, mask_zero=True, name='category_embeddings')\n \n self.model = None\n\n self.build()\n \n \n def build(self):\n \n inp_layer, inp_embed = self.create_input()\n \n v = tf.keras.layers.Dense(512, activation = self.activation)(tf.keras.layers.concatenate(inp_embed)) \n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer1_size, activation = self.activation)(v)\n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer2_size, activation = self.activation)(v)\n v = tf.keras.layers.LayerNormalization()(v)\n v = tf.keras.layers.Dense(self.hidden_layer3_size, activation = self.activation, name='user_embedding')(v)\n v = tf.keras.layers.LayerNormalization()(v)\n output = tf.keras.layers.Dense(self.category_size, activation ='softmax', name='softmax_layer')(v)\n self.model = tf.keras.models.Model(inputs = inp_layer, outputs = [output]) \n\n if self.multi_gpu_model:\n \n try:\n self.model = tf.keras.utils.multi_gpu_model(self.model, gpus=8, cpu_relocation=True)\n print(\"Training using multiple GPUs..\")\n except:\n print(\"Training using single GPU or CPU..\")\n \n #self.model = tf.keras.utils.multi_gpu_model(self.model, gpus=8, cpu_relocation=True)\n\n #self.model.compile(loss=[losses.categorical_focal_loss(1,1)], optimizer=tf.keras.optimizers.Adam(lr=0.005), metrics=['accuracy'])\n 
self.model.compile(loss=['sparse_categorical_crossentropy'], optimizer=tf.keras.optimizers.Adam(lr=0.005), metrics=['accuracy'])\n\n \n def create_input(self):\n \n transaction_cols = [x for x in range(self.max_transaction_history)]\n promotion_click_cols = [x for x in range(self.max_promotion_click_history)]\n seq_category_cols = [transaction_cols, promotion_click_cols]\n \n seqs = []\n for i, grp in enumerate(seq_category_cols):\n seqs.append(self.seq_categorical_input('seq_categorical_' + str(i), len(grp), self.seq_pooling_mode))\n\n singles = []\n if self.single_categorical_features:\n for col in self.single_categorical_features:\n singles.append(self.singe_categorical_input(str(col), self.single_categorical_features[col][0], self.single_categorical_features[col][1]))\n inp_layer = [s[0] for s in seqs]\n inp_embed = [s[1] for s in seqs]\n \n return inp_layer, inp_embed\n \n \n def avg_pooling(self, name, max_history):\n seq = tf.keras.layers.Input(shape=(max_history,), dtype='int32', name=name)\n category_embeddings = tf.keras.layers.Embedding(input_dim = self.category_size, output_dim=self.input_embedding_size, mask_zero=True,name=name+'category_embeddings')\n input_embeddings = category_embeddings(seq)\n avg_embedding = tf.keras.layers.GlobalAveragePooling1D(name=name + '_avg_embedding')(input_embeddings, mask=self.category_embeddings.compute_mask(seq))\n return seq, avg_embedding\n\n def max_pooling(self, name, max_history):\n seq = tf.keras.layers.Input(shape=(max_history,), dtype='int32', name=name)\n category_embeddings = tf.keras.layers.Embedding(input_dim = self.category_size, output_dim=self.input_embedding_size, name=name+'category_embeddings')\n input_embeddings = category_embeddings(seq)\n max_embedding = tf.keras.layers.GlobalMaxPooling1D(name=name + '_max_embedding')(input_embeddings)\n #maxf = tf.keras.layers.Lambda(lambda x: tf.keras.backend.max(x, axis=1), name = name + '_max_embedding')\n #max_embedding = maxf(input_embeddings)\n return seq, max_embedding\n\n def cat_pooling(self, name, max_history):\n seq = tf.keras.layers.Input(shape=(max_history,), dtype='int32', name=name)\n category_embeddings = tf.keras.layers.Embedding(input_dim = self.category_size, output_dim=self.input_embedding_size, name=name+'category_embeddings')\n input_embeddings = category_embeddings(seq)\n return seq, tf.keras.layers.Flatten(name = 'flatten_' + name)(input_embeddings)\n\n def seq_categorical_input(self, name, max_history, mode='avg'):\n\n if mode == 'avg':\n return self.avg_pooling(name, max_history)\n elif mode == 'max':\n return self.max_pooling(name, max_history)\n elif mode == 'cat':\n return self.cat_pooling(name, max_history)\n else:\n raise Exception('Not support ' + mode)\n \n def singe_categorical_input(self, name, unique_size, embedding_size):\n single = tf.keras.layers.Input(shape=(1,), dtype='int32', name=name)\n embeddings = tf.keras.layers.Embedding(output_dim = embedding_size, input_dim = unique_size, \n input_length=1, name=name + '_embedding')(single)\n embeddings = tf.keras.layers.Flatten(name = 'flatten_' + name)(embeddings)\n return single, embeddings\n \n def continous_inputs(self, size=None, name='numeric'):\n inp = tf.keras.layers.Input(shape=(size,), dtype='float32', name=name)\n return inp, inp\n\n\n def save(self, checkpoint_path):\n if self.model is None:\n raise Exception(\"You have to build the model first.\")\n\n print(\"Saving model...\")\n self.model.save(checkpoint_path)\n print(\"Model saved\")\n\n\n def load(self, checkpoint_path):\n if self.model 
is None:\n raise Exception(\"You have to build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(checkpoint_path))\n self.model = tf.keras.models.load_model(checkpoint_path)\n print(\"Model loaded\")\n" }, { "alpha_fraction": 0.7105855941772461, "alphanum_fraction": 0.7252252101898193, "avg_line_length": 39.272727966308594, "blob_id": "c60fcd239bd61dabe6a2613b8ccdff373cf63912", "content_id": "246427bfbe2212b346fe546b51bfe27cfd47749a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "no_license", "max_line_length": 155, "num_lines": 22, "path": "/experiments/numpy_data_exp.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append('..')\n\nimport tensorflow as tf\n\nimport models.simple_dnn_model as model\nfrom configs import configs\nfrom utils import exp_data_util\n\nsingle_category_cols = {105:3,106:5,107:10} ## such as location : unique_value_size\n\n## create experiment tfrecord data\nfeatures, labels = exp_data_util.create_data()\n\nif __name__ == '__main__':\n model = model.RecoDNN(configs.max_transaction_history, configs.max_product_click_history, configs.max_promotion_click_history, configs.category_size,\n numeric_features_size = configs.numeric_size, input_embedding_size = configs.input_embedding_size,\n single_categorical_features = single_category_cols).model\n\n model.summary()\n\n model.fit(x=features, y=tf.keras.utils.to_categorical(labels, num_classes=configs.category_size), epochs=configs.epochs, batch_size=configs.batch_size)\n\n\n" }, { "alpha_fraction": 0.6206521987915039, "alphanum_fraction": 0.664130449295044, "avg_line_length": 56.5625, "blob_id": "4370171e9f8036526ee1a023368d31c05cb93aa0", "content_id": "dcc0803db7ad90018ab9935e1bd2ef0c3bcbfe27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "no_license", "max_line_length": 161, "num_lines": 16, "path": "/data_loader/tfrecords_saver.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\ndef float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(value)))\n\ndef save_as_tfrecords(output_filename, features, labels):\n with tf.io.TFRecordWriter(output_filename) as writer:\n for (v1,v2,v3,v4,v5,v6, v7) in zip(features[0], features[1], features[2], features[3], features[4], features[5], labels):\n features = {'txSeq': int64_feature(v1), 'clickSeq': int64_feature(v2),\n 'genderIndex': int64_feature(v3),'is_email_verifiedIndex': int64_feature(v4), 'age': int64_feature(v5), 'cityIndex': int64_feature(v6), \\\n 'label': int64_feature(v7)}\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())" }, { "alpha_fraction": 0.6503759622573853, "alphanum_fraction": 0.6954887509346008, "avg_line_length": 52.266666412353516, "blob_id": "f1407cce3ccdbcda85b589dc951ad656cdadc746", "content_id": "cb40dff2fd9760c4bf18e258c5acb272347caede", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 177, "num_lines": 15, "path": "/utils/exp_data_util.py", "repo_name": "seabay/reco_backup", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef 
create_data(data_size=10000, max_transaction_history=20, max_product_click_history = 20, max_promotion_click_history = 20, category_size = 100, numeric_size = 1):\n\n data1 = np.random.randint(category_size, size=(data_size, max_transaction_history-1))\n data2 = np.random.randint(category_size, size=(data_size, max_promotion_click_history-2))\n inputs = [data1, data2]\n\n single_category_cols = {'genderIndex':(3, 8),'is_email_verifiedIndex': (2, 8), 'age':(12, 8), 'cityIndex':(922,16)} ## such as location : unique_value_size, embedding_size\n for k in single_category_cols:\n inputs.append(np.random.randint(single_category_cols[k][0], size=(data_size, 1)))\n\n labels = np.random.randint(category_size, size=(data_size, 1))\n\n return inputs, labels" } ]
14
ganyguru/Delta_SysAdmin
https://github.com/ganyguru/Delta_SysAdmin
d8e69cc76f2d2654cafa93a689fa4a1a62df8210
440ac51bdec3126f4975af8a330ef47b0505ffe6
39bf6e5dbb82d584b4f3f2c06be69881b25e4556
refs/heads/master
2015-08-12T02:31:44.860942
2014-06-14T19:46:25
2014-06-14T19:46:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3853658437728882, "alphanum_fraction": 0.3886178731918335, "avg_line_length": 29.75, "blob_id": "9f1af174c5d8a565db6a0648f82dda535962da8a", "content_id": "b22d60a5e871c05f0072e3afde3b1086fc7ecc0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 56, "num_lines": 20, "path": "/ListDir.py", "repo_name": "ganyguru/Delta_SysAdmin", "src_encoding": "UTF-8", "text": ">>>import os\n\n>>>def list(n=4,p=\"\"):\n\tif p==\"\":\n\t\tp=raw_input(\"Enter The Directory:\")\n if os.path.isdir(p):\n for filename in os.listdir(p):\n np=p+'/'+filename\n if os.path.isdir(np):\n print '.'*n,filename,'/'\n list(n+4,np)\n \n else:\n print '.'*n,filename\n else:\n print \"Enter a valid Directory!!\"\n\n>>> list()\nEnter The Directory:/dev #output\n#then comes the list in dev directory\n" }, { "alpha_fraction": 0.7245762944221497, "alphanum_fraction": 0.7288135886192322, "avg_line_length": 32.71428680419922, "blob_id": "a023fe3373cde16da374af8a1c3af4c140007725", "content_id": "a3e036a761b8999839c24d0ff64e8616774bb311", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 236, "license_type": "no_license", "max_line_length": 83, "num_lines": 7, "path": "/README.md", "repo_name": "ganyguru/Delta_SysAdmin", "src_encoding": "UTF-8", "text": "Delta_SysAdmin\n==============\nThis contains all the tasks for the System Admin\n\nTask-1:\nIn task one i used default parameters coz there is no static variable in python....\ni wanted to have the \"getting input\" part inside the function.\n" } ]
2
Shyamashrita/WebAppBMI
https://github.com/Shyamashrita/WebAppBMI
80d383be585febd0955a135417a78ee11a49c9ac
6766ba070c49c3c078b0fe2c1d722445ea16a133
4d1362416ce523d56b21faf59160d2cdc084b45d
refs/heads/master
2023-06-29T23:21:46.077616
2021-08-06T13:51:34
2021-08-06T13:51:34
393,391,152
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5540308952331543, "alphanum_fraction": 0.572898805141449, "avg_line_length": 26.809524536132812, "blob_id": "0d0baf0afce39c7153e9226543532645fb01c669", "content_id": "d47fa6fef7cd1d9495a75644753cf2346a1af207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 91, "num_lines": 21, "path": "/bmi_calculator/app.py", "repo_name": "Shyamashrita/WebAppBMI", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template\nfrom flask.scaffold import F\n\napp = Flask(__name__)\n\[email protected](\"/\", methods=['POST', 'GET'])\n\ndef cal():\n bmi = ''\n w = ''\n h = ''\n if request.method == 'POST' and 'weight' in request.form and 'height' in request.form :\n w1 = float(request.form.get('weight'))\n h1 = float(request.form.get('height'))\n bmi = round(w1/((h1/100)**2), 2)\n w = round(w1)\n h = round(h1)\n return render_template('index.html', bmi=bmi, weight=w, height=h )\n\nif __name__ == '__main__':\n app.run(debug=True)" } ]
1
KunalDhanaitkar/System-Monitor-Windows
https://github.com/KunalDhanaitkar/System-Monitor-Windows
32f19f9a3f3ba0c2edfd8da4e8c0026da9644ff0
f408af9ab6cc9aa5c25335a354b1e639ec3301cf
8b9a0b91df5fd78d9da1fd7d2b1b0ad5c61df8d2
refs/heads/main
2023-04-22T04:46:27.100038
2021-05-08T02:25:17
2021-05-08T02:25:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5844635963439941, "alphanum_fraction": 0.5844635963439941, "avg_line_length": 20.91891860961914, "blob_id": "89c7dcf263e3d1133afa851d496d2de68c0cf437", "content_id": "d11cef7d8a73a7701f992d76c1546aa5fb868175", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 53, "num_lines": 37, "path": "/api/views.py", "repo_name": "KunalDhanaitkar/System-Monitor-Windows", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .scripts import *\n\n\n# Create your views here.\ndef index(request):\n # Cpu code\n cores = CPU().get_no_cores()\n total = CPU().total_cpu_usage()\n usage = CPU().usage_per_core()\n\n # Memory Code\n memory = Memory().memory_details()\n\n # Disk\n disk = Disk().get_disk_details()\n\n # Network\n network = Network().network_details()\n\n # Load Average\n loads = LoadAverage().get_load_averages()\n\n # Network Address\n addresses = NetworkAddress().traffic_details()\n\n context = {\n \"cores\": cores,\n \"total\": total,\n \"usage\": usage,\n \"memory\": memory,\n \"disk\": disk,\n \"network\": network,\n \"loads\": loads,\n \"addresses\": addresses,\n }\n return render(request, \"api/index.html\", context)\n" }, { "alpha_fraction": 0.7486725449562073, "alphanum_fraction": 0.7952802181243896, "avg_line_length": 42.43589782714844, "blob_id": "b814260586739256e9552a50467913d96fedb2b2", "content_id": "7e21ea6616c7662c39d678cc3d4381d526fb0490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1695, "license_type": "no_license", "max_line_length": 307, "num_lines": 39, "path": "/README.md", "repo_name": "KunalDhanaitkar/System-Monitor-Windows", "src_encoding": "UTF-8", "text": "# System Monitor using Django\n\nDjango is a high-level Python Web framework that encourages rapid development and clean, pragmatic design. It is made to build web applications synchronously using HTTP and enables you to trigger realtime notifications from your synchronous Python Web stack since it comes with a HTTP Pusher service. \n\npsutil (process and system utilities) is a cross-platform library for retrieving information on running processes and system utilization (CPU, memory, disks, network, sensors) in Python. 
It is useful mainly for system monitoring, profiling and limiting process resources and management of running processes.\n\n> This Web Application was developed on Windows Operating System.\n\n## Features\n\n* Disk Utilization\n* Internet Signal Strength\n* CPU Usage\n* Memory Usage\n* Load Averages\n* Network Traffic\n\n## Install Dependencies\n\n* Download the Code and Install all the dependencies in virtual environment using the command\n > `pip install requirements`\n\n## Usage\n\n* Download the Code.\n* pip install virtualenv on your Desktop using CLI.\n* Open the source directory of the Project.\n* Create a virtual enviornment using the command\n > `virtualenv newenv`\n* Install all the dependecies in your virtual enviornment using the command\n > `pip install -r requirements.txt`\n* Execute the Code using the following command\n > `python manage.py runserver`\n* Open the App on Browser using the server link to view the Results.\n\n## Images\n\n![Screenshot1](https://user-images.githubusercontent.com/78525041/117362674-90525700-ae89-11eb-90f8-f84a6d48f759.png)\n![Screenshot2](https://user-images.githubusercontent.com/78525041/116960688-278f9280-ac6f-11eb-81ae-a628f99d53a5.png)\n\n" }, { "alpha_fraction": 0.5378229022026062, "alphanum_fraction": 0.5399754047393799, "avg_line_length": 26.559322357177734, "blob_id": "180ea8f5ba6a3d207d7989d7419201f1ee2bf86f", "content_id": "6cd45277076bc213980c514100cf9c46c251db25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3252, "license_type": "no_license", "max_line_length": 84, "num_lines": 118, "path": "/api/scripts.py", "repo_name": "KunalDhanaitkar/System-Monitor-Windows", "src_encoding": "UTF-8", "text": "import psutil\nimport platform\nfrom datetime import datetime\n\n\ndef compute(res, suffix=\"B\"):\n base = 1024\n for i in [\"\", \"K\", \"M\", \"G\", \"T\"]:\n if res < base:\n return f\"{res:.2f}{i}{suffix}\"\n res = res / base\n\n\nclass CPU:\n def get_no_cores(self):\n cores = psutil.cpu_count()\n return cores\n\n def usage_per_core(self):\n usages = []\n usage_per_core = {}\n for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):\n usage_per_core = {i: percentage}\n usages.append(usage_per_core)\n\n return usages\n\n def total_cpu_usage(self):\n return psutil.cpu_percent()\n\n\nclass Memory:\n def memory_details(self):\n comp_memory = psutil.virtual_memory()\n total_memory = compute(comp_memory.total)\n available_memory = compute(comp_memory.available)\n used_memory = compute(comp_memory.used)\n percentage = comp_memory.percent\n\n results = {\n \"Percentage\": percentage,\n \"Total\": total_memory,\n \"Available\": available_memory,\n \"Used\": used_memory,\n }\n\n return results\n\n\nclass Disk:\n def get_disk_details(self):\n partitions = psutil.disk_partitions()\n for part in partitions[:1]:\n mountpoint = part.mountpoint\n disk_usage = psutil.disk_usage(mountpoint)\n\n used_percentage = disk_usage.percent\n total_disk_size = compute(disk_usage.total)\n free_disk_space = compute(disk_usage.free)\n\n results = {\n \"Used\": used_percentage,\n \"Total\": total_disk_size,\n \"Free\": free_disk_space,\n }\n\n return results\n\n\nclass Network:\n def network_details(self):\n network = psutil.net_io_counters()\n bytes_sent = compute(network.bytes_sent)\n bytes_received = compute(network.bytes_recv)\n packets_sent = compute(network.packets_sent)\n packets_recv = compute(network.packets_recv)\n\n results = {\n \"Bytes Sent\": bytes_sent,\n \"Bytes 
Received\": bytes_received,\n \"Packets Sent\": packets_sent,\n \"Packets Received\": packets_recv,\n }\n\n return results\n\n\nclass NetworkAddress:\n def traffic_details(self):\n net_interface = psutil.net_if_addrs()\n addresses = []\n netmask = []\n broadcast = []\n ptp = []\n all_addresses = []\n\n for key, value in net_interface.items():\n for item in value:\n if str(item.family) == \"AddressFamily.AF_INET\":\n addresses.append(item.address)\n netmask.append(item.netmask)\n broadcast.append(item.broadcast)\n ptp.append(item.ptp)\n results = {\n \"Interface\": key,\n \"IP Addresses\": addresses,\n \"Netmask\": netmask,\n \"Broadcast\": broadcast,\n \"Ptp\": ptp,\n }\n all_addresses.append(results)\n return all_addresses\n\n\nclass LoadAverage:\n def get_load_averages(self):\n loads = psutil.getloadavg()\n return loads\n" }, { "alpha_fraction": 0.4865470826625824, "alphanum_fraction": 0.6905829310417175, "avg_line_length": 15.518518447875977, "blob_id": "c7c5ed81a65f21e890d6d14c1b24c32bd9f11729", "content_id": "bb3fd9b8ed7ace56ffef1d1cfccdfd61b6901c94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 446, "license_type": "no_license", "max_line_length": 26, "num_lines": 27, "path": "/requirements.txt", "repo_name": "KunalDhanaitkar/System-Monitor-Windows", "src_encoding": "UTF-8", "text": "appdirs==1.4.4\nasgiref==3.3.4\nbackcall==0.2.0\nblack==20.8b1\nclick==7.1.2\ndecorator==5.0.7\nDjango==3.2\nipython==7.22.0\nipython-genutils==0.2.0\njedi==0.18.0\nmypy-extensions==0.4.3\nparso==0.8.2\npathspec==0.8.1\npexpect==4.8.0\npickleshare==0.7.5\nprompt-toolkit==3.0.18\npsutil==5.8.0\nptyprocess==0.7.0\nPygments==2.8.1\npytz==2021.1\nregex==2021.4.4\nsqlparse==0.4.1\ntoml==0.10.2\ntraitlets==5.0.5\ntyped-ast==1.4.3\ntyping-extensions==3.7.4.3\nwcwidth==0.2.5\n" } ]
4
yashwanthsoodini/adwords-graph-matching
https://github.com/yashwanthsoodini/adwords-graph-matching
4b8c7400fe6f8039eafeee4b3c08ebc533f41e6b
1597d412227f3aa25b1500e20f1a369d1cde5532
a7962bd2bffbf719e2930be48580952df1859f6c
refs/heads/master
2021-02-19T02:12:21.490547
2020-03-05T21:01:46
2020-03-05T21:01:46
245,267,116
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5877628922462463, "alphanum_fraction": 0.5923517942428589, "avg_line_length": 34.35135269165039, "blob_id": "d93e7b9aa3bacc1f194e26668574ba31d72f3f46", "content_id": "f427e98c3ebe4f97b6e13f051c7c055f11af6231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": "no_license", "max_line_length": 107, "num_lines": 74, "path": "/adwords.py", "repo_name": "yashwanthsoodini/adwords-graph-matching", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\n\ndef greedy(bids, queries):\n revenue = 0\n for query in queries:\n match_bids = bids.loc[(bids['Keyword']==query)&(bids['Budget']>=bids['Bid Value'])]\n if match_bids.empty:\n continue\n ix = match_bids['Bid Value'].argmax()\n bid_val = match_bids['Bid Value'].iloc[ix]\n adv = match_bids['Advertiser'].iloc[ix]\n bids.loc[bids['Advertiser']==adv,'Budget'] -= bid_val\n revenue += bid_val\n return revenue\n\ndef msvv(bids, queries):\n def psi(x):\n return 1-np.exp(x-1)\n \n def best_adv(bids):\n bids = bids.assign(Fraction_Spent = (bids['Initial_Budget']-bids['Budget'])/bids['Initial_Budget'])\n bids = bids.assign(Psi = psi(bids['Fraction_Spent']))\n bids = bids.assign(Prod = bids['Bid Value']*bids['Psi'])\n return bids.Advertiser.iloc[bids.Prod.argmax()]\n \n revenue = 0\n bids = bids.assign(Initial_Budget=bids['Budget'])\n for query in queries:\n match_bids = bids[(bids['Keyword']==query)&(bids['Budget']>=bids['Bid Value'])]\n if match_bids.empty:\n continue\n adv = best_adv(match_bids)\n bid_val = float(match_bids.loc[match_bids['Advertiser']==adv,'Bid Value'])\n bids.loc[bids['Advertiser']==adv,'Budget'] -= bid_val\n revenue += bid_val\n return revenue\n\ndef balance(bids, queries):\n revenue = 0\n bids = bids.assign(Initial_Budget=bids['Budget'])\n for query in queries:\n match_bids = bids[(bids['Keyword']==query)&(bids['Budget']>=bids['Bid Value'])]\n if match_bids.empty:\n continue\n adv = match_bids.Advertiser.iloc[match_bids.Budget.argmax()]\n bid_val = float(match_bids.loc[match_bids['Advertiser']==adv,'Bid Value'])\n bids.loc[bids['Advertiser']==adv,'Budget'] -= bid_val\n revenue += bid_val\n return revenue\n\nif __name__ == \"__main__\":\n bids = pd.read_csv(\"bidder_dataset.csv\")\n bids = bids.fillna(method='ffill')\n sum_budgets = bids.Budget.sum()\n queries = np.loadtxt(\"queries.txt\",dtype=\"str\", delimiter=\"\\n\")\n np.random.seed(0)\n if sys.argv[1]=='greedy':\n algo = greedy\n elif sys.argv[1]=='mssv':\n algo = mssv\n elif sys.argv[1]=='balanced':\n algo = balanced\n else:\n print(\"Invalid Algorithm!\")\n print(algo(bids,queries))\n for i in range(100):\n bids = pd.read_csv(\"bidder_dataset.csv\")\n bids = bids.fillna(method='ffill')\n np.random.shuffle(queries)\n rev = algo(bids,queries)\n comp_ratio = rev/sum_budgets\n print(comp_ratio)" } ]
1
shb115/2019cryptocontest
https://github.com/shb115/2019cryptocontest
dbe4ba76df04bcc95e7eddf7d506d5c56d1a14f4
5484bee5fc22f2046ffc7beb5643175f34fb5904
0deeca8fc9e2265de41bc36c3c5b7ab8495dfe83
refs/heads/main
2023-09-02T20:51:59.630317
2021-10-24T01:41:49
2021-10-24T01:41:49
420,302,871
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3421778082847595, "alphanum_fraction": 0.5365265607833862, "avg_line_length": 23.469026565551758, "blob_id": "0ce87fde8d73adbfb663269e76d1a7ac715fe53a", "content_id": "997b90252159eafefa1bf6c7645d8ad872507f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3058, "license_type": "no_license", "max_line_length": 70, "num_lines": 113, "path": "/No.3/2019암호경진대회_3번.py", "repo_name": "shb115/2019cryptocontest", "src_encoding": "UTF-8", "text": "#문제에서 주어진 것\r\np=0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF\r\na=-3\r\nb=0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B\r\nn=0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551\r\n\r\nr=0xE45054EB5B1ABD976650F7F395BF51D0D8DD193E0174E7A14A1C8C127FBDF2DB\r\ns1=0x09371E411284D26B4FAE3BD85B9545BBBFACE1FFE0868BD7701660A50C6E3F17\r\ns2=0xB84ABC62455C570D5500186D83BFD1E1C23CB3135D4A32CE19B3DB61F1680EDC\r\n\r\nhm1=0x389fa4507cd536c67db35b80b06ab0b0b034b7a5c67cf9a2d06ed00876d568f9\r\nhm2=0x1a2fc26dc7ea5a2a4748b7cb2b1ef193d96ab2c99f93092f69e63075b28d1278\r\n\r\ngx=0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296 \r\ngy=0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5\r\n\r\npublickey=0xdc9213e6cda06195098c901c36c0f20d9b5decd252f5f00bfc5a63cae0c5aeac\r\n\r\n#역원 구하는 함수\r\ndef e_gcd(a,b):\r\n r1, r2 = a, b\r\n s1, s2 = 1, 0\r\n t1, t2 = 0,1\r\n while (r2 > 0):\r\n q = r1 // r2\r\n r = r1 - q * r2\r\n r1, r2 = r2, r\r\n s = s1 - q * s2\r\n s1, s2 = s2, s\r\n t = t1 - q * t2\r\n t1, t2 = t2, t\r\n return s1\r\n\r\n#d구하기\r\nz = r * (s2 - s1)\r\ns = e_gcd(z, n)\r\nd = ((s1 * hm2 - s2 * hm1) * s) % n\r\nprint(hex(d))\r\nprivatekey = d\r\n\r\nk = (e_gcd(s2, n) * (hm2 + d * r)) % n\r\nkinv = e_gcd(k,n)\r\ns = (kinv * (hm2 + d * r)) % n\r\nprint(s == s2)\r\n\r\n#같은 것끼리 더하는 함수 (G+G=2G)\r\ndef same(x1, y1):\r\n lamda1 = (3 * x1 * x1 + a) * e_gcd(2 * y1, p)\r\n x2 = (lamda1 * lamda1 - 2 * x1) % p\r\n y2 = (lamda1 * (x1 - x2) - y1) % p\r\n return x2, y2\r\n\r\n#다른 것끼리 더하는 함수\r\ndef different(x1, y1, x2, y2):\r\n lamda2 = (y2 - y1) * e_gcd((x2 - x1), p)\r\n x3 = (lamda2 * lamda2 - x1 - x2) % p\r\n y3 = (lamda2 * (x1 - x3) - y1) % p\r\n return x3, y3\r\n\r\n#16G를 만드는 함수\r\ndef sixteen(x1, y1):\r\n for i in range(4): \r\n x1, y1 = same(x1, y1)\r\n return x1, y1\r\n\r\n#d의 각 자리\r\nd1 = []\r\ndivisor = 16\r\nfor i in range(64):\r\n d1.append(d % divisor)\r\n d = d // divisor\r\nd1.reverse()\r\n\r\n#d의 각 자리별 좌표 초기화\r\ng = []\r\nfor i in range(64):\r\n g.append([0, 0])\r\n\r\n#d의 각 자리별 좌표 구하기\r\nfor i in range(64):\r\n if d1[63 - i] == 0:\r\n g[63 - i] = [0, 0]\r\n elif d1[63 - i] == 1:\r\n x1, y1 = gx, gy\r\n for j in range(i):\r\n x1, y1 = sixteen(x1, y1)\r\n g[63 - i] = [x1, y1]\r\n elif d1[63 - i] == 2:\r\n x1, y1 = gx, gy\r\n for j in range(i):\r\n x1, y1 = sixteen(x1, y1)\r\n x2, y2 = same(x1, y1)\r\n g[63 - i] = [x2, y2]\r\n else:\r\n x1, y1 = gx, gy\r\n for j in range(i):\r\n x1, y1 = sixteen(x1, y1)\r\n x2, y2 = same(x1, y1)\r\n for j in range(d1[63 - i] - 2):\r\n x3, y3 = different(x1, y1, x2, y2)\r\n x2, y2 = x3, y3\r\n g[63 - i] = [x2, y2]\r\n\r\n#d의 각 자리별 좌표 더하기\r\nx1, y1 = g[0]\r\nfor i in range(1, 64):\r\n x2, y2 = g[i]\r\n if x2 == 0 & y2 == 0:\r\n x3, y3 = x1, y1\r\n else:\r\n x3, y3 = different(x1, y1, x2, y2)\r\n x1, y1 = x3, y3\r\nprint(x1 == publickey)\r\n \r\n \r\n\r\n\r\n" }, { "alpha_fraction": 0.3209212124347687, "alphanum_fraction": 0.5206629633903503, "avg_line_length": 
27.794872283935547, "blob_id": "26bf5fb6e1b5355b9bd9efc80b90fbbfce0bfe7a", "content_id": "182b0afb856901cf8d06d4e906b4688aba5462d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4650, "license_type": "no_license", "max_line_length": 227, "num_lines": 156, "path": "/No.4/2019암호분석경진대회_4번.cpp", "repo_name": "shb115/2019cryptocontest", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n\r\nunsigned char AES_Mul(unsigned char a, unsigned char b) {\r\n\tunsigned char result = 0;\r\n\r\n\twhile (b != 0) {\r\n\t\tif (b & 1) {\r\n\t\t\tresult = result ^ a;\r\n\t\t}\r\n\r\n\t\tif (((a >> 7) & 1) == 1) {\r\n\t\t\ta = (a << 1) ^ 0x1B;\r\n\t\t}\r\n\t\telse {\r\n\t\t\ta = a << 1;\r\n\t\t}\r\n\r\n\t\tb = b >> 1;\r\n\t}\r\n\treturn result;\r\n}\r\n\r\nvoid Func(unsigned char key[], unsigned char* x) {\r\n\tunsigned char s[256] =\r\n\t{\r\n\t 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,\r\n\t 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,\r\n\t 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,\r\n\t 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,\r\n\t 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,\r\n\t 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,\r\n\t 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,\r\n\t 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,\r\n\t 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,\r\n\t 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,\r\n\t 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,\r\n\t 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,\r\n\t 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,\r\n\t 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,\r\n\t 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,\r\n\t 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16\r\n\t};\r\n\tunsigned char a[4];\r\n\tunsigned char b[4];\r\n\tint i = 0;\r\n\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\ta[i] = key[i] ^ x[i];\r\n\t}\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tb[i] = s[a[i]];\r\n\t}\r\n\r\n\tx[0] = AES_Mul(2, b[0]) ^ AES_Mul(3, b[1]) ^ b[2] ^ b[3];\r\n\tx[1] = b[0] ^ AES_Mul(2, b[1]) ^ AES_Mul(3, b[2]) ^ b[3];\r\n\tx[2] = b[0] ^ b[1] ^ AES_Mul(2, b[2]) ^ AES_Mul(3, b[3]);\r\n\tx[3] = AES_Mul(3, b[0]) ^ b[1] ^ b[2] ^ AES_Mul(2, b[3]);\r\n\t//printf(\"�� \");\r\n}\r\n\r\nvoid Round(unsigned char* key, unsigned char* x) {\r\n\tunsigned char temp[16], temp2[4];\r\n\tint i;\r\n\r\n\tfor (i = 0;i < 16;i++) {\r\n\t\ttemp[i] = x[i];\r\n\t}\r\n\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tx[12 + i] = temp[i];\r\n\t}\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tx[8 + i] = temp[12 + i];\r\n\t}\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tx[4 + i] = temp[8 + i];\r\n\t}\r\n\tfor (i = 0;i < 4;i++)\r\n\t\ttemp2[i] = temp[i];\r\n\tFunc(key, temp2);\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tx[i] = temp2[i] ^ temp[4 + 
i];\r\n\t}\r\n}\r\n\r\nvoid Round2(unsigned char* key, unsigned char* x) {\r\n\tunsigned char temp[16], temp2[4];\r\n\tint i;\r\n\r\n\tfor (i = 0;i < 16;i++) {\r\n\t\ttemp[i] = x[i];\r\n\t}\r\n\tfor (i = 0;i < 16;i++) {\r\n\t\tx[i] = temp[i];\r\n\t}\r\n\tfor (i = 0;i < 4;i++)\r\n\t\ttemp2[i] = temp[i];\r\n\tFunc(key, temp2);\r\n\tfor (i = 0;i < 4;i++) {\r\n\t\tx[i + 4] = temp2[i] ^ temp[i + 4];\r\n\t}\r\n\r\n}\r\nvoid Hash(int num, unsigned char* x, unsigned char* k, unsigned char* H) {\r\n\tunsigned char key[4], arr[4];\r\n\tint i, j;\r\n\r\n\tfor (i = 0;i < num - 1;i++) {\r\n\t\tfor (j = 0;j < 4;j++) {\r\n\t\t\tkey[j] = k[j];\r\n\t\t\tarr[j] = i + j;\r\n\t\t}\r\n\t\tRound(key, x);\r\n\t\tRound(arr, k);\r\n\t}\r\n\tfor (j = 0;j < 4;j++) {\r\n\t\tkey[j] = k[j];\r\n\t\tarr[j] = i;\r\n\t}\r\n\r\n\tRound2(key, x);\r\n\r\n\tfor (i = 0;i < 16;i++)\r\n\t\tH[i] = x[i];\r\n}\r\n\r\nint main() {\r\n\tint s;\r\n\tfor (s = 1;s < 256;s++)\r\n\t{\r\n\t\t//unsigned char Message[32] = { 0x19, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };\r\n\t\tunsigned char Message[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };\r\n\t\tunsigned char X[16], K[16], H[16], X0[16], K0[16];\r\n\t\tunsigned char key[4];\r\n\t\tint i;\r\n\t\tint j = 4;\r\n\r\n\t\tMessage[12] = Message[12] ^ s;\r\n\t\tMessage[16] = Message[16] ^ s;\r\n\t\tMessage[28] = Message[28] ^ s;\r\n\r\n\t\tfor (i = 0;i < 16;i++) {\r\n\t\t\tX[i] = Message[i];\r\n\t\t\tK[i] = Message[16 + i];\r\n\t\t\tX0[i] = Message[i];\r\n\t\t\tK0[i] = Message[16 + i];\r\n\t\t}\r\n\t\tHash(j, X, K, H);\r\n\r\n\t\tfor (i = 0;i < 16;i++)\r\n\t\t\tH[i] = X0[i] ^ K0[i] ^ H[i];\r\n\t\tfor (i = 0;i < 16;i++)\r\n\t\t\tprintf(\" %X\", H[i]);\r\n\t\tprintf(\"\\n\");\r\n\t}\t\r\n}" }, { "alpha_fraction": 0.32242557406425476, "alphanum_fraction": 0.5710852146148682, "avg_line_length": 33.82119369506836, "blob_id": "c31dc6e3bd20bf9ff125346d662d331a143eea42", "content_id": "58588b209dca90b0d76ec82c38184eb49e0feb70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5431, "license_type": "no_license", "max_line_length": 113, "num_lines": 151, "path": "/No.2/No.2.ino", "repo_name": "shb115/2019cryptocontest", "src_encoding": "UTF-8", "text": "#include <stdio.h> \r\n#include <stdlib.h>\r\n\r\n#define ROUND_NUM 100 \r\ntypedef unsigned char u8;\r\n \r\nu8 sbox2[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7,\r\n 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf,\r\n 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5,\r\n 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,\r\n 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e,\r\n 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,\r\n 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef,\r\n 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,\r\n 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff,\r\n 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d,\r\n 0x64, 0x5d, 
0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,\r\n 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,\r\n 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5,\r\n 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e,\r\n 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e,\r\n 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,\r\n 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55,\r\n 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,\r\n 0xb0, 0x54, 0xbb, 0x16 };\r\n\r\nu8 sbox3[256] = { 0x9c, 0x83, 0x88, 0x84, 0x0d, 0x94, 0x90, 0x3a, 0xcf, 0xfe, 0x98, 0xd4, 0x01, 0x28, 0x54, 0x89,\r\n 0x35, 0x7d, 0x36, 0x82, 0x05, 0xa6, 0xb8, 0x0f, 0x52, 0x2b, 0x5d, 0x50, 0x63, 0x5b, 0x8d, 0x3f,\r\n 0x48, 0x02, 0x6c, 0xd9, 0xc9, 0xc0, 0x08, 0x33, 0xcb, 0x5a, 0x1a, 0x0e, 0x8e, 0x27, 0xce, 0xea,\r\n 0xfb, 0x38, 0xdc, 0x3c, 0xe7, 0x69, 0xfa, 0x65, 0xf8, 0xed, 0x7f, 0x1d, 0x14, 0xd8, 0x4d, 0x8a,\r\n 0xf6, 0x7c, 0xd3, 0xe5, 0xe4, 0x91, 0xa5, 0x5f, 0xad, 0xc4, 0x29, 0x4c, 0xd6, 0x1c, 0xd0, 0x7b,\r\n 0xac, 0x2e, 0xff, 0x12, 0xdf, 0x03, 0x4e, 0xa4, 0x95, 0x34, 0x41, 0xc6, 0xb5, 0xb3, 0xa7, 0x30,\r\n 0x2f, 0x10, 0x55, 0x04, 0xbc, 0xb2, 0xcc, 0x7a, 0xba, 0x06, 0xfd, 0x80, 0xaf, 0xc3, 0x60, 0x57,\r\n 0xae, 0x5c, 0xbf, 0x70, 0x6d, 0x62, 0xc7, 0x0a, 0x43, 0x49, 0x25, 0xde, 0xef, 0x00, 0x0c, 0x2d,\r\n 0x32, 0xf3, 0xec, 0x13, 0xa0, 0x68, 0xbb, 0xe8, 0x3b, 0x58, 0x81, 0xc2, 0x9b, 0xa2, 0xe6, 0x8c,\r\n 0x9f, 0x7e, 0xb0, 0x23, 0xdd, 0xd5, 0x6f, 0x77, 0xb9, 0x11, 0x47, 0xeb, 0x21, 0xa1, 0xf4, 0x24,\r\n 0x1f, 0xcd, 0xc5, 0xf5, 0xb6, 0xf9, 0xdb, 0xa3, 0x3d, 0x2c, 0x53, 0x9d, 0x6e, 0x6a, 0x1b, 0x86,\r\n 0x18, 0x37, 0xc8, 0x92, 0x72, 0x2a, 0xb1, 0x56, 0x93, 0xa9, 0x0b, 0x15, 0x9a, 0x85, 0x51, 0xf7,\r\n 0x45, 0x87, 0xda, 0xd1, 0xe3, 0x59, 0x4b, 0x39, 0x17, 0x22, 0x8b, 0xe0, 0xb4, 0x42, 0x74, 0x75,\r\n 0x8f, 0xc1, 0x4a, 0x99, 0xb7, 0xfc, 0x09, 0xf1, 0x9e, 0xca, 0xa8, 0x46, 0x79, 0x3e, 0xe2, 0x61,\r\n 0x1e, 0x07, 0x67, 0xee, 0x96, 0x26, 0x71, 0x6b, 0x64, 0xe1, 0x78, 0x16, 0x31, 0xaa, 0xd7, 0x20,\r\n 0x73, 0x5e, 0x76, 0xf2, 0x40, 0x19, 0xbd, 0x97, 0xbe, 0x66, 0xd2, 0xf0, 0x4f, 0xab, 0x44, 0xe9 };\r\n\r\nvoid key_gen(u8* rnd, u8* key) { \r\n u8 key1 = ~key[0]; \r\n u8 key2 = ~key[1]; \r\n u8 A, B;\r\n\r\n A = key1 + key2;\r\n rnd[0] = (key1 | key2) + 0xff;\r\n rnd[1] = A - rnd[0];\r\n\r\n B = ~rnd[0] + ~rnd[1];\r\n key2 = ~(rnd[0] & rnd[1]) + 0xfe;\r\n key1 = B - key2;\r\n\r\n rnd[2] = key1;\r\n rnd[3] = key2;\r\n\r\n int i, j = 4, k=0xfd;\r\n for (i = 2; i < ROUND_NUM; i=i+2) {\r\n key1 = ~(key1 & key2) + (k--); \r\n key2 = A - key1;\r\n\r\n rnd[j++] = key1; \r\n rnd[j++] = key2;\r\n\r\n key2 = ~(key1 & key2) + (k--); \r\n key1 = B - key2;\r\n \r\n rnd[j++] = key1; \r\n rnd[j++] = key2;\r\n }\r\n}\r\n\r\nvoid enc(u8* text, u8* rnd) { \r\n u8 text1 = ~text[0]; \r\n u8 text2 = ~text[1]; \r\n u8 tmp1, tmp2;\r\n\r\n // 0 ROUND\r\n tmp1 = ((text1 + text2) ^ text1) - rnd[0];\r\n tmp2 = (-text1) ^ rnd[1];\r\n \r\n int i, j = 2; \r\n for (i = 1; i < ROUND_NUM - 1; i = i + 2) { \r\n text1 = tmp1; \r\n text2 = sbox3[tmp2];\r\n\r\n tmp1 = ((text1 + text2) ^ text2) - rnd[j++]; \r\n tmp2 = (-text2) ^ rnd[j++];\r\n \r\n text1 = tmp1; \r\n text2 = sbox3[tmp2];\r\n\r\n tmp1 = ((text1 + text2) ^ text1) - rnd[j++]; \r\n tmp2 = (-text1) ^ rnd[j++];\r\n \r\n } \r\n \r\n // 99 ROUND\r\n text1 = tmp1;\r\n 
text2 = sbox3[tmp2];\r\n  \r\n  tmp1 = ((text1 + text2) ^ text2) - rnd[198];\r\n  tmp2 = (-text2) ^ rnd[199];\r\n  \r\n  text[0] = ~tmp1;\r\n  text[1] = sbox2[tmp2];\r\n  \r\n}\r\n\r\n\r\nvoid testVector(){\r\n  u8 key[2] = { 0x12, 0x34 }; \r\n  u8 rnd[ROUND_NUM * 2] = { 0, }; \r\n  u8 text[2] = { 0x56, 0x78 };\r\n\r\n  Serial.print(\"plain text : \");Serial.print(text[0],HEX);Serial.print(text[1],HEX);\r\n  key_gen(rnd, key);\r\n  enc(text, rnd); \r\n  Serial.print(\"\\ncipher text : \");Serial.print(text[0],HEX);Serial.println(text[1],HEX);\r\n  \r\n}\r\n\r\n\r\nvoid loop(){\r\n}\r\n\r\nvoid setup(){\r\n  Serial.begin(9600);\r\n\r\n  u8 key[2] = { 0x12, 0x34 }; \r\n  u8 rnd[ROUND_NUM * 2] = { 0, }; \r\n  u8 text[2] = { 0x56, 0x78 };\r\n\r\n  unsigned long time1; \r\n  unsigned long time2;\r\n\r\n  // verify the test vector\r\n  testVector();\r\n\r\n  // benchmark: 10000 runs of key schedule + encryption\r\n  time1 = millis(); \r\n  for (int i = 0; i < 10000; i++) { \r\n    key_gen(rnd, key);\r\n    enc(text, rnd); \r\n  } \r\n  time2 = millis();\r\n  Serial.println((time2 - time1));\r\n}\r\n" }, { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.6742424368858337, "avg_line_length": 10.230769157409668, "blob_id": "4ae0116829ca20c644265e4ee9604bc4a506db24", "content_id": "f0bbcc72c0fbc0f59981ad49ce0ea6edb980b740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "no_license", "max_line_length": 29, "num_lines": 13, "path": "/README.md", "repo_name": "shb115/2019cryptocontest", "src_encoding": "UTF-8", "text": "# 2019cryptocontest\n\n2019 Cryptanalysis Competition - 방수민, 신민정, 신한범\n\nProblem 1 - Crypto puzzle\n\nProblem 2 - Cipher implementation and optimization\n\nProblem 3 - Digital signature\n\nProblem 4 - Finding a collision pair for a compression function\n\nProblem 5 - Side-channel analysis" } ]
4
sidyvan/joias_project
https://github.com/sidyvan/joias_project
644a4a924d0f1f0975ee49f5ba224ba150a8db34
608072b7bb40d12d323b9b3e1ca55b59555def63
ea4009658adf7e95e689a7ac2d6dccbbee21921a
refs/heads/master
2022-12-02T16:05:19.026511
2020-07-28T13:22:13
2020-07-28T13:22:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.53644859790802, "alphanum_fraction": 0.5607476830482483, "avg_line_length": 34.66666793823242, "blob_id": "b80e67ebbcb7ed8e4c17e04e709b62871c99d65b", "content_id": "7f935cec8841bdea5c9e19bd65cc3a84bc50027e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 114, "num_lines": 30, "path": "/clientes/migrations/0001_initial.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-07-23 17:03\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cliente',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nome', models.CharField(max_length=255)),\n ('endereco', models.CharField(max_length=255)),\n ('descricao', models.TextField()),\n ('telefone', models.CharField(max_length=32)),\n ('CPF', models.CharField(max_length=11)),\n ('date', models.DateField(default=datetime.datetime.now)),\n ('status', models.CharField(choices=[('ativo', 'ATIVO'), ('inativo', 'INATIVO')], max_length=7)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6971770524978638, "alphanum_fraction": 0.7040205597877502, "avg_line_length": 32.400001525878906, "blob_id": "bb4aca0217ae3c550cece280540e6535bc42a3bb", "content_id": "4b3338e454c4b7423fb4a08dc364b931fdfde08e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 96, "num_lines": 35, "path": "/vendas/models.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import datetime\nfrom django.core.validators import MinValueValidator\nfrom clientes.models import Cliente\nfrom decimal import Decimal\n\n\nclass Venda(models.Model):\n \n\n\n valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[\n MinValueValidator(Decimal('0.00'))], null=True, blank=True)\n \n descricao_venda = models.TextField()\n data_compra = models.DateField(default=datetime.now)\n data_vencimento = models.DateField(default=datetime.now)\n cliente = models.ForeignKey(Cliente , on_delete=models.CASCADE, related_name='vendas')\n status = models.CharField(max_length=10,default='Andamento')\n \n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n \n\n def __str__(self):\n return self.cliente.nome\n\n\nclass parcelas(models.Model):\n venda = models.ForeignKey(Venda , on_delete=models.CASCADE, related_name='parcela_vendas')\n valor = models.FloatField()\n data = models.DateField(default=datetime.now)\n \n def __str__(self):\n return self.venda.descricao_venda # TODO\n" }, { "alpha_fraction": 0.5635775923728943, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 31, "blob_id": "4b8ffbadeae0f9548f269cbe17794f6d1ed1824d", "content_id": "6c3dbf8a21f6ce65bcc0cee83ce9bd874c4361a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 140, "num_lines": 29, "path": "/vendas/migrations/0002_auto_20200728_0029.py", "repo_name": "sidyvan/joias_project", 
"src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-07-28 03:29\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('vendas', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='venda',\n name='status',\n field=models.CharField(default='Andamento', max_length=10),\n ),\n migrations.CreateModel(\n name='parcelas',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('valor', models.FloatField()),\n ('data', models.DateField(default=datetime.datetime.now)),\n ('venda', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parcela_vendas', to='vendas.Venda')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 27.5, "blob_id": "9e79b0e729a39c741627f324475c7e997d77e2e3", "content_id": "402ed4e2e941e58c6f5d6ea21ab3d9462f724da4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 65, "num_lines": 6, "path": "/vendas/views.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Venda\n\ndef newSale(request):\n vendas = Venda()\n return render(request, 'vendas/venda.html',{'vendas': vendas})\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 24.600000381469727, "blob_id": "96d28a54e2a2c7ac902490a90b62518690a8ffcd", "content_id": "a10af52ec00d85dcabcc9739c44e6e9f801b3e3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/vendas/admin.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Venda, parcelas\n\nadmin.site.register(Venda)\nadmin.site.register(parcelas)\n" }, { "alpha_fraction": 0.6659877896308899, "alphanum_fraction": 0.6659877896308899, "avg_line_length": 29.6875, "blob_id": "3282feaeb37ba1be178baa7d81009bda674691c5", "content_id": "08cbf1e6974709430bc33186d1e044840793714e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/clientes/urls.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n \n path('', views.clienteList,name='cliente-list'),\n path('cliente/<int:id>', views.clienteView,name='cliente-view'),\n path('newcliente/', views.newCliente,name='cliente-new'),\n path('edit/<int:id>', views.editCliente,name='cliente-edit'),\n # path('changestatus/<int:id>', views.changeStatus,name='cliente-changestatus'),\n path('delete/<int:id>', views.deleteCliente,name='delete-edit'),\n \n \n\n \n]\n" }, { "alpha_fraction": 0.6012608408927917, "alphanum_fraction": 0.6304176449775696, "avg_line_length": 38.65625, "blob_id": "3202c39b709fd3314e1a2c478fef88766eca7fb7", "content_id": "28e2e438e722082fc2b10a8922a390db417267ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 181, "num_lines": 32, "path": "/vendas/migrations/0001_initial.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-07-24 19:43\n\nimport datetime\nfrom decimal import Decimal\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('clientes', '0004_auto_20200723_1516'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Venda',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('valor_total', models.DecimalField(blank=True, decimal_places=2, max_digits=13, null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),\n ('descricao_venda', models.TextField()),\n ('data_compra', models.DateField(default=datetime.datetime.now)),\n ('data_vencimento', models.DateField(default=datetime.datetime.now)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vendas', to='clientes.Cliente')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6272600889205933, "alphanum_fraction": 0.6425591111183167, "avg_line_length": 23.55172348022461, "blob_id": "63831918127473fa281dbd40124ebc4d22b40e75", "content_id": "f993c605b7d76cd29530bcbbeab9a369815aa573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 60, "num_lines": 29, "path": "/clientes/models.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import datetime\n\n\nclass Cliente(models.Model):\n\n STATUS = (\n ('ativo','ATIVO'),\n ('inativo','INATIVO'),\n\n )\n\n nome = models.CharField(max_length=255)\n endereco = models.CharField(max_length=255)\n descricao = models.TextField()\n telefone = models.CharField(max_length=32)\n CPF = models.CharField(max_length=11,unique=True)\n data_nascimento = models.DateField(default=datetime.now)\n\n status = models.CharField(\n max_length=7,\n choices = STATUS, \n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n \n\n def __str__(self):\n return self.nome\n\n \n\n" }, { "alpha_fraction": 0.5146341323852539, "alphanum_fraction": 0.5951219797134399, "avg_line_length": 21.77777862548828, "blob_id": "bcd0be3e39124d170da9a4faa3d254505eabe517", "content_id": "2d210d29bb019fa25c1181f7840affcacaa0648f", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/clientes/migrations/0003_auto_20200723_1515.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-07-23 18:15\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clientes', '0002_auto_20200723_1436'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='cliente',\n name='CPF',\n field=models.CharField(blank=True, max_length=11, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6483424305915833, "alphanum_fraction": 0.6546370387077332, "avg_line_length": 27.926828384399414, "blob_id": "9ac416d1b9303e9ac158278c60554504b08c8b9a", "content_id": "a68a243a88c051c922c922bda7f702e08b0860ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2383, "license_type": "no_license", "max_line_length": 134, "num_lines": 82, "path": "/clientes/views.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Cliente\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom .forms import ClienteForm\n\nfrom django.contrib import messages\n\n@login_required\ndef clienteList(request):\n\n search = request.GET.get('search')\n filter = request.GET.get('filter')\n # tasksDoneRecently = Task.objects.filter(done='done', updated_at__gt=datetime.datetime.now()-datetime.timedelta(days=30)).count()\n # tasksDone = Task.objects.filter(done='done', user=request.user).count()\n # tasksDoing = Task.objects.filter(done='doing', user=request.user).count()\n\n if search:\n\n clientes = Cliente.objects.filter(CPF__icontains=search)\n \n elif filter:\n\n clientes = Cliente.objects.filter(status=filter)\n \n\n else:\n\n\n clientes_list = Cliente.objects.all().order_by('-created_at')\n\n paginator = Paginator(clientes_list, 5)\n\n page = request.GET.get('page')\n\n clientes = paginator.get_page(page)\n\n return render(request, 'clientes/list.html',{'clientes': clientes})\n@login_required\ndef clienteView(request, id):\n cliente = get_object_or_404(Cliente, pk=id) \n return render(request,'clientes/cliente.html',{'cliente': cliente})\n\n@login_required\ndef newCliente(request):\n \n form = ClienteForm(request.POST or None)\n if request.method == 'POST':\n\n if form.is_valid():\n form.save()\n \n return redirect('/')\n\n\n \n return render(request, 'clientes/addcliente.html', {'form': form})\n\n@login_required\ndef editCliente(request, id):\n cliente = get_object_or_404(Cliente, pk=id)\n form = ClienteForm(instance=cliente)\n\n if(request.method == 'POST'):\n form = ClienteForm(request.POST, instance=cliente)\n\n if(form.is_valid()):\n cliente.save()\n return redirect('/')\n \n else:\n return render(request, 'clientes/editcliente.html', {'form': form, 'cliente': cliente})\n else:\n return render(request, 'clientes/editcliente.html', {'form': form, 'cliente': cliente})\n\n\n@login_required\ndef deleteCliente(request, id):\n cliente = get_object_or_404(Cliente, pk=id)\n cliente.delete()\n messages.info(request, 'Cliente deletado com Sucesso!')\n return redirect('/')\n\n\n \n\n\n\n" }, { "alpha_fraction": 0.5874125957489014, "alphanum_fraction": 0.5874125957489014, "avg_line_length": 10.916666984558105, "blob_id": 
"5abb72b03f5261bfa59aae2f5466c55d34ca165a", "content_id": "56f16196bdea7365e6c53712775f7e582a8b1b88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/vendas/urls.py", "repo_name": "sidyvan/joias_project", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n \n path('newsale/', views.newSale,name='sale-new'),\n \n \n \n\n \n]\n" } ]
11
AkashKarki/Bank-Database
https://github.com/AkashKarki/Bank-Database
76dc1cad03b0dd63c691db642b0646913f42259e
ae41fc5da06bf28da22d7d72e3d24cbe8a6c9ecd
37fbb2d3621c511d2730e3c9317f74a4e35bcd78
refs/heads/master
2020-12-06T23:28:49.582259
2017-06-27T06:12:20
2017-06-27T06:12:20
95,523,350
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4203396737575531, "alphanum_fraction": 0.43063265085220337, "avg_line_length": 48.82234573364258, "blob_id": "3aaceb3eff7a9753a2a694120b7a83e0eea1702b", "content_id": "1be7b3e0edd5cfb27eaa3d335f72d8bc956ce207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54406, "license_type": "no_license", "max_line_length": 550, "num_lines": 1092, "path": "/BANK_FINAL_pro v1.py", "repo_name": "AkashKarki/Bank-Database", "src_encoding": "UTF-8", "text": "import cx_Oracle #Imports all the classes and functions necessary for establishing a connection from Oracle Database.\nclass customer:\n def __init__(self):\n self.conn = cx_Oracle.connect('TEST/root@xe')\n print(\"connected\")\n self.cur = self.conn.cursor() #creating an object of a cursor class\n\n def new_coustomer(self, fname, lname, mobno, add1, add2, city, state, pin, gen, email, password): #function to create new customer.\n self.cur.execute(\"insert into customer values (C_ID_VAL.NEXTVAL,:pram1,:pram2,:pram3,:pram4,:pram5,:pram6,:pram7,:pram8,:pram9,:pram10,:pram11)\",(fname, lname, mobno, add1, add2, city, state, pin, gen, email, password))\n query = \"select C_ID_VAL.currval from dual\"\n self.cur.execute(query)\n ren = self.cur.fetchall()\n ren = list(sum(ren, ()))\n print(\"Welcome \"+fname+\"!\")\n print(\"Your coustomer ID= \", ren[0])\n print(\"Your password= \", password)\n self.conn.commit()\n\n\nclass CurrentAccount:\n def __init__(self):\n self.conn = cx_Oracle.connect('TEST/root@xe')\n print(\"connected\")\n self.cur = self.conn.cursor()\n\n def open_account(self, c_id): #open new current account \n bal = int(input(\"enter balance you want to enter in your new account: \"))\n if bal<=0: #checking for a valid amount\n print(\"Please enter a valid amount.\\n\")\n else:\n self.cur.execute(\"insert into current_acc values(ACCOUNT_NO_VAL2.NEXTVAL,:pram1,'ACTIVE',to_date(sysdate,'DD/MM/YY'),NUll,:pram2)\",(c_id, bal))\n self.conn.commit()\n print(\"Current Account Open Successful\\n\")\n\n def deposit(self, c_id): #function for deposit\n amount = int(input(\"Enter amount to deposit: \"))\n if amount<=0:\n print(\"Please enter a valid amount.\\n\")\n else:\n self.cur.execute(\"select c_id from current_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret): #to check whether customer ID exists or not\n pass\n else:\n print(c_id + \" does not have a Current Account\\n\")\n return\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n new_amount = ret + amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (new_amount, c_id)) #UPDATE BALANCE IN CURRENT ACCOUNT FOR RESPECTIVE C_ID\n # self.conn.commit()\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Current Account')\",(c_id, amount, new_amount)) #UPDATE HISTORY TABLE\n self.conn.commit() #COMMIT ALL THE VALUES\n print(\"Amount Deposited Successfully\\n\")\n\n def withdraw(self, c_id): #FUNCTION TO WITHDRAW FROM CURRENT_ACCOUNT\n amount = int(input(\"enter amount to withdraw: \"))\n if amount<=0:\n print(\"Please enter a valid amount.\\n\")\n else:\n self.cur.execute(\"select c_id from current_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a Current Account.\\n\")\n return\n self.cur.execute(\"select balance from current_acc 
where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if ret < amount:\n print(\"your account has \" + str(ret) + \" amount\")\n return\n else:\n ret = ret - amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'withdrawl',:2,:3,'Current Account')\",(c_id, amount, ret))\n self.conn.commit()\n print(\"Amount Withdrawn Successfully\\n\")\n\n def transfer(self, c_id): #FUNCTION TO TRANFER FROM CURRENT ACCOUNT\n print(\"enter amount to transfer\")\n amount = int(input())\n if amount<=0:\n print(\"Please enter a valid amount.\\n\")\n return\n else:\n self.cur.execute(\"select c_id from current_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a current account\\n\")\n return\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if ret < amount: #CHECKING WHETHER AMOUNT ENTERED IS GREATER THAN THE BALANCE\n print(\"your account has \" + str(ret) + \" amount\")\n return\n print(\"enter customer ID in which you want to transfer the amount: \")\n c_id1 = input()\n print(\"1. transfer into saving account\\n2. transfer into current account\")\n ch = int(input())\n if ch == 1:\n try:\n self.cur.execute(\"select c_id from saving_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id1) in ret):\n pass\n else:\n print(c_id1 + \" does not have a saving account.\\n\")\n return\n self.cur.execute(\"select status from SAVING_ACC where c_id=\" + c_id1 + \"\")\n stu = self.cur.fetchall()\n stu = list(sum(stu, ()))\n stu = stu[0]\n if str(stu) == \"LOCK\" or str(stu) == \"CLOSE\":\n print(c_id1 + \" is locked or closed.\\n\")\n return\n else:\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret - amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'transfer',:2,:3,'Current Account')\",(c_id, amount, ret))\n self.conn.commit()\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id1 + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret + amount\n self.cur.execute(\"UPDATE saving_acc SET balance=:1 where c_id=:2\", (str(ret), c_id1))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Saving Account')\",(c_id1, amount, ret))\n self.conn.commit()\n print(\"Transaction successful.\\n\")\n except Exception as e:\n print(e)\n elif ch == 2:\n try:\n self.cur.execute(\"select c_id from current_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id1) in ret):\n pass\n else:\n print(c_id1 + \" does not have a current account.\\n\")\n return\n self.cur.execute(\"select status from current_acc where c_id=\" + c_id1 + \"\")\n stu = self.cur.fetchall()\n stu = list(sum(stu, ()))\n stu = stu[0]\n if str(stu) == \"CLOSE\" or str(stu) == \"LOCK\":\n print(c_id1 + \" is locked or closed.\\n\")\n return\n if str(c_id)==str(c_id1):\n print(\"\\nyou can not transfer the amount to same account\")\n return\n else:\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id + \"\")\n ret = 
self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret - amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'transfer',:2,:3,'Current Account')\",(c_id, amount, ret))\n self.conn.commit()\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id1 + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret + amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (str(ret), c_id1))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Current Account')\",(c_id1, amount, ret))\n self.conn.commit()\n print(\"Transaction successful.\\n\")\n except Exception as e:\n print(e)\n\n\nclass SavingAccount: #CLASS FOR SAVING ACCOUNT\n def __init__(self):\n self.conn = cx_Oracle.connect('TEST/root@xe')\n print(\"connected\")\n self.cur = self.conn.cursor()\n\n def open_account(self, c_id): #FUNCTION TO OPEN NEW SAVING ACCOUNT\n bal = int(input(\"enter balance you want to enter in your new account: \"))\n if bal<0:\n print(\"Please enter a valid amount.\\n\")\n return\n else:\n self.cur.execute(\"insert into SAVING_ACC values(ACCOUNT_NO_VAL1.NEXTVAL,:pram1,'ACTIVE',to_date(sysdate,'DD/MM/YY'),NUll,:pram2,0)\",(c_id, bal))\n self.conn.commit()\n print(\"Saving Account Open Successful.\\n\")\n\n def deposit(self, c_id): #FUNCTION TO DEPOSITE INTO SAVING ACCOUNT\n amount = int(input(\"enter amount to deposit: \"))\n if amount<=0: #CHECKING FOR VALID AMOUNT TO DEPOSIT\n print(\"Please enter a valid amount.\\n\")\n return\n else:\n self.cur.execute(\"select c_id from saving_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a saving account.\\n\")\n return\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n new_amount = ret + amount\n self.cur.execute(\"UPDATE saving_acc SET balance=:1 where c_id=:2\", (new_amount, c_id))\n # self.conn.commit()\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Saving Account')\",(c_id, amount, new_amount))\n self.conn.commit()\n print(\"Amount Deposited Successfully\\n\")\n\n def withdraw(self, c_id): #FUNCTION TO WITHDRAW FROM SAVING ACCOUNT\n self.cur.execute(\"select c_id from saving_acc\") #CHECKING FOR C_ID WHETHER IT EXISTS OR NOT\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a saving account\\n\")\n return\n self.cur.execute(\"select WITHDRAWL from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if ret >= 9:\n print(\"you have exceeded the per month withdrawl limit.\\n\")\n return\n amount = int(input(\"enter amount to withdraw: \"))\n if amount<=0: #CHECKING FOR VALID AMOUNT\n print(\"Please enter a valid amount.\\n\")\n return\n else:\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if ret < amount:\n print(\"your account has \" + str(ret) + \" amount\")\n return\n else:\n ret = ret - amount\n self.cur.execute(\"UPDATE saving_acc SET balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history 
values(:1,to_date(sysdate,'DD/MM/YY'),'withdrawl',:2,:3,'Saving Account')\",(c_id, amount, ret))\n self.conn.commit()\n self.cur.execute(\"select WITHDRAWL from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret + 1\n self.cur.execute(\"UPDATE saving_acc SET WITHDRAWL=:1 where c_id=:2\", (str(ret), c_id))\n self.conn.commit()\n print(\"Amount Withdrawn Successfully\\n\")\n\n def transfer(self, c_id):\n print(\"enter amount to transfer: \")\n amount = int(input())\n if amount<=0:\n print(\"Please enter a valid amount.\\n\")\n return\n else:\n self.cur.execute(\"select c_id from saving_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a saving account.\\n\")\n return\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if ret < amount:\n print(\"your account has \" + str(ret) + \" amount\")\n return\n print(\"enter customer ID in which you want to transfer the amount: \")\n c_id1 = input()\n print(\"1. transfer into saving account\\n2. transfer into current account\")\n ch = int(input())\n if ch == 1:\n try:\n self.cur.execute(\"select c_id from saving_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id1) in ret):\n pass\n else:\n print(c_id1 + \" does not have a saving account.\\n\")\n return\n self.cur.execute(\"select status from saving_acc where c_id=\" + c_id1 + \"\")\n spr = self.cur.fetchall()\n spr = list(sum(spr, ()))\n spr = spr[0]\n if str(spr) == \"LOCK\" or str(spr) == \"CLOSE\":\n print(c_id1 + \" is locked or closed.\\n\")\n return\n if str(c_id)==str(c_id1):\n print(\"\\n you can not transfer the amount to same account\")\n return\n else:\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret - amount\n self.cur.execute(\"UPDATE saving_acc SET balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'transfer',:2,:3,'Saving Account')\",(c_id, amount, ret))\n self.conn.commit()\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id1 + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret + amount\n self.cur.execute(\"UPDATE saving_acc SET balance=:1 where c_id=:2\", (str(ret), c_id1)) #UPDATE BALANCE FOR SAVING ACCOUNT\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Saving Account')\",(c_id1, amount, ret)) #UPDATE HISTORY TABLE\n self.conn.commit()\n print(\"Transaction successful.\\n\")\n except Exception as e:\n print(e)\n elif ch == 2:\n try:\n self.cur.execute(\"select c_id from current_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id1) in ret):\n pass\n else:\n print(c_id1 + \" does not have a current account\\n\")\n return\n self.cur.execute(\"select status from current_acc where c_id=\" + c_id1 + \"\")\n spr = self.cur.fetchall()\n spr = list(sum(spr, ()))\n spr = spr[0]\n if str(spr) == \"LOCK\" or str(spr) == \"CLOSE\":\n print(c_id1 + \" is locked or closed.\")\n return\n else:\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret - amount\n self.cur.execute(\"UPDATE saving_acc SET 
balance=:1 where c_id=:2\", (str(ret), c_id))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'transfer',:2,:3,'Saving Account')\",(c_id, amount, ret))\n self.conn.commit()\n self.cur.execute(\"select balance from current_acc where c_id=\" + c_id1 + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n ret = ret + amount\n self.cur.execute(\"UPDATE current_acc SET balance=:1 where c_id=:2\", (str(ret), c_id1))\n self.cur.execute(\"insert into history values(:1,to_date(sysdate,'DD/MM/YY'),'deposit',:2,:3,'Current Account')\",(c_id1, amount, ret))\n self.conn.commit()\n print(\"Transaction successful.\\n\")\n except Exception as e:\n print(e)\n\n def history(self, date1, date2, c_id): #FUNCTION TO STORE TRANSFER, WITHDROW AND DEPOSIT HISTORY\n self.cur.execute(\"select * from history where DATE_OF_TRANSACTION between to_date(:1,'DD/MM/YY') and to_date(:2,'DD/MM/YY') and c_id=:3\",(date1, date2, c_id))\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if len(ret) == 0:\n print(\"No history for this account\\n\")\n return\n print(\"Customer ID date of transaction action amount balance type of account\")\n for i in range(0, len(ret), 6):\n print(str(ret[0 + i]) + \" \" + str(ret[1 + i]) + \" \" + str(ret[2 + i]) + \" \" + str(ret[3 + i]) + \" \" + str(ret[4 + i]) + \" \" + str(ret[5 + i]))\n\n\nclass FixedDeposit: #CLASS TO STORE FIXED DEPOSITE INFORMATION\n def __init__(self):\n self.conn = cx_Oracle.connect('TEST/root@xe')\n print(\"connected\")\n self.cur = self.conn.cursor()\n\n def open_account(self, c_id):\n while True:\n bal = int(input(\"Enter balance you want to enter in your new FD account: \"))\n if bal < 1000: #CHECKING WHETHER THE AMOUNT IS CORRECT ACCORDING TO THE CONSTRAINT PROVIDED\n print(\"Balance of FD can not less than 1000, please retry\\n\")\n continue\n term = int(input(\"Enter the term for your fixed deposit in months: \"))\n if term < 12: #CHECKING WHETHER THE TERM IS CORRECT ACCORDING TO THE CONSTRAINT PROVIDED\n print(\"Term can not be less than 12 months, please retry\\n\")\n continue\n else:\n break\n self.cur.execute(\"insert into FIXED_DEPOSITE values(FD_ACCOUNT_NO_VAL.NEXTVAL,:1,to_date(sysdate,'DD/MM/YY'),:2,:3)\",(c_id, bal, term))\n self.conn.commit()\n print(\"FIXED DEPOSITE account created.\\n\")\n\n\nclass Loan:\n def __init__(self):\n self.conn = cx_Oracle.connect('TEST/root@xe')\n print(\"connected\")\n self.cur = self.conn.cursor()\n\n def open_account(self, c_id):\n self.cur.execute(\"select c_id from saving_acc\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not have a saving account\\n\")\n return\n while True:\n amount = int(input(\"enter loan amount: \"))\n if amount<=0:\n print(\"Please enter a valid amount.\\n\")\n else:\n if (amount % 1000) != 0:\n print(\"please enter the valid amount in multiple of 1000\\n\")\n continue\n else:\n self.cur.execute(\"select balance from saving_acc where c_id=\" + c_id + \"\")\n ret = self.cur.fetchall()\n ret = list(sum(ret, ()))\n ret = ret[0]\n if amount > int(2 * int(ret)):\n print(\"You can not avail loan greater than twice the amount in your saving account\\n\")\n continue\n else:\n break\n while True:\n term = int(input(\"enter term of loan: \"))\n if term <= 0:\n print(\"term can not be\", term)\n continue\n else:\n break\n self.cur.execute(\"insert into LOAN values(LOAN_VAL.NEXTVAL,:1,to_date(sysdate,'DD/MM/YY'),:2,:3)\",(c_id, amount, term))\n self.conn.commit()\n 
print(\"Loan availed.\\n\")\n\n\nconn = cx_Oracle.connect('TEST/root@xe') #ESTABLISHING CONNECTION WITH DATABASE( DATABASE NAME- TEST, PASSWORD- ROOT )\ncur = conn.cursor() #CREATING AN OBJECT OF CURSOR CLASS TO EXECUTE QUERIES\nlock = 0\nadmin_name = \"\"\nadmin_pass = \"\"\nsa1 = \"\"\nsa2 = \"\"\nadmin_status = \"ACTIVE\"\ncur.execute(\"select to_date(sysdate,'DD/MM/YY') from dual\") #CHECKING FOR 1ST DATE OF MONTH TO UPDATE THE WITHDRAWAL LIMIT FOR ALL CUSTOMERS\nret = cur.fetchall()\nret = list(sum(ret, ()))\nret = ret[0]\nret = str(ret)\nret = ret.split('-')\nday = str(ret[2])\nday = int(day[0:2])\nif int(day) == 1: #UPDATING THE WITHDRAWAL LIMIT\n    cur.execute(\"update saving_acc SET WITHDRAWL='0'\")\n    conn.commit()\nwhile (True):\n    print(\"1. Sign Up\\n2. Sign In\\n3. Admin Sign In\\n4. Quit\") #MAIN MENU\n    choice = int(input())\n    if choice == 1:\n        fname = str(input(\"enter first name: \"))\n        if len(fname) == 0:\n            print(\"FName cannot be empty, try again\\n\")\n            continue\n        lname = str(input(\"enter last name: \"))\n        if len(lname) == 0:\n            print(\"LName cannot be empty, try again\\n\")\n            continue\n        mobno = int(input(\"enter mobile no: (+91)\"))\n        add1 = input(\"enter address Line 1: \")\n        if len(add1) == 0:\n            print(\"Line 1 address cannot be empty, try again\\n\")\n            continue\n        add2 = input(\"enter address Line 2: \")\n        if len(add2) == 0:\n            print(\"Line 2 address cannot be empty, try again\\n\")\n            continue\n        city = input(\"enter city: \")\n        if len(city) == 0:\n            print(\"City cannot be empty, try again\\n\")\n            continue\n        state = input(\"enter state: \")\n        if len(state) == 0:\n            print(\"State cannot be empty, try again\\n\")\n            continue\n        pin = input(\"enter pin: \")\n        gen = input(\"enter gender: \")\n        if len(gen) == 0:\n            print(\"Gender cannot be empty, try again\\n\")\n            continue\n        email = input(\"enter email: \")\n        if len(email) == 0:\n            print(\"Email field cannot be empty, try again\\n\")\n            continue\n        while True:\n            passw = input(\"enter password: \")\n            import re\n\n            pro = re.compile(\"^(?=.*[A-Za-z])(?=.*\\\\d)[A-Za-z\\\\d]{8,}$\")\n            if pro.match(passw):\n                break\n            else:\n                print(\"password must be ALPHA-NUMERIC and AT LEAST 8 CHARACTERS LONG, please retry\")\n                continue\n        obj = customer()\n        obj.new_coustomer(fname, lname, mobno, add1, add2, city, state, pin, gen, email, passw)\n    elif choice == 2:\n\n        print(\"enter your customer ID\")\n        cus_id = str(input())\n        if len(cus_id) == 0:\n            print(\"Enter the Customer ID, it cannot be blank.\\n\")\n            continue\n        cur.execute(\"select c_id from customer\")\n        ret = cur.fetchall()\n        ret = list(sum(ret, ()))\n        if (int(cus_id) in ret):\n            pass\n        else:\n            print(cus_id + \" does not exist\\n\")\n            continue\n        try:\n            cur.execute(\"select status from saving_acc where c_id=\" + cus_id + \"\")\n            sa1 = cur.fetchall()\n            sa1 = list(sum(sa1, ()))\n            sa1 = sa1[0]\n        except Exception as e:\n            pass\n        try:\n            cur.execute(\"select status from CURRENT_ACC where c_id=\" + cus_id + \"\")\n            sa2 = cur.fetchall()\n            sa2 = list(sum(sa2, ()))\n            sa2 = sa2[0]\n        except Exception as e:\n            pass\n        if sa1 == \"LOCK\" or sa2 == \"LOCK\": #CHECKING WHETHER THE ACCOUNT IS LOCKED OR NOT\n            print(\"your account has been locked, contact the bank.\\n\")\n            continue\n        print(\"enter your password: \")\n        cus_pass = input()\n        try:\n            cur.execute(\"select password from customer where C_ID=\" + cus_id + \"\")\n            ret = cur.fetchall()\n            ret = list(sum(ret, ()))\n            lock = 0\n            while (True):\n                if ret[0] == cus_pass:\n                    print(\"Sign in successful\\n\")\n                    break\n                else:\n                    if lock == 2: #CHECK FOR ACCOUNT LOCK CONDITION\n                        try:\n                            
cur.execute(\"UPDATE SAVING_ACC SET status='LOCK' where c_id=\" + cus_id + \"\") #LOCKING THE ACCOUNT\n                            conn.commit()\n                            print(\"Saving Account has been locked.\\n\")\n                        except Exception as e:\n                            pass\n                        try:\n                            cur.execute(\"UPDATE CURRENT_ACC SET status='LOCK' where c_id=\" + cus_id + \"\")\n                            conn.commit()\n                            print(\"Current Account has been locked.\\n\")\n                        except Exception as e:\n                            pass\n                        finally:\n                            exit()\n                    print(\"Wrong password. Please enter password again\\n\")\n                    cus_pass = input()\n                    lock += 1\n            while True:\n                print(\"1. Address change\\n2. Open New Account\\n3. Money Deposit\\n4. Money Withdrawal\\n5. Transfer Money\\n6. Print statement\\n7. Account closure\\n8. Avail Loan\\n0. customer logout\")\n                ins = int(input())\n                if ins == 1:\n                    add1 = input(\"enter address Line 1 \")\n                    add2 = input(\"enter address Line 2 \")\n                    city = input(\"enter city \")\n                    state = input(\"enter state \")\n                    pin = input(\"enter pin \")\n                    cur.execute(\"UPDATE customer SET C_ADD_LINE1=:1,C_ADD_LINE2=:2,C_ADD_CITY=:3,C_ADD_STATE=:4,C_ADD_PIN=:5 WHERE C_ID=:6\",(add1, add2, city, state, pin, cus_id))\n                    conn.commit()\n                elif ins == 2:\n                    print(\"1. open Saving Account\\n2. open Current Account\\n3. open Fixed Deposit Account\")\n                    ci = int(input())\n                    if ci == 1:\n                        cur.execute(\"select c_id from saving_acc\")\n                        ret = cur.fetchall()\n                        ret = list(sum(ret, ()))\n                        if (int(cus_id) in ret):\n                            print(cus_id + \" already has a Saving Account\\n\")\n                            continue\n                        else:\n                            obj = SavingAccount()\n                            obj.open_account(cus_id)\n                    elif ci == 2:\n                        cur.execute(\"select c_id from current_acc\")\n                        ret = cur.fetchall()\n                        ret = list(sum(ret, ()))\n                        if (int(cus_id) in ret):\n                            print(cus_id + \" already has a Current Account\\n\")\n                            continue\n                        else:\n                            obj = CurrentAccount()\n                            obj.open_account(cus_id)\n                    elif ci == 3:\n                        obj = FixedDeposit()\n                        obj.open_account(cus_id)\n                elif ins == 3:\n                    print(\"1. Saving Account\\n2. Current Account\")\n                    ac = int(input())\n                    if ac == 1:\n                        if sa1 == \"CLOSE\":\n                            print(\"your SAVING ACCOUNT has been closed\\n\")\n                            continue\n                        obj = SavingAccount()\n                        obj.deposit(cus_id)\n                    elif ac == 2:\n                        if sa2 == \"CLOSE\":\n                            print(\"your CURRENT ACCOUNT has been closed\\n\")\n                            continue\n                        obj = CurrentAccount()\n                        obj.deposit(cus_id)\n                elif ins == 4:\n                    print(\"1. Saving Account\\n2. Current Account\")\n                    ac = int(input())\n                    if ac == 1:\n                        if sa1 == \"CLOSE\":\n                            print(\"your SAVING ACCOUNT has been closed\\n\")\n                            continue\n                        obj = SavingAccount()\n                        obj.withdraw(cus_id)\n                    elif ac == 2:\n                        if sa2 == \"CLOSE\":\n                            print(\"your CURRENT ACCOUNT has been closed\\n\")\n                            continue\n                        obj = CurrentAccount()\n                        obj.withdraw(cus_id)\n                elif ins == 6:\n                    date1 = input(\"first date in dd/mm/yyyy format\")\n                    date2 = input(\"second date in dd/mm/yyyy format\")\n                    obj = SavingAccount()\n                    obj.history(date1, date2, cus_id)\n                elif ins == 5:\n                    print(\"1. to transfer from Saving Account\\n2. to transfer from Current Account\") #MENU ASKING WHICH TYPE OF ACCOUNT WE WANT TO TRANSFER FROM\n                    ac = int(input())\n                    if ac == 1:\n                        obj = SavingAccount()\n                        obj.transfer(cus_id)\n                    elif ac == 2:\n                        obj = CurrentAccount()\n                        obj.transfer(cus_id)\n                elif ins == 7:\n                    print(\"Do you want to close account y/n: \")\n                    inp = input()\n                    if inp == 'y':\n                        print(\"1. close Saving Account\\n2. 
close Current Account\") #menu to choose which account to close\n                        ac = int(input())\n                        if ac == 1:\n                            cur.execute(\"select c_id from saving_acc\")\n                            ret = cur.fetchall()\n                            ret = list(sum(ret, ()))\n                            if (int(cus_id) in ret):\n                                pass\n                            else:\n                                print(cus_id + \" does not have a saving account.\\n\")\n                                continue\n                            cur.execute(\"UPDATE saving_acc SET status='CLOSE' where c_id=\" + str(cus_id) + \"\")\n                            conn.commit()\n                            cur.execute(\"UPDATE saving_acc SET END_DATE=to_date(sysdate,'DD/MM/YY') where c_id=\" + str(cus_id) + \"\")\n                            conn.commit()\n                            cur.execute(\"select BALANCE from saving_acc where c_id=\" + str(cus_id) + \"\")\n                            qw = cur.fetchall()\n                            qw = list(sum(qw, ()))\n                            qw = qw[0]\n                            print(\"The amount of \" + str(qw) + \" has been sent to your address. Thanks for banking with us :)\\n\")\n                            print(\"Account has been closed\\n\")\n                        elif ac == 2:\n                            cur.execute(\"select c_id from current_acc\")\n                            ret = cur.fetchall()\n                            ret = list(sum(ret, ()))\n                            if (int(cus_id) in ret):\n                                pass\n                            else:\n                                print(cus_id + \" does not have a current account.\\n\")\n                                continue\n                            cur.execute(\"UPDATE CURRENT_ACC SET status='CLOSE' where c_id=\" + str(cus_id) + \"\")\n                            conn.commit()\n                            cur.execute(\"UPDATE CURRENT_ACC SET END_DATE=to_date(sysdate,'DD/MM/YY') where c_id=\" + str(\n                                cus_id) + \"\")\n                            conn.commit()\n                            cur.execute(\"select BALANCE from current_acc where c_id=\" + str(cus_id) + \"\")\n                            qw = cur.fetchall()\n                            qw = list(sum(qw, ()))\n                            qw = qw[0]\n                            print(\"The amount of \" + str(qw) + \" has been sent to your address. Thanks for banking with us :)\")\n                            print(\"Account has been closed\\n\")\n                elif ins == 0:\n                    print(\"Logout successful.\")\n                    break\n                elif ins == 8:\n                    obj = Loan()\n                    obj.open_account(cus_id)\n        except Exception as e:\n            print(e)\n    elif choice == 3:\n        lock = 0\n        cur.execute(\"select status from admin\")\n        admin_status = cur.fetchall()\n        admin_status = list(sum(admin_status, ()))\n        admin_status = admin_status[0]\n        if admin_status == \"LOCKED\":\n            print(\"admin has been locked\\n\")\n            continue\n        adm_nam = input(\"enter name: \")\n        adm_pass = input(\"enter password: \")\n        cur.execute(\"select name from admin\")\n        admin_name = cur.fetchall()\n        admin_name = list(sum(admin_name, ()))\n        admin_name = admin_name[0]\n        cur.execute(\"select password from admin\")\n        admin_pass = cur.fetchall()\n        admin_pass = list(sum(admin_pass, ()))\n        admin_pass = admin_pass[0]\n        while True:\n            if adm_nam == str(admin_name) and adm_pass == str(admin_pass):\n                print(\"1. Print closed account history\\n2. FD Report of a customer\\n3. FD Report of Customer vis-a-vis another customer\\n4. FD Report w.r.t a particular FD amount\\n5. Loan Report of a Customer\\n6. Loan Report of customer vis-a-vis another customer\\n7. Loan report w.r.t a particular loan amount\\n8. Loan-FD report of customer\\n9. Report of customer who are yet to avail a loan\\n10. Report of customer who are yet to open an FD account\\n11. Report of customer who neither have a loan nor an FD account with the bank\\n0. 
Admin logout \")\n cho = int(input())\n if cho == 1:\n cur.execute(\"select c_id from saving_acc where status='CLOSE'\")\n ret1 = ret = cur.fetchall()\n ret1 = list(sum(ret, ()))\n print(\"Customer ID End Date\")\n for i in ret1:\n cur.execute(\"select END_DATE from saving_acc where c_id=\" + str(i) + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n print(str(i) + \" \" + str(ret[0]))\n\n cur.execute(\"select c_id from current_acc where status='CLOSE'\")\n ret1 = ret = cur.fetchall()\n ret1 = list(sum(ret, ()))\n for i in ret1:\n cur.execute(\"select END_DATE from current_acc where c_id=\" + str(i) + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n print(str(i) + \" \" + str(ret[0])) #printing customer id and End date\n elif cho == 2:\n c_id = input(\"enter a customer ID \")\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not exist\\n\")\n continue\n cur.execute(\"select c_id from FIXED_DEPOSITE\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(\"N.A\")\n continue\n cur.execute(\"select * from FIXED_DEPOSITE where c_id=\" + c_id + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n print(\"Account no customer ID Start date balance term\")\n for i in range(0, len(ret), 5):\n print(str(ret[0 + i]) + \" \" + str(ret[1 + i]) + \" \" + str(ret[2 + i]) + \" \" + str(ret[3 + i]) + \" \" + str(ret[4 + i]))\n elif cho == 3:\n c_id = input(\"enter a customer ID: \")\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not exist\")\n continue\n cur.execute(\"select c_id from FIXED_DEPOSITE\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(\"N.A\")\n continue\n cur.execute(\"select balance from FIXED_DEPOSITE where c_id=\" + c_id + \"\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n ret1 = sum(ret1)\n cur.execute(\"select c_id from FIXED_DEPOSITE where balance>=\" + str(ret1) + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if len(ret) == 0:\n print(\"N.A no account has balance greater than: \", ret1)\n continue\n print(\"Account No customer ID balance term\")\n a = list()\n for i in ret:\n if i not in a:\n a.append(i)\n cur.execute(\"select FD_ACCOUNT_NO,C_ID,BALANCE,TERM from FIXED_DEPOSITE where c_id=\" + str(i) + \"\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n for j in range(0, len(ret2), 4):\n print(str(ret2[0]) + \" \" + str(ret2[1]) + \" \" + str(ret2[2]) + \" \" + str(ret2[3]))\n elif cho == 4:\n while True:\n amount = int(input(\"enter a amount: \"))\n if amount < 0:\n print(\"invalid amount retry.\\n\")\n continue\n elif int(amount % 1000) != 0:\n print(\"enter amount in multiple of 1000, retry\\n\")\n continue\n else:\n break\n\n cur.execute(\"select c_id from FIXED_DEPOSITE where balance>=\" + str(amount) + \"\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n if len(ret2) == 0:\n print(\"N.A.\\n\")\n continue\n print(\"customer ID first name last name FD Amount\")\n a = list()\n for i in ret2:\n if i not in a:\n a.append(i)\n cur.execute(\"select C.c_id, C.C_FNAME, C.C_LNAME, B.BALANCE from customer C,FIXED_DEPOSITE B where C.c_id=B.c_id and C.c_id=\" + str(i) + \"\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n for j in range(0, len(ret1), 4):\n if int(ret1[3 + j])>int(amount):\n print(str(ret1[0 + j]) + 
\" \" + str(ret1[1 + j]) + \" \" + str(ret1[2 + j]) + \" \" + str(ret1[3 + j]))\n elif cho == 5:\n c_id = input(\"enter a customer ID: \")\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not exist\")\n continue\n cur.execute(\"select c_id from loan\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(\"Not Availed\\n\")\n continue\n cur.execute(\"select * from loan where c_id=\" + c_id + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n print(\"Account no Customer ID Start date Loan Amount Term\")\n for i in range(0, len(ret), 5):\n print(str(ret[0 + i]) + \" \" + str(ret[1 + i]) + \" \" + str(ret[2 + i]) + \" \" + str(ret[3 + i]) + \" \" + str(ret[4 + i]))\n elif cho == 6:\n c_id = input(\"enter a customer ID: \")\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(c_id + \" does not exist.\\n\")\n continue\n cur.execute(\"select c_id from loan\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if (int(c_id) in ret):\n pass\n else:\n print(\"N.A, account doesn't have loan\\n\")\n continue\n cur.execute(\"select LOAN_AMOUNT from loan where c_id=\" + c_id + \"\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n ret1 = sum(ret1)\n cur.execute(\"select c_id from loan where LOAN_AMOUNT>=\" + str(ret1) + \"\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n if len(ret) == 0:\n print(\"N.A no account has balance greater than\", ret1)\n continue\n print(\"Account No customer ID balance term\")\n a = list()\n for i in ret:\n if i not in a:\n a.append(i)\n cur.execute(\"select L_ACCOUNT_NO,C_ID,LOAN_AMOUNT,TERM from loan where c_id=\" + str(i) + \"\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n for j in range(0, len(ret2), 4):\n print(str(ret2[0 + j]) + \" \" + str(ret2[1 + j]) + \" \" + str(\n ret2[2 + j]) + \" \" + str(ret2[3 + j]))\n elif cho == 7:\n while True:\n amount = int(input(\"enter an amount: \"))\n if amount < 0: #CHECKING FOR VALID AMOUNT\n print(\"invalid amount retry\\n\")\n continue\n elif int(amount % 1000) != 0:\n print(\"enter amount in multiple of 1000, retry\\n\")\n continue\n else:\n break\n\n cur.execute(\"select c_id from loan where LOAN_AMOUNT>=\" + str(amount) + \"\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n if len(ret2) == 0:\n print(\"N.A, so such accounts.\\n\")\n continue\n print(\"customer ID first name last name Loan Amount\")\n a = list()\n for i in ret2:\n if i not in a:\n a.append(i)\n cur.execute(\"select C.c_id, C.C_FNAME, C.C_LNAME, B.LOAN_AMOUNT from customer C,loan B where C.c_id=B.c_id and B.c_id=\" + str(i) + \"\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n for j in range(0, len(ret1), 4):\n if int(ret1[3 + j])>int(amount):\n print(str(ret1[0 + j]) + \" \" + str(ret1[1 + j]) + \" \" + str(ret1[2 + j]) + \" \" + str(ret1[3 + j]))\n elif cho == 8:\n cur.execute(\"select c_id from loan\")\n retl = cur.fetchall()\n retl = list(sum(retl, ()))\n if len(retl) == 0:\n print(\"no accounts in loan.\\n\")\n continue\n cur.execute(\"select c_id from FIXED_DEPOSITE\")\n retf = cur.fetchall()\n retf = list(sum(retf, ()))\n if len(retf) == 0:\n print(\"no accounts in FIXED_DEPOSITE.\\n\")\n continue\n cl = list()\n if len(retl) < len(retf):\n l = retl\n s = retf\n else:\n l = retf\n s = retl\n for i in l:\n if i in s and i not in cl:\n cl.append(i)\n if len(cl) == 0:\n 
print(\"no customer has both loan and FD account.\\n\")\n continue\n print(\"customer ID first name last name sum of loan amount sum of FD account\")\n ft=0\n for i in cl:\n cur.execute(\"select LOAN_AMOUNT from loan where c_id=\" + str(i) + \"\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n ret1 = sum(ret1)\n cur.execute(\"select balance from FIXED_DEPOSITE where c_id=\" + str(i) + \"\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n ret2 = sum(ret2)\n if ret1 > ret2:\n ft=1\n cur.execute(\"select c_id,C_FNAME,C_LNAME from customer where c_id=\" + str(i) + \"\")\n ret3 = cur.fetchall()\n ret3 = list(sum(ret3, ()))\n print(str(ret3[0]) + \" \" + str(ret3[1]) + \" \" + str(ret3[2]) + \" \" + str(ret1) + \" \" + str(ret2))\n if ft==0: #CHECKING IF THERE IS NO SUM OF LOAN GREATER THAN SUM OF FIXED DEPOSIT\n print(\"\\n\")\n print(\"No person has sum of loan greater than sum of fixed deposit amount\\n\")\n elif cho == 9:\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n cur.execute(\"select c_id from loan\")\n retl = cur.fetchall()\n retl = list(sum(retl, ()))\n ul = list()\n for i in retl:\n if i not in ul:\n ul.append(i)\n LA = list()\n ret = set(ret)\n ul = set(ul)\n LA = ret - ul\n if len(LA) == 0:\n print(\"all customers have availed loan\\n\")\n continue\n print(\"customer ID First name Last name\")\n for i in LA:\n cur.execute(\"select c_id,C_FNAME,C_LNAME from customer where c_id=\" + str(i) + \"\")\n ret3 = cur.fetchall()\n ret3 = list(sum(ret3, ()))\n print(str(ret3[0]) + \" \" + str(ret3[1]) + \" \" + str(ret3[2]))\n elif cho == 10:\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n cur.execute(\"select c_id from FIXED_DEPOSITE\")\n retl = cur.fetchall()\n retl = list(sum(retl, ()))\n ul = list()\n for i in retl:\n if i not in ul:\n ul.append(i)\n LA = list()\n ret = set(ret)\n ul = set(ul)\n LA = ret - ul\n if len(LA) == 0:\n print(\"all customers have FIXED DEPOSITE Account\\n\")\n continue\n print(\"customer ID First name Last name\")\n for i in LA:\n cur.execute(\"select c_id,C_FNAME,C_LNAME from customer where c_id=\" + str(i) + \"\")\n ret3 = cur.fetchall()\n ret3 = list(sum(ret3, ()))\n print(str(ret3[0]) + \" \" + str(ret3[1]) + \" \" + str(ret3[2]))\n elif cho == 11:\n cur.execute(\"select c_id from customer\")\n ret = cur.fetchall()\n ret = list(sum(ret, ()))\n cur.execute(\"select c_id from FIXED_DEPOSITE\")\n ret1 = cur.fetchall()\n ret1 = list(sum(ret1, ()))\n cur.execute(\"select c_id from loan\")\n ret2 = cur.fetchall()\n ret2 = list(sum(ret2, ()))\n for i in ret1:\n if i not in ret2:\n ret2.append(i)\n ul = list()\n for i in ret2:\n if i not in ul:\n ul.append(i)\n na = list()\n ul = set(ul)\n ret = set(ret)\n na = ret - ul\n if len(na) == 0:\n print(\"all customers have FIXED DEPOSITE or loan Account\\n\")\n continue\n print(\"customer ID First name Last name\")\n for i in na:\n cur.execute(\"select c_id,C_FNAME,C_LNAME from customer where c_id=\" + str(i) + \"\")\n ret3 = cur.fetchall()\n ret3 = list(sum(ret3, ()))\n print(str(ret3[0]) + \" \" + str(ret3[1]) + \" \" + str(ret3[2]))\n\n elif cho == 0:\n print(\"Logout successful\\n\")\n break\n else:\n if lock == 2:\n cur.execute(\"update admin set status='LOCKED'\")\n print(\"Admin has been locked\\n\")\n break\n print(\"wrong password or user name retry\\n\")\n adm_nam = input(\"enter name: \")\n adm_pass = input(\"enter password: \")\n lock += 1\n elif choice == 4:\n try:\n conn.close() #CLOSE THE 
CONNECTION TO THE DATABASE\n except Exception as e:\n pass\n finally:\n print(\"Have a nice day... Hope to see you soon :)\") #ENDING QUOTE :)\n exit() #EXITING FROM THE APPLICATION\n" } ]
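The menu loop above builds every statement by pasting raw input() strings into SQL (e.g. "select * from loan where c_id=" + c_id), which breaks on stray quotes and is open to SQL injection. A minimal sketch of the same lookup as a parameterized query, assuming a DB-API 2.0 cursor like the cur used above (placeholder syntax is %s for MySQL drivers, ? for sqlite3):

    c_id = input("enter a customer ID: ")
    # the driver quotes and escapes the value; no string concatenation needed
    cur.execute("select * from loan where c_id = %s", (c_id,))
    for row in cur.fetchall():
        # fetchall() already yields one tuple per row, so the
        # list(sum(ret, ())) flattening idiom used above is not needed here
        print(" ".join(str(col) for col in row))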
1
jaaguilarb/to_flat_array
https://github.com/jaaguilarb/to_flat_array
6f035699f6f2374441aea796b91e8983d6bedad5
756bb3c5910729e949bee08be81062f72b9c33c9
2bfeea25975ec7aeb3ecc73ba0f6b207c7e7bc69
refs/heads/master
2021-01-12T07:10:29.997573
2016-12-22T22:52:51
2016-12-22T22:52:51
76,923,932
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5623762607574463, "alphanum_fraction": 0.5623762607574463, "avg_line_length": 17.346153259277344, "blob_id": "94788c14e46267e05ed94d20b1a248361ec90d9f", "content_id": "a5c792d8c3262d8952d46478c52da894140f26f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "permissive", "max_line_length": 70, "num_lines": 26, "path": "/to_flat_array2_7.py", "repo_name": "jaaguilarb/to_flat_array", "src_encoding": "UTF-8", "text": "\r\nflat = []\r\n\r\ndef review(x):\r\n \"\"\"Unpack a nested array or put the integer element in flat array.\r\n\r\n Keyword arguments:\r\n x -- the element, can be an integer or an nested array\r\n\r\n \"\"\"\r\n global flat\r\n if isinstance(x,int):\r\n flat.append(x)\r\n else:\r\n map(review,x)\r\n return flat\r\n\r\ndef to_flat(na):\r\n \"\"\"Convert to flat array.\r\n\r\n Keyword arguments:\r\n na -- the nested arrays of integers\r\n\r\n \"\"\"\r\n print na\r\n fa = map(review,na)\r\n return flat\r\n" }, { "alpha_fraction": 0.7240356206893921, "alphanum_fraction": 0.7418397665023804, "avg_line_length": 27.08333396911621, "blob_id": "3772a90f568ae9d7636989f635d432d15ebf1c71", "content_id": "985a1d93b8bc2b8a16d5ae06a66c14d50af2195f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 337, "license_type": "permissive", "max_line_length": 84, "num_lines": 12, "path": "/README.md", "repo_name": "jaaguilarb/to_flat_array", "src_encoding": "UTF-8", "text": "# to_flat_array\nConvert nested array to flat array.\n\n<h2><strong>What is to_flat_array?</strong></h2>\n\nto_flat_array is a function to transform nested arrays to flat arrays.\n\nNote: actually only run with python 2.7.\n\n<h2><strong>License</strong></h2>\n\nto_flat_array is licensed under the terms of the MIT License (see the file LICENSE).\n" }, { "alpha_fraction": 0.522357702255249, "alphanum_fraction": 0.5711382031440735, "avg_line_length": 25.33333396911621, "blob_id": "00ea1256df01b5a4c8645fc372869453b407dac0", "content_id": "319aa8540d6d0632e3186f60d93341d687c7b655", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "permissive", "max_line_length": 70, "num_lines": 18, "path": "/test_to_flat.py", "repo_name": "jaaguilarb/to_flat_array", "src_encoding": "UTF-8", "text": "import unittest\r\nfrom to_flat_array2_7 import to_flat, print_flat\r\n\r\n##\r\n# Test unpacking nested array into flat array\r\n##\r\nclass ToFlatTestCase(unittest.TestCase):\r\n ca = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n na = [0,[1,2,3],[[[4],[5]],[6]],[7,8],9]\r\n\r\n \"\"\"Tests for `to_flat_array2_7.py`.\"\"\"\r\n\r\n def test_to_flat(self):\r\n \"\"\"Is result a flat array?\"\"\"\r\n self.assertEqual(to_flat(ToFlatTestCase.na),ToFlatTestCase.ca)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n" } ]
3
dizy64/workspace
https://github.com/dizy64/workspace
615d7cb0e2004fba3de099bb85149be57b283cc2
4215b574d79ca15666ef9fba6d371ca289367461
59db651ed72f04d77e1d054245dbc284914a38a0
refs/heads/master
2015-08-11T19:36:37.541804
2014-07-06T15:47:42
2014-07-06T15:47:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4821114242076874, "alphanum_fraction": 0.49384164810180664, "avg_line_length": 25.65625, "blob_id": "e12a59ef339f076051921d265f182e802a95dfd9", "content_id": "ac093f97bf81db208d7f91a02722a8dd6ff9003c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "no_license", "max_line_length": 112, "num_lines": 64, "path": "/main.py", "repo_name": "dizy64/workspace", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#실습 단계가 아니니까 개념적으로만 이해하자\n\nimport urllib2\nimport time\nfrom bs4 import BeautifulSoup\nimport re\ntarget_uri = \"http://melon.com/chart/real/list.htm\"\nsave_file = \"melon.txt\"\n\ndef update():\n tmp = urllib2.urlopen(target_uri)\n now = time.localtime()\n print \"%d년 %d월 %d일 멜론 자료 업데이트 되었습니다.\" % (now.tm_year, now.tm_mon, now.tm_mday)\n with open(\"melon.html\", \"w\") as f:\n f.write(tmp.read())\n \n return tmp.read()\n\n\ndef chart():\n soup = BeautifulSoup(update())\n \n rank = 0\n count = 1\n \n tracktitle = \"\"\n artist = \"\"\n albumtitle = \"\"\n \n f = open(\"melon.txt\", \"w\")\n f.write(\"==================== Melon Top 100 ====================\\n\")\n \n divs = soup.find_all(\"div\", { \"class\" : re.compile(\"^ellipsis\")} )\n \n for item in divs:\n if (count % 3) == 1:\n tracktitle = str(item.get_text())\n \n elif (count % 3) == 2:\n artist = str(item.a.get_text()) \n \n elif (count % 3) == 0:\n albumtitle = str(item.get_text())\n if rank == 0:\n f.write(\"추 천\\t%s - %s [%s]\\n\" % (tracktitle.strip(), artist.strip(), albumtitle.strip()))\n print \"추 천\\t%s - %s [%s]\\n\" % (tracktitle.strip(), artist.strip(), albumtitle.strip())\n \n else :\n f.write(\"%3d위\\t%s - %s [%s]\\n\" % (rank, tracktitle.strip(), artist.strip(), albumtitle.strip()))\n print \"%3d위\\t%s - %s [%s]\\n\" % (rank, tracktitle.strip(), artist.strip(), albumtitle.strip())\n \n rank = rank + 1\n \n count = count + 1\n \n print \"melon.txt에 저장되었습니다.\" \n f.close() \n \n\n\nchart()" } ]
1
No31LE/lesson5
https://github.com/No31LE/lesson5
530b5e45823fb6aecc7556712b714bbc35e5f73f
b7e4faca84776724529162684dca90f7cb12278e
003182b7baa5244a1068201ca0f3c8765903c78c
refs/heads/main
2023-06-15T20:42:36.400431
2021-07-10T06:03:18
2021-07-10T06:03:18
384,623,013
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6150943636894226, "alphanum_fraction": 0.6377358436584473, "avg_line_length": 18, "blob_id": "0742571c7c14c982e73f326a7b774433bb963578", "content_id": "d1a72bea7b156bee8944b8da63d224aeee0946f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 47, "num_lines": 14, "path": "/python_homwork_lesson5.py", "repo_name": "No31LE/lesson5", "src_encoding": "UTF-8", "text": "score_list = list()\npeople = int(input('''amount of people?\n'''))\n\n\nfor i in range(1,people + 1):\n score_list.append(int(input('''score? 1-100\n''')))\n\n\n\nprint ('average:', sum(score_list)/people)\nprint ('highest',max(score_list))\nprint ('lowest',min(score_list))" } ]
1
EmmanuelMPaul/webscraper
https://github.com/EmmanuelMPaul/webscraper
dc1f5ac28c58a35ee126c4dc5c88bf3276f3e827
85610adda52ea04cbc0016c5b530b7d3b284b964
7caab6b9ca3e063d4b5d77f38fc869f9fd26cbad
refs/heads/master
2020-07-21T08:09:27.433638
2019-09-07T16:23:50
2019-09-07T16:23:50
206,791,420
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.41806381940841675, "alphanum_fraction": 0.42022714018821716, "avg_line_length": 33.88679122924805, "blob_id": "1ae853356aa847d14097061ca0694aceec59c975", "content_id": "4d6758032d6c06c88c6bf62657768890550ba753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1849, "license_type": "no_license", "max_line_length": 109, "num_lines": 53, "path": "/localscraper.py", "repo_name": "EmmanuelMPaul/webscraper", "src_encoding": "UTF-8", "text": "import os\n\nprint('************************************************************')\nprint('***************** Welcome to EC254 local scraper ***********')\nprint('************************************************************')\n\n\ndef scrape():\n check = True\n try:\n # initialize varibles\n source = input('\\tEnter file Name(e.g courses.txt): ')\n print('>>>scraping...')\n tags = open(source.strip(), \"r\", newline=\"\")\n\n # set shell file\n localshellfile = \"localcoursesdownloader.sh\"\n f = open(localshellfile, \"w+\", newline=\"\")\n f.write(\"#!/bin/bash \\r\\n\")\n\n # prepare shell file\n for tag in tags:\n if os.path.isdir(tag):\n print(tag + ' course already exists!')\n else:\n cmd = \"php codecourse download:course \" + tag\n f.write(cmd)\n f.write(\"echo\\r\\necho -n '************************************************************'\\r\\n\")\n f.write(\"echo\\r\\necho 'processing.... next course'\\r\\nsleep 5s\\r\\necho\\r\\n\")\n\n f.write(\"echo -n press Enter or cmd to exit \\r\\n\")\n f.write(\"read terminate\")\n f.close()\n\n except FileNotFoundError:\n check = False\n print('Sorry! file \"' + source + '\" does not exist')\n\n if check:\n print('************************************************************')\n print(\"* DONE: \" + localshellfile + \" shell files generated\")\n print('************************************************************')\n print('>>>downloading...')\n os.system(localshellfile)\n\n\nwhile True:\n scrape()\n runAgain = input('Enter (yes|YES) to continue or press Enter or cmd to exit: ')\n if runAgain.lower() == \"yes\":\n print('********************* New Run ***********************')\n else:\n break\n" }, { "alpha_fraction": 0.44864422082901, "alphanum_fraction": 0.451930969953537, "avg_line_length": 36.4461555480957, "blob_id": "a8342576dbab6b025981316d130ead4b13890a71", "content_id": "9381d04616acca696a95ea13f2c7561174e825ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2434, "license_type": "no_license", "max_line_length": 117, "num_lines": 65, "path": "/sscraper.py", "repo_name": "EmmanuelMPaul/webscraper", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\nprint('************************************************************')\nprint('************** Welcome to EC254 Snippet scraper **********')\nprint('************************************************************')\n\n\ndef scrape():\n # set files\n shellfilename = \"snippetdownloader.sh\"\n snippetsfilename = \"snippetlist.txt\"\n c = open(snippetsfilename, \"w+\", newline=\"\")\n f = open(shellfilename, \"w+\", newline=\"\")\n f.write(\"#!/bin/bash \\r\\n\") \n # initialize varibles\n start = input('\\tEnter START page number: ')\n end = input('\\tEnter END page number: ')\n print('>>>scraping...')\n # loop all pages\n for page in range(int(start), int(end) + 1, 1):\n response = requests.get('https://codecourse.com/library/all?free=false&page='+str(page)+'&type=snippet')\n 
soup = BeautifulSoup(response.text, 'html.parser')\n courses = soup.find_all('a', href=True)\n\n # loop courses in a page\n ignore = ''\n for course in courses:\n link = course['href']\n # validate link\n if re.search(r\"^/courses/\", link):\n tag = link[9:len(link)]\n if ignore != tag:\n if os.path.isdir(tag):\n print(tag + ' course already exists!')\n else:\n cmd = \"php codecourse download:course \" + tag + \"\\r\\n\"\n f.write(cmd)\n c.write(tag + \"\\r\\n\")\n f.write(\"echo\\r\\necho -n '************************************************************'\\r\\n\")\n f.write(\"echo\\r\\necho 'processing.... next course'\\r\\nsleep 5s\\r\\necho\\r\\n\")\n ignore = tag\n\n f.write(\"echo -n press Enter or cmd to exit \\r\\n\")\n f.write(\"read terminate\")\n f.close()\n c.close()\n\n print('************************************************************')\n print(\"*\\tDONE: \" + shellfilename + \" and \" + snippetsfilename + \"files generated\")\n print('************************************************************')\n print('>>>downloading...')\n os.system(shellfilename)\n\n\nwhile True:\n scrape()\n runAgain = input('Enter (yes|YES) to continue or press Enter or cmd to exit: ')\n\n if runAgain.lower() == \"yes\":\n print('********************* New Run ***********************')\n else:\n break\n" }, { "alpha_fraction": 0.6433172225952148, "alphanum_fraction": 0.6731078624725342, "avg_line_length": 41.82758712768555, "blob_id": "2923b1578fd2a256f34b264dc606a13dc2a077a1", "content_id": "68fee95e7ac4c4f8685a502fd97db2983f9bfd28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1242, "license_type": "no_license", "max_line_length": 140, "num_lines": 29, "path": "/README.md", "repo_name": "EmmanuelMPaul/webscraper", "src_encoding": "UTF-8", "text": "## About\n\n<p align=\"left\">Learning webscraping using BeautifulSoup library</p>\n<p align=\"left\">A simple tool for \ndownloading courses from - codecourse (premium account is required to use this tool)\n</P>\n\n## Requirements\n- [composer](https://getcomposer.org/) \n- [php >=7.1](https://github.com/php/php-src/releases)\n- [git](https://git-scm.com)\n- [python 3 ](https://www.python.org/downloads) \n\n## Usage\n\n```bash\n $ git clone https://github.com/EmmanuelMPaul/webscraper.git\n $ cd webscraper\n $ php codecourse auth:me Check if you're authenticated\n $ php codecourse auth:signin Sign in with your codecourse.com credentials\n $ pip install [pyhon packages(os,re,requests,BeautifulSoup)]\n $ python allscraper.py to download all types courses + snippet\n $ python cscraper.py to download courses only\n $ python sscraper.py to download snippets only\n $ python localscraper.py to download courses from a file source\n```\nvisit [codecourse](https://codecourse.com) to view the courses\n\n<p align=\"center\"><img src=\"https://repository-images.githubusercontent.com/206791420/546e5980-d0ca-11e9-9e2a-9b920b7a8304\" width=\"500\"></p>\n" } ]
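The scrapers in this record repeat one pattern: fetch a paginated listing with requests, parse it with BeautifulSoup, and keep only the hrefs that start with /courses/. A minimal, self-contained sketch of that extraction step, using a set instead of the a = list() membership loop (the URL is the one used above; the function name is illustrative):

    import re
    import requests
    from bs4 import BeautifulSoup

    def course_slugs(page):
        """Return the unique course slugs linked from one listing page."""
        url = "https://codecourse.com/library/all?free=false&page=" + str(page) + "&type=snippet"
        soup = BeautifulSoup(requests.get(url).text, "html.parser")
        slugs = set()
        for anchor in soup.find_all("a", href=True):
            if re.search(r"^/courses/", anchor["href"]):
                slugs.add(anchor["href"][len("/courses/"):])
        return slugs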
3
netlabufjf/Modo-Scripts
https://github.com/netlabufjf/Modo-Scripts
876830997395b40e40da990c9388cc39c1969b68
a3a0acd50a18e755713bf4e1b8fc6a280f134ecf
84366fb4bb656c62dfbfe38b76ae2143dfffba03
refs/heads/master
2021-05-06T09:19:31.731758
2018-07-04T18:04:51
2018-07-04T18:04:51
114,056,937
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6287121772766113, "alphanum_fraction": 0.644281268119812, "avg_line_length": 28.51028823852539, "blob_id": "c24c040854e4c727bee0b9d1c761eda8bde53296", "content_id": "38785f4f5432e3852b07931c04c5362d9e27281c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21617, "license_type": "permissive", "max_line_length": 132, "num_lines": 729, "path": "/V2/new-data.py", "repo_name": "netlabufjf/Modo-Scripts", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[ ]:\n\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport datetime\nimport pytz\n\n\n# ## Lendo e filtrando os dados coletados da API\n\n# In[ ]:\n\n# Lendo dados coletados da API\ndf = pd.read_csv('../modo_data.csv', usecols=[0,2,3,5])\n\n\n# In[ ]:\n\ndef str_to_datetime(df_time):\n \"\"\" \n Reformatando de string para datetime.\n \n Parameters\n ----------\n df_time : pandas.DataFrame, string\n Dataframe com strings a serem convertidas para datetime.\n \n Returns\n ----------\n date_list : pandas.DataFrame, datetime\n Dataframe com valores em datetime para possíveis fusos de Vancouver.\n \n \"\"\"\n date_list = []\n \n # Formatos de fuso horário comum de Vancouver e \n # fuso horário característico de horário de verão\n format_string = ['%Y-%m-%d %H:%M:%S.%f-08:00', '%Y-%m-%d %H:%M:%S.%f-07:00',\n '%Y-%m-%d %H:%M:%S-08:00', '%Y-%m-%d %H:%M:%S-07:00']\n \n print(datetime.datetime.now())\n for date in df_time:\n for fmt in format_string:\n try:\n date_list.append(datetime.datetime.strptime(str(date), fmt))\n break\n except:\n pass\n \n print(datetime.datetime.now())\n return pd.DataFrame(date_list)\n\n\n# In[ ]:\n\ndef get_car_ids(car_list):\n \"\"\"\n Coleta todos os IDs de carros coletados, sem repetições, de uma lista.\n \n Parameters\n -----------\n car_list : int list ou pandas.DataFrame\n Lista de todos os IDs coletados.\n \n Returns\n ----------\n car_ids : int\n Lista com todos os IDs já coletados, sem repetições.\n \n Notes\n ---------\n A coleta dos IDs é realizada de tal forma para obter IDs de veículos que \n por utilização da API podem não ser retornados, como os que estão em manutenção\n não estão na frota atual.\n \"\"\"\n \n car_ids = []\n\n for car in car_list:\n if (car in car_ids):\n continue\n else:\n car_ids.append(car)\n \n return car_ids\n\n\n# In[ ]:\n\n# Retirando dados nan\ndf.dropna(axis=0, how='any', inplace=True)\n\n# Convertendo datetime strings para o tipo datetime\ndf['Capture_time'] = str_to_datetime(df['Capture_time'])\n\n# Coletando todos os IDs dos veículos\ncar_ids = get_car_ids(df['CarID'])\n\n\n# ## Porcentagem de carros ocupados a cada minuto\n\n# In[ ]:\n\ndef convert_datetime_timezone(dt, tz1, tz2):\n \"\"\"\n Converte uma hora no fuso UTC ou São Paulo para um provável fuso de Vancouver.\n \n Parameters\n ------------\n dt : unix timestamp\n Timestamp a ser convertido para outro fuso horário.\n \n tz1, tz2 : Timezone String\n Time zone atual e a que a hora irá ser convertida.\n \n Returns\n ----------\n dt : unix timestamp\n Timestamp já convertida para o fuso de Vancouver.\n \n \"\"\" \n \n tz1 = pytz.timezone(tz1)\n tz2 = pytz.timezone(tz2)\n\n dt = datetime.datetime.fromtimestamp(dt)\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d %H:%M:%S\")\n dt = tz1.localize(dt)\n dt = dt.astimezone(tz2)\n \n try:\n # Fuso horário comum de Vancouver\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d 
%H:%M:%S-08:00\")\n except:\n # Fuso horário característico de horário de verão em Vancouver\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d %H:%M:%S-07:00\")\n \n dt = int(dt.timestamp())\n\n return dt\n\n\n# In[ ]:\n\n# Eliminando intervalos de disponibilidade futuros\nprint ('Size with future interval: '+str(len(df)))\n\n# Separando o dataframe com os intervalos futuros\ndf_with_future = df\ndf = pd.DataFrame()\n\n# Ordenando por capture time e pelo start time\ndf_with_future.sort_values(by=['Capture_time', 'StartTime'], inplace=True)\n\nfor car in car_ids:\n # Refazendo o dataframe somente com os intervalos presentes\n df = df.append(df_with_future[df_with_future['CarID'] == car].drop_duplicates(subset='Capture_time', keep='first'))\n\nprint ('Size without future interval: '+str(len(df)))\n\ndf.sort_values(by='Capture_time', inplace=True)\n\n# Retirando dados nan\ndf.dropna(axis=0, how='any', inplace=True)\n\n\n# In[ ]:\n\nin_travel = 0\nandando_weekdays = []\nandando_weekends = []\n\n\n\n# Percorre todo o dataframe para verificar quais carros estão andando em dado minuto\nfor i in range(1, len(df)):\n capture_time_atual = int(df['Capture_time'].iloc[i].timestamp())\n\n capture_time_anterior = int(df['Capture_time'].iloc[i-1].timestamp())\n\n start_time = int(df['StartTime'].iloc[i])\n\n request_start = df['RequestStart'].iloc[i]\n\n # Enquanto está no mesmo minuto, é analisado se o carro está andando\n if (capture_time_atual == capture_time_anterior):\n if (start_time > request_start):\n in_travel += 1\n else:\n porcentagem = (in_travel/len(car_ids))*100\n \n # Verifica que a data está entre segunda(1) e sexta(5)\n if (int(datetime.datetime.fromtimestamp(capture_time_anterior).strftime('%w')) > 0 and \n int(datetime.datetime.fromtimestamp(capture_time_anterior).strftime('%w')) < 6):\n andando_weekdays.append([capture_time_anterior, in_travel, porcentagem])\n else:\n andando_weekends.append([capture_time_anterior, in_travel, porcentagem])\n in_travel = 0\n \ndfIn_Travel_weekdays = pd.DataFrame(andando_weekdays, columns=['capture_time', 'total_in_travel', 'percentage'])\ndfIn_Travel_weekends = pd.DataFrame(andando_weekends, columns=['capture_time', 'total_in_travel', 'percentage'])\n\n\n# In[ ]:\n\ndef from_timestamp_list(timestamp_list):\n \n datetime_list = []\n \n for date in timestamp_list:\n datetime_list.append(datetime.datetime.fromtimestamp(int(date)))\n \n return pd.DataFrame(datetime_list)\n\n\n# In[ ]:\n\n# Formatando os dados de unix timestamp para datetime\n\ndfWeekdays = dfIn_Travel_weekdays\n\ndfWeekdays['capture_time'] = from_timestamp_list(dfWeekdays['capture_time']) \n \n \ndfWeekends = dfIn_Travel_weekends\n\ndfWeekends['capture_time'] = from_timestamp_list(dfWeekends['capture_time'])\n\n\n# In[ ]:\n\ndfWeekends.to_csv('weekends_v2.csv', index=False, encoding='utf-8')\ndfWeekdays.to_csv('weekdays_v2.csv', index=False, encoding='utf-8')\n\n\n# In[ ]:\n\n# Leitura de dados já processados se necessário\n\n# dfWeekends = pd.read_csv('weekends_v2.csv')\n# dfWeekdays = pd.read_csv('weekdays_v2.csv')\n\n# dfWeekdays['capture_time'] = pd.to_datetime(dfWeekdays['capture_time'])\n# dfWeekends['capture_time'] = pd.to_datetime(dfWeekends['capture_time'])\n\n\n# In[ ]:\n\n# Plot da porcentagem de carros alocados em dias de semana\nplt.plot(dfWeekdays['capture_time'],dfWeekdays['percentage'])\nplt.gcf().autofmt_xdate()\nplt.show()\n\n\n# In[ ]:\n\n# Plot da porcentagem de carros alocados em dias de final de 
semana\nplt.plot(dfWeekends['capture_time'],dfWeekends['percentage'])\nplt.gcf().autofmt_xdate()\nplt.show()\n\n\n# ## Porcentagem média de carros ocupados em cada minuto\n\n# In[ ]:\n\n# Faz a media das porcentagens para todos os minutos de uma certa quantidade de dias\ndef media(df, num_dias):\n \"\"\"\n Faz a media das porcentagens para todos os minutos em uma dada a quantidade de dias.\n \n Parameters\n ------------\n df : Pandas dataframe\n Dados a serem analisados, com uma coluna dos horários e outra com as porcentagens.\n \n num_dias : int\n Numero de dias total da coleta.\n \n Returns\n ----------\n media : Pandas dataframe\n Dados com a média das porcentagens para 24 horas.\n \n \"\"\"\n media = []\n minutes_of_day = 1440\n ant = -1\n valores = pd.DataFrame()\n # Loop que irá verificar um dia de registros(24h = 1440 min) visualizando cada minuto\n for i in range(minutes_of_day-1):\n count = 0\n # Irá percorrer os dias seguintes para encontrar as outras incidencias do mesmo minuto\n for j in range(i, num_dias * minutes_of_day, minutes_of_day-80): \n\n try:\n # Por conta de filtros os indices não estão exatos\n # Ele irá procurar em um intervalo o minuto desejado\n for c in range(j, j+3000):\n #Se tiver o mesma hora e minuto somamos a média, além de ser diferente do valor anterior\n if (df['capture_time'].iloc[c].minute == df['capture_time'].iloc[i].minute and \n df['capture_time'].iloc[c].hour == df['capture_time'].iloc[i].hour and\n int(df['capture_time'].iloc[c].day) != ant):\n \n valores = valores.append([df['percentage'].iloc[c]])\n \n #variavel para evitar pegar valores repetidos\n ant = int(df['capture_time'].iloc[c].day)\n \n # Atualiza o j para onde o minuto foi encontrado\n j=c\n count += 1\n break\n \n except Exception as e:\n #print(e)\n break\n print(\"Quant. 
dias: \"+str(count)+\" minutes: \"+str(i))\n \n # Registra somente a hora, media e desvio padrão das porcentagens dos dias\n media.append([df['capture_time'].iloc[i].strftime('%H:%M'), float(valores.mean()), float(valores.std())])\n valores = pd.DataFrame()\n\n media = pd.DataFrame(media, columns=['time', 'mean', 'std'])\n \n # Formatando a hora para datetime\n for i in range(len(media)):\n media['time'].iloc[i] = datetime.datetime.strptime(media['time'].iloc[i], '%H:%M').time()\n \n return media\n\n\n# In[ ]:\n\n# Fazendo a média das porcentagens de cada dia\ndfWeekdays = dfWeekdays.sort_values(by='capture_time')\nmediaWeekdays = media(dfWeekdays, 40)\n\ndfWeekends = dfWeekends.sort_values(by='capture_time')\nmediaWeekends = media(dfWeekends, 25)\n\n\n# In[ ]:\n\nmediaWeekdays.to_csv('mediaWeekdays_v2.csv', index=False, encoding='utf-8')\nmediaWeekends.to_csv('mediaWeekends_v2.csv', index=False, encoding='utf-8')\n\n\n# In[ ]:\n\n# Leitura de dados já processados se necessário\n\n# mediaWeekdays = pd.read_csv('mediaWeekdays_v2.csv')\n# mediaWeekends = pd.read_csv('mediaWeekends_v2.csv')\n\n\n# In[ ]:\n\n# Ordenando pelo tempo\nmediaWeekdays = mediaWeekdays.sort_values(by=['time'])\nmediaWeekends = mediaWeekends.sort_values(by=['time'])\n\n\n# ## Gráficos de porcentagem média de carros ocupados em cada minuto\n\n# In[ ]:\n\nimport numpy as np\n\n# Plot da media das porcentagens dos dias de semana\nfig, ax = plt.subplots()\n# Curva dos carros andando\nax.plot(range(len(mediaWeekdays['time'])),mediaWeekdays['mean'], label='Carros Ocupados')\n\n# Curvas representando o intervalo de desvio padrão\nax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=150, c='gray', label='Desvio Padrão')\nax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=150, c='gray')\n\n# Modificando os labels das horas\nax.xaxis.set_ticks(np.arange(0, 1441, 120))\n\nfig.canvas.draw()\n\nlabels = [item.get_text() for item in ax.get_xticklabels()]\nlabels = range(0,26,2)\n\nax.set_xticklabels(labels)\n\n# Legendas e label dos eixos\nplt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)\nplt.ylabel('Percentual')\nplt.xlabel('Horário')\n\n# Salvando o plot\nplt.savefig('Weekdays_v2.pdf', bbox_inches='tight')\n\nplt.show()\n\n\n# In[ ]:\n\n# Plot da media das porcentagens dos dias de semana\nfig, ax = plt.subplots()\n# Curva dos carros andando\nax.plot(range(len(mediaWeekends['time'])),mediaWeekends['mean'], label='Carros Reservados')\n\n# Curvas representando o intervalo de desvio padrão\nax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=150, c='gray', label='Desvio Padrão')\nax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=150, c='gray')\n\n# Modificando os labels das horas\nax.xaxis.set_ticks(np.arange(0, 1441, 120))\n\nfig.canvas.draw()\n\nlabels = [item.get_text() for item in ax.get_xticklabels()]\nlabels = range(0,26,2)\n\nax.set_xticklabels(labels)\n\n# Legendas e label dos eixos\nplt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)\nplt.ylabel('Percentual')\nplt.xlabel('Horário')\n\n# Salvando o plot\nplt.savefig('Weekends_v2.pdf', bbox_inches='tight')\n\nplt.show()\n\n\n# ## Extração das porcentagens de carros reservados\n\n# In[ ]:\n\n# CSV criado a partir dos dados coletados do arquivo ModoApi_Data_Filter\ndfTravels = pd.read_csv('../travels_v2.csv')\n\n\n# In[ ]:\n\n# Concatenando reservas consecutivas\ndfTravels = 
dfTravels.sort_values(by=['car_id', 'start'])\n\n\"\"\"\n# Enquanto estiver com viagens consecutivas percorrer o dataframe\nwhile (len(dfTravels[dfTravels['only_new_reserves'] == False]) > 0):\n i = 0\n while(i < len(dfTravels)-1):\n # Se a viagem atual for comum e a seguinte consecutiva, analisar a possível concatenação\n if (dfTravels['only_new_reserves'].iloc[i] and \n not dfTravels['only_new_reserves'].iloc[i+1]):\n \n # Se a viagem consecutiva terminar depois da viagem anterior efetuar concatenação\n if (dfTravels['end'].iloc[i] < dfTravels['end'].iloc[i+1]):\n dfTravels['end'].iloc[i] = dfTravels['end'].iloc[i+1]\n \n # Caso contrario, somente retirar do dataframe a viagem consecutiva\n dfTravels = dfTravels.drop(dfTravels.index[i+1])\n dfTravels.index = range(len(dfTravels))\n i -= 1\n \n i+=1\n\ndfTravels.index = range(len(dfTravels))\ndfTravels.to_csv('travels_concat_v2.csv')\n\"\"\"\n\n# ### Função para contar a porcentagem de carros reservados nos dias passados como parâmetro\n\n# In[ ]:\n\ndef cont_reservas(dfDays):\n \"\"\"\n Conta o número de reservas de todos os minutos de coleta para realizar uma média.\n \n Parameters\n ------------\n dfDays: Pandas dataframe\n Dados de todos os minutos de coleta a serem avaliados.\n \n Returns\n ----------\n media : Pandas dataframe\n Dados com a média das porcentagens para 24 horas.\n \"\"\"\n \n # Coletando todos os minutos de captura\n datas = pd.to_datetime(dfDays['capture_time'])\n datas = pd.DataFrame(datas)\n\n dfReservas = pd.concat([dfTravels['car_id'], dfTravels['start'], dfTravels['end']], axis=1)\n \n # Ordenando os valores pelo tempo de inicio das reservas\n dfReservas = dfReservas.sort_values(by='start')\n\n cont_reservas = 0\n reservas = []\n \n # Auxiliar para adquirir o indice da viagem mais proxima que engloba a hora atual\n proximo_start = 0\n\n for i in range(len(datas)):\n # Vetor para evitar multiplas ocorrências de reservas de um veículo já contabilizado\n ids = []\n data_timestamp = datas['capture_time'].iloc[i].timestamp()\n \n # Auxiliar para evitar analises desnecessárias\n# start_test = True\n\n \"\"\" Comparando todas as datas aos intervalos das reservas,\n e vendo se ele faz parte para se somar a porcentagem\"\"\"\n for j in range(len(dfReservas)):\n if (dfReservas['start'].iloc[j] <= data_timestamp and \n data_timestamp <= dfReservas['end'].iloc[j]):\n \n # Condicional para evitar contar reservas de veículos já contabilizados\n if (dfReservas['car_id'].iloc[j] in ids): \n continue\n \n cont_reservas += 1\n \n # Adicionando veículo contado em tal minuto\n ids.append(dfReservas['car_id'].iloc[j])\n \n \"\"\" Evita comparações desnecessárias de viagens que terminaram antes da hora\n a ser analisada. 
Seguindo a ideia de que se a viagem não englobou antes \n a hora atual ela não irá englobar as próximas\"\"\"\n #if (start_test) : \n # if (proximo_start > 0): proximo_start = j - 1\n # else: proximo_start = j\n # start_test = False\n \n # Evita analisar viagens que começaram depois da hora atual\n # a fim de diminuir o tempo de execução do loop\n if (dfReservas['start'].iloc[j] > data_timestamp):\n break\n\n porcentagem = (cont_reservas/len(car_ids))*100\n\n data = datas['capture_time'].iloc[i]\n\n reservas.append([data, cont_reservas, porcentagem])\n\n cont_reservas = 0\n \n if (i % 100 == 0): \n print('Data atual: '+str(data))\n print(' ')\n\n reservas = pd.DataFrame(reservas, columns=['datetime', 'total_reserves', 'percentage'])\n\n return reservas\n\n\n# In[ ]:\n\n# Contando reservas durante os dias de coleta\ndfR_Weekdays = cont_reservas(dfWeekdays)\ndfR_Weekends = cont_reservas(dfWeekends)\n\n\n# In[ ]:\n\ndfR_Weekends.to_csv('r_weekends_v2.csv', index=False, encoding='utf-8')\ndfR_Weekdays.to_csv('r_weekdays_v2.csv', index=False, encoding='utf-8')\n\n\n# In[ ]:\n\n# Leitura de dados já processados se necessário\n\n# dfR_Weekdays = pd.read_csv('r_weekdays_v2.csv')\n# dfR_Weekends = pd.read_csv('r_weekends_v2.csv')\n\n# Formatando os dias para datetime\n\n# dfR_Weekdays['datetime'] = pd.to_datetime(dfR_Weekdays['datetime'])\n# dfR_Weekends['datetime'] = pd.to_datetime(dfR_Weekends['datetime'])\n\n\n# In[ ]:\n\n# Plot da porcentagem de carros alocados em dias de semana\nplt.plot(dfR_Weekdays['datetime'],dfR_Weekdays['percentage'])\nplt.gcf().autofmt_xdate()\nplt.show()\n\n\n# In[ ]:\n\n# Plot da porcentagem de carros alocados em dias de semana\nplt.plot(dfR_Weekdays['datetime'],dfR_Weekdays['percentage'])\nplt.gcf().autofmt_xdate()\nplt.show()\n\n\n# In[ ]:\n\n# Plot da porcentagem de carros alocados em dias de semana\nplt.plot(dfR_Weekends['datetime'],dfR_Weekends['percentage'])\nplt.gcf().autofmt_xdate()\nplt.show()\n\n\n# In[ ]:\n\n# Fazendo a média das porcentagens de cada dia\n\n# Dias de semana\ndfR_Weekdays = dfR_Weekdays.sort_values(by='datetime')\ndfR_Weekdays['capture_time'] = dfR_Weekdays['datetime']\ndfmediaR_Weekdays = media(dfR_Weekdays, 40)\n\n# Ordenando pelo tempo\ndfmediaR_Weekdays = dfmediaR_Weekdays.sort_values(by='time')\n\ndfmediaR_Weekdays.to_csv('media_r_weekdays.csv', index=False, encoding='utf-8')\n\n# Finais de semana\ndfR_Weekends = dfR_Weekends.sort_values(by='datetime')\ndfR_Weekends['capture_time'] = dfR_Weekends['datetime']\ndfmediaR_Weekends = media(dfR_Weekends, 30)\n\n# Ordenando pelo tempo\ndfmediaR_Weekends = dfmediaR_Weekends.sort_values(by='time')\n\ndfmediaR_Weekends.to_csv('media_r_weekends.csv', index=False, encoding='utf-8')\n\n\n# ## Plotagem final da porcentagem de carros reservados e ocupados\n\n# In[ ]:\n\nimport matplotlib\n\nmatplotlib.rc('font', size=12)\n\n# Plot das porcentagens dos fins de semana\nfig, (ax1, ax2) = plt.subplots(1, 2)\n\nfig.set_size_inches(14,4.5)\n\n\n\n# Curva dos carros andando\n\nax1.plot(range(len(mediaWeekdays['time'])),mediaWeekdays['mean'], label='Carros Ocupados')\n\n# Curvas representando o intervalo de desvio padrão\nax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=150, c='gray')\nax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=150, c='gray')\n\n\n# Curva dos carros reservados\nax1.plot(range(len(dfmediaR_Weekdays['time'])),dfmediaR_Weekdays['mean'], label='Carros Reservados', c='r', ls='--')\n\n# Curvas 
representando o intervalo de desvio padrão\nax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']+dfmediaR_Weekdays['std'], alpha=150, c='#FA8072', ls='--')\nax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']-dfmediaR_Weekdays['std'], alpha=150, c='#FA8072', ls='--')\n\n\n# Modificando os labels das horas e das porcentagens\nax1.xaxis.set_ticks(np.arange(0, 1441, 120))\nax1.yaxis.set_ticks(np.arange(0, 110, 10))\n\nfig.canvas.draw()\n\nlabels = [item.get_text() for item in ax1.get_xticklabels()]\nlabels = range(0,26,2)\n\nax1.set_xticklabels(labels)\n\n# Eixo y de 0 a 100%\nax1.set_ylim([0,100])\n\n# Legendas e label dos eixos\nax1.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)\nax1.set_ylabel('Percentual')\nax1.set_xlabel('Horário')\n\n\n\n\n# # Curva dos carros andando\nax2.plot(range(len(mediaWeekends['time'])),mediaWeekends['mean'], label='Carros Ocupados')\n\n# # Curvas representando o intervalo de desvio padrão\nax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=150, c='gray')\nax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=150, c='gray')\n\n\n# # Curva dos carros reservados\nax2.plot(range(len(dfmediaR_Weekends['time'])),dfmediaR_Weekends['mean'], label='Carros Reservados', c='r', ls='--')\n\n# # Curvas representando o intervalo de desvio padrão\nax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']+dfmediaR_Weekends['std'], alpha=150, c='#FA8072', ls='--')\nax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']-dfmediaR_Weekends['std'], alpha=150, c='#FA8072', ls='--')\n\n# Modificando os labels das horas e das porcentagens\nax2.xaxis.set_ticks(np.arange(0, 1441, 120))\nax2.yaxis.set_ticks(np.arange(0, 110, 10))\n\nfig.canvas.draw()\n\nlabels = [item.get_text() for item in ax2.get_xticklabels()]\nlabels = range(0,26,2)\n\nax2.set_xticklabels(labels)\n\n# Eixo y de 0 a 100%\nax2.set_ylim([0,100])\n\n# Legendas e label dos eixos\nax2.legend(bbox_to_anchor=(0.55, 0.99), loc=2, borderaxespad=0.1)\nax2.set_ylabel('Percentual')\nax2.set_xlabel('Horário')\n\n\nplt.show()\nplt.savefig('ViagensPorHoras_v2.pdf')\n\n\n# In[ ]:\n\n\n\n" }, { "alpha_fraction": 0.5869131684303284, "alphanum_fraction": 0.5973111987113953, "avg_line_length": 40.034481048583984, "blob_id": "656066f7d343e7351ed9b966cdcb4c1ddbae48be", "content_id": "a8c7dacfaa7fba06ebeecaf693523bbadd1c4fcc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9565, "license_type": "permissive", "max_line_length": 141, "num_lines": 232, "path": "/V2/data-filter-old.py", "repo_name": "netlabufjf/Modo-Scripts", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport datetime\nimport pytz\n\ndf = pd.read_csv('../../ModoApi_Data.csv')\n\n\n# ## Filtrando e formatando os dados\n\n# In[2]:\n\n#Limpando dados\ndf.drop(['Unnamed: 0'], axis=1, inplace=True)\ndf = df[df['CarID'] != 'CarID']\ndf.dropna(thresh=9, inplace=True)\ndf.dropna(thresh=6, inplace=True)\n\n\n# In[3]:\n\n# Formatando os dados para tipos mais apropriados\ntypes = {\n 'LocationID': int,\n 'CarID': int,\n 'FullyAvailable': float,\n 'PartlyAvailable': float,\n 'NotAvailable': float,\n 'StartTime': str,\n 'EndTime': str,\n 'Duration': str,\n 'RequestStart': int,\n 'RequestEnd': int,\n 'RequestDuration': int \n}\n\ndf = df.astype(dtype=types)\ndf['CaptureTime'] = 
pd.to_datetime(df['CaptureTime'])\n\ndf.drop(['LocationID', 'Duration', 'RequestDuration'], axis=1, inplace=True)\n\n# Coletando os IDs dos carros\ncar_ids = []\nfor i in range(len(df)):\n if (df['CarID'].iloc[i] in car_ids):\n continue\n else:\n car_ids.append(df['CarID'].iloc[i])\n\n\n# ## Funções para manipulação do tempo\n\n# In[4]:\n\n# Converte hora dada a time zone atual, a zona a ser convertida e a diferença de tempo\ndef convert_datetime_timezone(dt, tz1, tz2, str_diff):\n tz1 = pytz.timezone(tz1)\n tz2 = pytz.timezone(tz2)\n\n dt = datetime.datetime.fromtimestamp(dt)\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d %H:%M:%S\")\n dt = tz1.localize(dt)\n dt = dt.astimezone(tz2)\n try:\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d %H:%M:%S\"+str_diff)\n # Except para evitar um erro pela mudança de fuso horário\n except Exception as e:\n print(e)\n # Diminui 1 hora para se adaptar\n # dt = dt - datetime.timedelta(hours=1)\n dt = datetime.datetime.strptime(str(dt),\"%Y-%m-%d %H:%M:%S\"+'-08:00')\n \n dt = int(dt.timestamp())\n\n return dt\n\n\n# In[5]:\n\n# Faz a diferença entre duas horas dadas e retorna em minutos\ndef Hour_Diff(h1,h2):\n h1Aux = datetime.datetime.fromtimestamp(h1)\n h2Aux = datetime.datetime.fromtimestamp(h2)\n diff = abs((h1Aux - h2Aux)).total_seconds()/60\n \n return diff\n\n\n# ## Filtro para classificação dos dados\n\n# In[6]:\n\nnew_travel = []\ncancel = []\nparked = []\nspecial_travel = []\n\nfor j in range(len(car_ids)):\n carID = car_ids[j]\n carDF = []\n \n carDF = df[df['CarID'] == carID]\n carDF = carDF.sort_values(by=['CaptureTime'])\n \n for i in range(1, len(carDF)):\n try:\n # Extração com base no Start Time\n # Convertendo todos os timestamps para o fuso horário de vancouver\n start_time_atual = int(carDF['StartTime'].iloc[i])\n start_time_atual = convert_datetime_timezone(start_time_atual, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n start_time_anterior = int(carDF['StartTime'].iloc[i-1])\n start_time_anterior = convert_datetime_timezone(start_time_anterior, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n request_start_atual = carDF['RequestStart'].iloc[i]\n request_start_atual = convert_datetime_timezone(request_start_atual, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n request_start_anterior = carDF['RequestStart'].iloc[i-1]\n request_start_anterior = convert_datetime_timezone(request_start_anterior, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n capture_timestamp = int(carDF['CaptureTime'].iloc[i].timestamp())\n capture_timestamp = convert_datetime_timezone(capture_timestamp, 'UTC', 'America/Vancouver', '-07:00')\n \n #Verifica se a coleta começou com o carro parado\n if (i == 1 and start_time_atual == request_start_atual):\n \n start_parked = capture_timestamp \n \n \n # Verifica se a coleta começou com o carro andando\n if (i == 1 and start_time_atual > request_start_atual): \n \n # Registra uma nova viagem\n new_travel.append([carID, capture_timestamp, start_time_atual, True])\n \n # Se o inicio do tempo de disponibilidade > tempo de requisição E estava Parado temos uma nova viagem. 
\n elif (start_time_atual > request_start_atual and start_time_anterior == request_start_anterior):\n \n # Registra uma nova viagem\n new_travel.append([carID, capture_timestamp, start_time_atual, True])\n \n # Registra o final do tempo em que estava estacionado\n if (start_parked > 0):\n parked.append([carID, start_parked, capture_timestamp])\n start_parked = -1\n\n\n #Se o inicio do tempo de disponibilidade anterior < tempo de disponibilidade atual E está andando temos uma nova viagem/extensão.\n elif (start_time_anterior < start_time_atual and start_time_atual > request_start_atual):\n \n new_travel.append([carID, capture_timestamp, start_time_atual, False])\n\n \n # Se estava andando e agora está parado.\n if (start_time_atual == request_start_atual and start_time_anterior > request_start_anterior):\n \n # Inicio do tempo estacionado\n start_parked = capture_timestamp\n\n if(new_travel != []):\n \n # Se a diferença entre hora atual e o inicio da ultima viagem < 30 min => cancelamento\n # Senão => Diminuição/Cancelamento\n if (Hour_Diff(capture_timestamp, new_travel[-1][1]) < 30):\n \n cancel.append([carID, capture_timestamp, start_time_anterior, True])\n\n #Tolerância de 20 min para dizer que foi uma diminuição ou cancelamento em vez de um termino de viagem\n elif(Hour_Diff(capture_timestamp, new_travel[-1][2]) > 20):\n \n cancel.append([carID, capture_timestamp, start_time_anterior, False])\n\n \n # Se acontecer um aumento do tempo de diponibilidade enquanto está andando, ocorreu um cancelamento ou diminuição\n if (start_time_atual > request_start_atual and start_time_anterior > start_time_atual):\n \n cancel.append([carID, capture_timestamp, start_time_anterior, False]) \n \n \n # Extração com base no End Time\n end_time_atual = float(carDF['EndTime'].iloc[i])\n end_time_atual = convert_datetime_timezone(end_time_atual, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n end_time_anterior = float(carDF['EndTime'].iloc[i-1])\n end_time_anterior = convert_datetime_timezone(end_time_anterior, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n request_end_atual = carDF['RequestEnd'].iloc[i]\n request_end_atual = convert_datetime_timezone(request_end_atual, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n request_end_anterior = carDF['RequestEnd'].iloc[i-1]\n request_end_anterior = convert_datetime_timezone(request_end_anterior, 'America/Sao_Paulo', 'America/Vancouver','-07:00')\n \n # Se estava com um horário marcado de fim de disponibilidade e agora está livre até o dia seguinte houve um cancelamento\n if (end_time_anterior < request_end_anterior and end_time_atual == request_end_atual):\n \n cancel.append([carID, capture_timestamp, end_time_anterior, True])\n \n\n # Se a janela de disponibilidade diminuiu pelo end time foi agendada uma nova viagem/extensão\n if (end_time_anterior > end_time_atual and end_time_anterior < request_end_anterior):\n \n new_travel.append([carID, end_time_atual, end_time_anterior, False])\n \n # Se a janela de disponibilidade aumentou pelo end time ocorreu um cancelamento/diminuição \n elif (end_time_anterior < end_time_atual and end_time_atual < request_end_atual):\n \n cancel.append([carID, capture_timestamp, end_time_anterior, False])\n \n # Se antes estava sem fim da janela de disponibilidade e agora end time < request_end\n # Temos uma nova viagem mas sem ter como dizer a sua duração\n if (end_time_anterior == request_end_anterior and end_time_atual < request_end_atual):\n \n special_travel.append([carID, capture_timestamp, 
end_time_atual])\n \n \n except Exception as e:\n print('Loop:'+str(e))\n # Exception será gerada nos casos em que o carro não está disponível\n continue\n\ndfTravels = pd.DataFrame(new_travel,columns=['car_id', 'start', 'end', 'only_new_reserves'])\ndfCancel = pd.DataFrame(cancel, columns=['car_id', 'capture_time', 'previous_start', 'only_cancel'])\ndfParked = pd.DataFrame(parked, columns=['car_id', 'start', 'end'])\ndfSpecial_Travel = pd.DataFrame(special_travel, columns=['car_id', 'capture_time', 'start'])\n\n\ndfTravels.to_csv('travels_sem_reduzir_v1.csv', index=False, encoding='utf-8')\ndfCancel.to_csv('cancel_sem_reduzir_v1.csv', index=False, encoding='utf-8')\ndfParked.to_csv('parked_sem_reduzir_v1.csv', index=False, encoding='utf-8')\n" }, { "alpha_fraction": 0.6187469363212585, "alphanum_fraction": 0.6629431843757629, "avg_line_length": 28, "blob_id": "dc5eca67ff9cdf981f7a8ec98e7f2fbfdc70d73f", "content_id": "ecb6f542d2f2089fca0f0271f0d67bf52b6f4678", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4119, "license_type": "permissive", "max_line_length": 210, "num_lines": 142, "path": "/V2/novo_processamento.py", "repo_name": "netlabufjf/Modo-Scripts", "src_encoding": "UTF-8", "text": "# Filtra os dados\n# cat ModoApi_Data.csv | awk -F \",\" '{print $2\",\"$3\",\"$4\",\"$8}' > Filtered_ModoApi_Data.csv\n\n# retira as linhas com False\n# cat data/Filtered_ModoApi_Data.csv | grep -v \"False\" > data/True_Filtered_ModoApi_Data.csv\n\n# retira as linhas com NaN\n# cat data/True_Filtered_ModoApi_Data.csv | grep -v \",,\" > data/NaN_True_Filtered_ModoApi_Data.csv\n\n# retira as linhas com Cabecalho\n# cat data/NaN_True_Filtered_ModoApi_Data.csv | grep -v \"LocationID\" > data/Header_NaN_True_Filtered_ModoApi_Data.csv\n\n# converte datas em timestemp\n# cat data/Header_NaN_True_Filtered_ModoApi_Data.csv | awk -F ',' '{cmd=\"date -d \\\"\"$3\"\\\" +%s\"; cmd| getline $3; close(cmd); print $1\",\"$2\",\"$3\",\"$4}' > data/Timestemp_Header_NaN_True_Filtered_ModoApi_Data.csv\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport pytz\n\ndf = pd.read_csv('data/Header_NaN_True_Filtered_ModoApi_Data.csv',\n names=[\"LocationID\", \"CarID\", \"CaptureTime\", \"StartTime\"])\ndf.count()\n# limpa registros com nulo\ndf = df.dropna()\n\ndf.dtypes\n\ntypes = {\n 'LocationID': int,\n 'CarID': int,\n 'StartTime': int}\n\ndf = df.astype(dtype=types)\n\ndf.count()\n\ndf\n\ndf['CaptureTime'] = pd.to_datetime(df['CaptureTime'])\n\n\ndf['CaptureTime'] = df['CaptureTime'].astype('int64')//1e9\n\n\ndf.to_csv('data/Timestemp_Header_NaN_True_Filtered_ModoApi_Data.csv', index=False)\n\ndf\n\n# Dom, 5 de Nov, 02:00 - 1509854400\n# Dom, 15 de Out, 00:00 - 1508036400\n\ndf[df.CaptureTime < 1508036400].count()\ndf[(df.CaptureTime > 1508036400) & (df.CaptureTime < 1509854400)].count()\ndf[df.CaptureTime > 1509854400].count()\n\n# ate dia 15/10 diminui 4h\ndf.loc[df.CaptureTime < 1508036400, \"CaptureTime\"] = df[\"CaptureTime\"] - (4*3600)\n\n# do dia 15/10 até dia 05/11 diminui 5h\ndf.loc[(df.CaptureTime > 1508036400) & (df.CaptureTime < 1509854400),\n \"CaptureTime\"] = df[\"CaptureTime\"] - (5*3600)\n\n# do dia 05/11 pra frente diminui 6h\ndf.loc[df.CaptureTime > 1509854400, \"CaptureTime\"] = df[\"CaptureTime\"] - (6*3600)\n\ndf.to_csv('data/Vancouver_Timestemp_Header_NaN_True_Filtered_ModoApi_Data.csv', index=False)\n\ndf\n\n# ordena pelo carro, hora de captura e start\ndf.sort_values(by=['CarID', 'CaptureTime', 'StartTime'], 
inplace=True)\n\nmin = df.CaptureTime.min()\nmax = df.StartTime.max()\n\nminutosTotais = (max-min)/60\n\ndfCarId = df.groupby(['CarID'], as_index=False).count().CarID.copy()\n\n# para cada carro foi criado um vetor com todos os minutos possiveis de reserva\ndfTimelineReservas = pd.DataFrame(columns=['CarID', 'Minuto', 'Reserva'])\n\nfor carId in dfCarId:\n inicio = min\n for i in range(0, minutosTotais):\n minutoAtual = inicio + i*60\n if(df[(df['CaptureTime'] <= minutoAtual) & (df['StartTime'] > minutoAtual)].empty):\n linha = {'CarID': carId, 'Minuto': minutoAtual, 'Reserva': False}\n dfTimelineReservas = dfTimelineReservas.append(linha, ignore_index=True)\n else:\n linha = {'CarID': carId, 'Minuto': minutoAtual, 'Reserva': True}\n dfTimelineReservas = dfTimelineReservas.append(linha, ignore_index=True)\n\ndfTimelineReservas.to_csv('data/Processados.csv', index=False)\n\n\n# do dia 15/10 ate o dia 05/11\n\n# do dia 05/11 pra frente\n\n\n# apaga linhas com dados com False\n# df = df[df['StartTime'] != 'False']\n# df.loc[df['LocationID'].str.contains(non_numeric) == True]\n\n# visualizar os dados apagados\n# df.loc[pd.to_numeric(df['StartTime'], errors='coerce').isnull()]\n\n# apaga linhas nao numeriacas\n# df['LocationID'] = pd.to_numeric(df['LocationID'], errors='coerce')\n# df = df.dropna()\n#\n# df.to_csv('data/Filtered_ModoApi_Data.csv')\n#\n#\n# df.count()\n#\n# df = df.dropna()\n# df.count()\n#\n# df\n#\n# # Formatando os dados para tipos mais apropriados\n# types = {\n# 'LocationID': int,\n# 'CarID': int,\n# 'StartTime': int\n# }\n#\n# df = df.astype(dtype=types)\n#\n# # cat data/miFiltered_ModoApi_Data.csv | awk -F ',' '{cmd=\"date -d \\\"\"$3\"\\\" +%s\"; cmd| getline $3; close(cmd); print $1\",\"$2\",\"$3\",\"$4} > '\n#\n# df.dtypes\n#\n#\n# df['Teste'] = df['CaptureTime'].apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'))\n#\n# # pd.to_datetime(df['CaptureTime'], )\n#\n# df\n" } ]
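Both scripts in this record handle the Vancouver clock by hand: convert_datetime_timezone tries the -08:00 strptime format and falls back to -07:00, and novo_processamento.py subtracts 4, 5 or 6 hours over hard-coded timestamp ranges around the 2017 DST change. A minimal sketch of the same UTC-to-Vancouver conversion with the offset resolved automatically, assuming Python 3.9+ where zoneinfo is in the standard library:

    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo

    def utc_to_vancouver(ts):
        """Convert a unix timestamp to local Vancouver time, DST included."""
        dt = datetime.fromtimestamp(ts, tz=timezone.utc)
        return dt.astimezone(ZoneInfo("America/Vancouver"))

    # 1509854400 is the boundary the awk/pandas pipeline above special-cases
    print(utc_to_vancouver(1509854400))  # 2017-11-04 21:00:00-07:00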
3
costinaioana18/Museek
https://github.com/costinaioana18/Museek
f7172d3ecafbc21d8cea4417e1b20406fdafd689
7dc4f5bd175c9218b5544dff8bc9f619d0e46edb
a0b834ad0fc3399f45d3485acf31dc98b7fdc9f7
refs/heads/main
2023-05-24T15:07:29.165168
2021-06-21T13:31:48
2021-06-21T13:31:48
357,276,952
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5365431308746338, "alphanum_fraction": 0.5555940270423889, "avg_line_length": 44.59677505493164, "blob_id": "bd9ef3ec6951068d93c44315759cf3ef79052098", "content_id": "9eaaf9f366490c344719e7ea509acb52e18c7672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2887, "license_type": "no_license", "max_line_length": 142, "num_lines": 62, "path": "/useful_classes/inputBox.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame as pg\r\n#This class contains fragments of code inspired by: https://stackoverflow.com/questions/46390231/how-can-i-create-a-text-input-box-with-pygame\r\nclass InputBox:\r\n def __init__(self, x, y, w, h, text=''):\r\n self.rect = pg.Rect(x, y, w, h)\r\n self.COLOR_INACTIVE = pg.Color((177, 114, 97))\r\n self.COLOR_ACTIVE = pg.Color((255, 162, 193))\r\n self.FONT = pg.font.SysFont('inkfree', 32)\r\n self.color = self.COLOR_INACTIVE\r\n self.text = text\r\n self.default_text= text\r\n self.txt_surface = self.FONT.render(text, True, self.color)\r\n self.protected_text=''\r\n self.active = False\r\n self.length=len(text)\r\n\r\n #this functions contains code inspired by:https://stackoverflow.com/questions/46390231/how-can-i-create-a-text-input-box-with-pygame\r\n def handle_event(self, event,protected=False):\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if self.rect.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n self.active = not self.active\r\n if self.default_text==self.text:\r\n self.text=''\r\n self.protected_text=''\r\n else:\r\n self.active = False\r\n # Change the current color of the input box.\r\n self.color = self.COLOR_ACTIVE if self.active else self.COLOR_INACTIVE\r\n if event.type == pg.KEYDOWN:\r\n if self.active:\r\n if event.key == pg.K_RETURN:\r\n #print(self.text)\r\n return self.text\r\n self.text = ''\r\n self.protected_text=''\r\n elif event.key == pg.K_BACKSPACE:\r\n self.text = self.text[:-1]\r\n self.protected_text = self.protected_text[:-1]\r\n else:\r\n self.text += event.unicode\r\n self.protected_text+='*'\r\n if protected:\r\n self.txt_surface = self.FONT.render(self.protected_text, True, self.color)\r\n else:\r\n self.txt_surface = self.FONT.render(self.text, True, self.color)\r\n self.length=len(self.text)\r\n def set_text(self,text):\r\n self.text=text\r\n\r\n #this function contains code inspired by:https://stackoverflow.com/questions/46390231/how-can-i-create-a-text-input-box-with-pygame\r\n def update(self):\r\n # Resize the box if the text is too long.\r\n width = max(400, self.txt_surface.get_width()+10)\r\n self.rect.w = width\r\n\r\n def draw(self, screen):\r\n # Blit the text.\r\n print(self.length)\r\n screen.blit(self.txt_surface, (self.rect.x+self.rect.w/2-self.length*6, self.rect.y+10))\r\n pg.draw.rect(screen, self.color, self.rect, 2)" }, { "alpha_fraction": 0.47499245405197144, "alphanum_fraction": 0.5031635761260986, "avg_line_length": 43.77931213378906, "blob_id": "9acdaf3d193c178fc84ee30c9c8ca8d8ea1878aa", "content_id": "5f56e794399c7dc42ce046c043ec2b30602bca84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6638, "license_type": "no_license", "max_line_length": 155, "num_lines": 145, "path": "/screens/forgot_password.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom screens.menu_screen import 
Menu_screen\r\nfrom useful_classes.inputBox import InputBox\r\nfrom useful_classes.encryption import *\r\n\r\n\r\nclass Forgot_password_screen():\r\n def __init__(self, app):\r\n self.play_icon = pygame.image.load('icons/play.jpg')\r\n self.app = app\r\n self.click = False\r\n self.font = pygame.font.SysFont('inkfree', 22)\r\n self.u = None\r\n self.p = None\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users\")\r\n self.password = \"None\"\r\n self.complete_fields = 0\r\n self.succes = None\r\n self.recovery_questions = [\"your first dog\", \"your first crush\", \"your favourite flavour\",\r\n \"your favourite uncle\", \"your secret talent\"]\r\n\r\n self.recovery_index = 0\r\n self.recovery_question = \"your first dog\"\r\n\r\n def database_check(self):\r\n print(\"hai verilor\")\r\n q = self.database_handler.exists(\"username\", self.u)\r\n if q:\r\n recovery_question = self.database_handler.get(\"username\", self.u, \"recovery_question\")\r\n recovery_answer = self.database_handler.get(\"username\", self.u, \"recovery_answer\")\r\n self.password = self.database_handler.get(\"username\", self.u, \"password\")\r\n self.password = decrypt(self.password)\r\n if recovery_answer == self.p and recovery_question == self.recovery_question:\r\n self.succes = 1\r\n self.app.set_user(self.u)\r\n else:\r\n self.succes = 0\r\n\r\n else:\r\n self.succes = 0\r\n\r\n def forgot_password(self):\r\n running = True\r\n username_input = InputBox(250, 180, 400, 50, \"username\")\r\n password_input = InputBox(250, 300, 400, 50, \"your recovery answer\")\r\n input_boxes = [username_input, password_input]\r\n menu_button = pygame.Rect(250, 450, 400, 50)\r\n # next_button = pygame.Rect(500, 100, 50, 50)\r\n # next_icon = pygame.image.load(\"icons/next.jpg\")\r\n next_button = pygame.Rect(660, 260, 30, 30)\r\n back_next_button = pygame.Rect(210, 260, 30, 30)\r\n next_icon = pygame.image.load(\"icons/little_next.jpg\")\r\n back_next_icon = pygame.image.load(\"icons/little_backwards_next.jpg\")\r\n\r\n while running:\r\n click = False\r\n self.app.screen.fill((0, 0, 0)) #beginning\r\n for event in pygame.event.get(): #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.key == K_RETURN:\r\n u = username_input.handle_event(event)\r\n p = password_input.handle_event(event) #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if u != None: #end\r\n self.u = u\r\n if p:\r\n self.p = p\r\n\r\n print(self.u)\r\n print(self.p)\r\n if self.u and self.p:\r\n self.complete_fields = 1\r\n\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n\r\n username_input.handle_event(event)\r\n password_input.handle_event(event)\r\n for box in input_boxes:\r\n box.update()\r\n\r\n if (self.succes == 0):\r\n self.app.draw_text('Wrong username or password', self.app.font, (255, 255, 255), self.app.screen, 20,\r\n 500)\r\n # self.app.draw_text('login', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n mx, my = pygame.mouse.get_pos()\r\n if menu_button.collidepoint((mx, my)):\r\n if click and self.complete_fields:\r\n self.database_check()\r\n if (self.succes == 1):\r\n # elf.menu_screen.menu()\r\n print(self.password)\r\n # recovery_question = 
self.database_handler.get(\"username\", self.u, \"recovery_question\")\r\n\r\n username_input.draw(self.app.screen)\r\n password_input.draw(self.app.screen)\r\n\r\n if next_button.collidepoint((mx, my)):\r\n if click:\r\n if self.recovery_index == 4:\r\n self.recovery_index = 0\r\n else:\r\n self.recovery_index += 1\r\n self.recovery_question = self.recovery_questions[self.recovery_index]\r\n print(\"click\")\r\n\r\n if back_next_button.collidepoint((mx, my)):\r\n if click:\r\n if self.recovery_index == 0:\r\n self.recovery_index = 4\r\n else:\r\n self.recovery_index -= 1\r\n self.recovery_question = self.recovery_questions[self.recovery_index]\r\n print(\"click\")\r\n\r\n if (self.succes == 1):\r\n self.app.draw_text(\"Your password is: \" + self.password, self.app.font, (255, 255, 255),\r\n self.app.screen, 120,\r\n 500)\r\n\r\n # self.app.screen.blit(next_icon, (500, 100))\r\n self.app.screen.blit(self.play_icon, (250, 450))\r\n self.app.draw_text(\"recovery question: \" + self.recovery_question, self.font, self.app.color,\r\n self.app.screen, 250,\r\n 260)\r\n self.app.draw_text(\"Account recovery\", pygame.font.SysFont('inkfree', 42), self.app.color,\r\n self.app.screen, 300,\r\n 60)\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n self.app.screen.blit(next_icon, (660, 260))\r\n self.app.screen.blit(back_next_icon, (210, 260))\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.flip()\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.4817880690097809, "alphanum_fraction": 0.503311276435852, "avg_line_length": 39.655174255371094, "blob_id": "70dbba27afd6ece782421787c6a7d9a114f21883", "content_id": "98c69b7e33e0cd05115cafec1c3a973734bdd7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3624, "license_type": "no_license", "max_line_length": 143, "num_lines": 87, "path": "/screens/general_kno_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom questions.choise_question import Choice_Question\r\nfrom questions.gen_kno_question import Gen_Kno_Question\r\nfrom screens.piano_tutorial_screen import Piano_tutorial_screen\r\nfrom questions.piano_question import Piano_Question\r\nimport random\r\n\r\n\r\nclass General_kno_screen():\r\n def __init__(self, app):\r\n self.checked = 0\r\n self.app = app\r\n self.click = False\r\n self.q = Piano_Question(self.app, 0, \"choice\")\r\n self.tutorial_icon = pygame.image.load('icons/tutorial.jpg')\r\n self.piano_tutorial_screen = Piano_tutorial_screen(self.app)\r\n self.started = 0\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n self.case = 0\r\n\r\n def hover_photo(self):\r\n if self.case == 1:\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn_hov.jpg')\r\n else:\r\n self.next_submit_icon = pygame.image.load('icons/play_hov.jpg')\r\n\r\n def unhover_photo(self):\r\n if self.case == 1:\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn.jpg')\r\n else:\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n\r\n def general_kno(self):\r\n\r\n running = True\r\n next_button = pygame.Rect(250, 450, 400, 50)\r\n while running:\r\n click = False\r\n for event in pygame.event.get(): #start\r\n if event.type == QUIT: #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n pygame.quit()\r\n 
sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n #end\r\n self.app.screen.fill((0, 0, 0))\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n\r\n if next_button.collidepoint((mx, my)):\r\n self.hover_photo()\r\n if click:\r\n if (self.started == 0):\r\n self.checked = 1\r\n self.started = 1\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn.jpg')\r\n if self.checked == 1:\r\n self.q = Gen_Kno_Question(self.app, 0)\r\n self.q.set_next()\r\n self.checked = 0\r\n else:\r\n self.q.check_answer()\r\n self.checked = 1\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n else:\r\n self.unhover_photo()\r\n if click:\r\n received = self.q.receive_answer(mx, my)\r\n\r\n self.app.screen.blit(self.next_submit_icon, (250, 450))\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n self.q.display()\r\n x = 60\r\n y = 40\r\n if (self.started == 0):\r\n self.app.draw_text(\"Press 'continue' to start the test\", self.app.font, (255, 255, 255),\r\n self.app.screen, 150 + x, 120 + y)\r\n\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.727477490901947, "alphanum_fraction": 0.7376126050949097, "avg_line_length": 25.909090042114258, "blob_id": "6375ed9c7df5174f5a2eb1da76cd33e9672dbd66", "content_id": "a3c1b800e348ef8116fc8cff11cae04c934440e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 888, "license_type": "no_license", "max_line_length": 122, "num_lines": 33, "path": "/README.md", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "# MuSeek\nMuSeek was designed to make the idea of learning the piano easier and fun. 
\n\n- LEARN...\r\n\r\n ...Using our visual, audio and kinesthetic tutorials\r\n\r\n- PRACTICE...\r\n\r\n ...Using our instrument's simulator, which includes a voice chat bot\r\n\r\n- EVALUATE...\r\n\r\n ...Using different types of audio, visual and kinesthetic levels\r\n\r\n- ANALYSE...\r\n\r\n ...Using your account page, which predicts the date of your graduation, based on your progress\r\n\r\n- GO WITH THE FLOW\r\n\r\n ...Using our recommended section, containing levels specially chosen for you\r\n\r\n- HELP US HELP YOU GROW\r\n\r\n ...Rate us using the feedback page or by filling in the following survey, so that we can grow in a way that helps you grow:\r\nhttps://docs.google.com/forms/d/1PqoF3jY5mfZZb-xlsTAXEqELi_6na15s6qIVMSI3yKo/edit\r\n\r\n\r\nPlay our game today in order to play the piano tomorrow!\r\n\r\n\r\nDemo video: https://www.youtube.com/watch?v=lKT_tiFx3WM\n" }, { "alpha_fraction": 0.6355841159820557, "alphanum_fraction": 0.6516613364219666, "avg_line_length": 39.42222213745117, "blob_id": "29e71318088ed0feb450b132950a984fde5a4360", "content_id": "e67e4d267baed077c5a2c6586fc2e637a3bd6bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 147, "num_lines": 45, "path": "/useful_classes/database.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import ssl\r\n\r\nfrom pymongo import MongoClient\r\n\r\n\r\nclass Database:\r\n    #code borrowed and improved from our IP project - CLEF2020, our team's repository: https://github.com/MoisanuStefan/CLEF2020-CheckThat-Lab-Team3\r\n    def __init__(self, connection_string):\r\n        self.__connection_string = connection_string\r\n        self.__database_name = None\r\n        self.__client = None\r\n        self.__db_handler = None\r\n        self.__collection_handler = None\r\n\r\n    # code borrowed and improved from our IP project - CLEF2020, our team's repository: https://github.com/MoisanuStefan/CLEF2020-CheckThat-Lab-Team3\r\n    def database_init(self, database_name):\r\n        self.__database_name = database_name\r\n        self.__client = MongoClient(self.__connection_string,ssl_cert_reqs=ssl.CERT_NONE)\r\n        self.__db_handler = self.__client[self.__database_name]\r\n\r\n    # code borrowed and improved from our IP project - CLEF2020, our team's repository: https://github.com/MoisanuStefan/CLEF2020-CheckThat-Lab-Team3\r\n    def set_collection(self, collection_name):\r\n        self.__collection_handler = self.__db_handler[collection_name]\r\n        return self.__collection_handler\r\n\r\n    def insert(self,data):\r\n        self.__collection_handler.insert_one(data)\r\n\r\n    def exists(self,key,value):\r\n        result = self.__collection_handler.find_one({key: value})\r\n        if (result):\r\n            return 1\r\n        else:\r\n            return 0\r\n\r\n    def get(self,key,value,to_get):\r\n        result = self.__collection_handler.find_one({key: value})\r\n        return result[to_get]\r\n\r\n    def update_database(self, key, key_value, to_update_key, to_update_value):\r\n        pass\r\n\r\n    def increment_database(self, key, key_value, to_update_key, pas=1):\r\n        self.__collection_handler.update_one({key: key_value},{\"$inc\":{to_update_key:pas}})\r\n        print(\"incremented\")\r\n\r\n" }, { "alpha_fraction": 0.504999041557312, "alphanum_fraction": 0.538325846195221, "avg_line_length": 42.74561309814453, "blob_id": "541eedd849bba5fa0879feeaf3770f72a7a5a3d1", "content_id": "4a836562f8b4bd3291a576840213043015a0f09d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5101, "license_type": "no_license", "max_line_length": 113, "num_lines": 114, "path": 
"/questions/chord_question.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import random\r\nfrom useful_classes.database import *\r\nfrom useful_classes.piano_sound import Piano_sound\r\nimport pygame\r\n\r\n\r\nclass Chord_Question():\r\n def __init__(self, app):\r\n self.database_handler = app.database_handler\r\n self.outcome = None\r\n self.checked = 0\r\n self.app = app\r\n self.question = \"Play chord C minor\"\r\n self.notex_title_list = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G',\r\n 'Ab', 'A', 'Bb', 'B']\r\n self.chords_name = [\"C minor\", \"D minor\", \"E minor\", \"F minor\", \"G minor\", \"A minor\", \"B minor\"]\r\n self.chords = [[0, 3, 7], [2, 5, 9], [4, 7, 11], [5, 8, 0], [7, 10, 2], [9, 0, 4], [11, 2, 6]]\r\n self.chords_sounds = [\"sounds/Chord-cm.wav\", \"sounds/Chord-dm.wav\", \"sounds/Chord-em.wav\",\r\n \"sounds/Chord-fm.wav\", \"sounds/Chord-em.wav\", \"sounds/Chord-am.wav\",\r\n \"sounds/Chord-bm.wav\"]\r\n self.sound = self.chords_sounds[0]\r\n self.topic = [0, 2, 4, 5, 7, 9, 11]\r\n self.answer = 0\r\n self.current_answer = [-1, -1, -1]\r\n self.right_answer = [0, 3, 7]\r\n self.current_position = -1\r\n self.soundd = Piano_sound(\"sounds/piano-c.wav\")\r\n\r\n def set_random(self):\r\n i = self.answer\r\n while i == self.answer:\r\n i = random.randint(0, 6)\r\n self.answer = i\r\n\r\n def next_question(self):\r\n\r\n self.current_position = -1\r\n self.current_answer = [-1, -1, -1]\r\n self.set_random()\r\n self.question = \"Play chord \" + self.chords_name[self.answer]\r\n self.right_answer = self.chords[self.answer]\r\n self.sound = self.chords_sounds[self.answer]\r\n self.checked = 0\r\n\r\n def play_chord(self):\r\n self.soundd.set_note(self.chords_sounds[self.answer])\r\n self.soundd.play()\r\n\r\n def receive_answer(self, i):\r\n if self.current_position == -1:\r\n self.current_position = 0\r\n if (self.current_position == 2 and self.current_answer[2] == -1):\r\n self.current_answer[self.current_position] = i\r\n elif self.current_position <= 1:\r\n self.current_answer[self.current_position] = i\r\n self.current_position += 1\r\n print(self.current_answer)\r\n\r\n def is_correct(self):\r\n for i in range(0, 3):\r\n if self.right_answer[i] != self.current_answer[i]:\r\n return -1\r\n return 1\r\n\r\n def redo(self):\r\n self.current_position = -1\r\n self.current_answer = [-1, -1, -1]\r\n self.checked = 0\r\n\r\n def check_answer(self):\r\n self.current_position = 0\r\n correct = self.is_correct()\r\n if correct == 1:\r\n self.outcome = \"Congratulations. You made it!\"\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n self.database_handler.increment_database(\"username\", self.app.current_user, \"chords_s\", 1)\r\n self.database_handler.database_init(\"users_progress\")\r\n self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n self.database_handler.insert(\r\n {\"piano_c\": 0, \"piano_l\": 0, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 1,\r\n \"topic\": self.topic[self.answer], \"result\": 1})\r\n\r\n else:\r\n self.outcome = \"That's wrong... 
but play it cool!\"\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n self.database_handler.increment_database(\"username\", self.app.current_user, \"chords_f\", 1)\r\n\r\n self.database_handler.database_init(\"users_progress\")\r\n self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n self.database_handler.insert(\r\n {\"piano_c\": 0, \"piano_l\": 0, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 1,\r\n \"topic\": self.topic[self.answer], \"result\": 0})\r\n\r\n self.checked = 1\r\n\r\n def display(self):\r\n self.app.draw_text(self.question, self.app.font, (255, 255, 255), self.app.screen, 310, 50)\r\n if (self.checked == 1):\r\n self.app.draw_text(self.outcome, self.app.font, (255, 255, 255), self.app.screen, 225, 500)\r\n for i in range(0, 3):\r\n self.app.draw_text(self.notex_title_list[self.current_answer[i]], self.app.font, (255, 255, 255),\r\n self.app.screen, 300 + i * 50, 100)\r\n\r\n for i in range(0, 3):\r\n if self.current_answer[i] != -1:\r\n self.app.draw_text(self.notex_title_list[self.current_answer[i]], self.app.font, (255, 255, 255),\r\n self.app.screen, 300 + i * 50, 100)\r\n\r\n if (self.current_position != -1):\r\n for i in range(0, self.current_position):\r\n self.app.draw_text(self.notex_title_list[self.current_answer[i]], self.app.font, (255, 255, 255),\r\n self.app.screen, 300 + i * 50, 100)\r\n" }, { "alpha_fraction": 0.3892171382904053, "alphanum_fraction": 0.4719350039958954, "avg_line_length": 48.90225601196289, "blob_id": "31380614a7ab2ba31844ca3098ead6690040d418", "content_id": "0b8ff3d4676657f163ffb3eca53acf4efc079303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6770, "license_type": "no_license", "max_line_length": 143, "num_lines": 133, "path": "/screens/piano_chords_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom useful_classes.piano_sound import Piano_sound\r\nfrom questions.chord_question import *\r\n\r\n\r\nclass Piano_chords_screen():\r\n def __init__(self, app):\r\n self.point_x = 0\r\n self.point_y = 0\r\n self.question = Chord_Question(app)\r\n self.app = app\r\n self.click = False\r\n self.note_icon1 = pygame.image.load('icons/pianoFUL.jpg')\r\n self.notex_title_list = ['C (Do)', 'C# (Do#)', 'D (Re)', 'Eb (Mi b)', 'E (Mi)', 'F (Fa)', 'F (Fa#)', 'G (Sol)',\r\n 'Ab (La b)', 'A (La)', 'Bb (Si b)', 'B (Si)']\r\n self.piano_sound = Piano_sound(\"sounds/piano-c.wav\")\r\n self.sounds_list = [\"sounds/piano-c.wav\", \"sounds/piano-cd.wav\", \"sounds/piano-d.wav\", \"sounds/piano-eb.wav\",\r\n \"sounds/piano-e.wav\", \"sounds/piano-f.wav\", \"sounds/piano-fd.wav\", \"sounds/piano-g.wav\",\r\n \"sounds/piano-ab.wav\", \"sounds/piano-a.wav\", \"sounds/piano-bb.wav\", \"sounds/piano-b.wav\"]\r\n self.new_button_list = [(100, 348, 30, 82), (118, 200, 18, 150), (133, 348, 30, 82), (161, 200, 18, 150),\r\n (166, 348, 30, 82), (200, 348, 30, 82),\r\n (220, 200, 18, 150), (234, 348, 30, 82), (257, 200, 18, 150), (266, 348, 30, 82),\r\n (292, 200, 18, 150), (300, 348, 30, 82),\r\n (334, 348, 30, 82), (354, 200, 18, 150), (367, 348, 30, 82), (392, 200, 18, 150),\r\n (401, 348, 30, 82), (434, 348, 30, 82),\r\n (451, 200, 18, 150), (467, 348, 30, 82), (490, 200, 18, 150), (501, 348, 30, 82),\r\n (528, 200, 18, 150), (534, 348, 30, 82),\r\n (567, 348, 30, 82), (586, 200, 18, 150), (601, 348, 30, 82), (625, 200, 18, 
150),\r\n (635, 348, 30, 82), (667, 348, 30, 82),\r\n (686, 200, 18, 150), (700, 348, 30, 82), (724, 200, 18, 150), (734, 348, 30, 82),\r\n (761, 200, 18, 150), (769, 348, 30, 82), ]\r\n\r\n self.buttons = []\r\n for coord in self.new_button_list:\r\n self.buttons.append(pygame.Rect(coord))\r\n\r\n self.next_chord_button = pygame.Rect(50, 50, 100, 100)\r\n self.play_button = pygame.Rect(700, 25, 100, 100)\r\n self.play_icon = pygame.image.load(\"icons/play_icon.jpg\")\r\n self.redo_icon = pygame.image.load(\"icons/redo_icon.jpg\")\r\n self.redo_button = pygame.Rect(50, 450, 100, 100)\r\n self.submit_icon = pygame.image.load(\"icons/check_icon.jpg\")\r\n self.submit_button = pygame.Rect(700, 450, 100, 100)\r\n\r\n self.play_icon_hov = pygame.image.load(\"icons/play_icon_hov.jpg\")\r\n self.redo_icon_hov = pygame.image.load(\"icons/redo_icon_hov.jpg\")\r\n self.submit_icon_hov = pygame.image.load(\"icons/check_icon_hov.jpg\")\r\n\r\n def play_the_piano(self):\r\n running = True\r\n playing = True\r\n while running:\r\n click = False\r\n for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n self.app.screen.fill((0, 0, 0))\r\n mx, my = pygame.mouse.get_pos() #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n\r\n if click:\r\n print(str(mx) + ' ' + str(my))\r\n if self.play_button.collidepoint(mx, my):\r\n self.app.screen.blit(self.play_icon_hov, (700, 25))\r\n self.app.draw_text(\"listen\", self.app.font, (255, 255, 255), self.app.screen, 717, 120)\r\n if click:\r\n self.question.play_chord()\r\n else:\r\n self.app.screen.blit(self.play_icon, (700, 25))\r\n\r\n yes = -5\r\n if click:\r\n yes = 0\r\n for i in range(36):\r\n if self.buttons[i].collidepoint((mx, my)):\r\n if click:\r\n i = i % 12\r\n yes = 1\r\n self.point_x = mx\r\n self.point_y = my\r\n if (playing):\r\n\r\n self.piano_sound.set_note(self.sounds_list[i])\r\n self.piano_sound.play()\r\n playing = False\r\n else:\r\n self.piano_sound.set_note(self.sounds_list[i])\r\n self.piano_sound.play()\r\n playing = True\r\n self.question.receive_answer(i)\r\n self.question.display()\r\n\r\n if self.next_chord_button.collidepoint((mx, my)):\r\n self.app.screen.blit(pygame.image.load(\"icons/next_hov.jpg\"), (50, 50))\r\n self.app.draw_text(\"next\", self.app.font, (255, 255, 255), self.app.screen, 50 + 21, 120)\r\n if click:\r\n self.question.next_question()\r\n else:\r\n self.app.screen.blit(pygame.image.load(\"icons/next.jpg\"), (50, 50))\r\n\r\n if self.submit_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.submit_icon_hov, (700, 450))\r\n self.app.draw_text(\"submit\", self.app.font, (255, 255, 255), self.app.screen, 700, 550)\r\n if click:\r\n self.question.check_answer()\r\n else:\r\n self.app.screen.blit(self.submit_icon, (700, 450))\r\n\r\n if self.redo_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.redo_icon_hov, (50, 450))\r\n self.app.draw_text(\"redo\", self.app.font, (255, 255, 255), self.app.screen, 75, 550)\r\n if click:\r\n self.question.redo()\r\n else:\r\n self.app.screen.blit(self.redo_icon, (50, 450))\r\n\r\n self.app.screen.blit(self.note_icon1, (100, 200))\r\n\r\n self.app.screen.blit(pygame.image.load('icons/hand.png'), (mx - 25, my - 
10))\r\n\r\n if yes == 0:\r\n self.point_x = 0\r\n if self.point_x:\r\n self.app.screen.blit(pygame.image.load('icons/ball.png'), (self.point_x - 25, self.point_y - 10))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.4476434588432312, "alphanum_fraction": 0.48392823338508606, "avg_line_length": 42.48245620727539, "blob_id": "1e5e0ded88fbd512985f1dc16a5d35aad8bc5112", "content_id": "76a2d9dbcbfd7c53c26eda1c02e44d64efe25966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5071, "license_type": "no_license", "max_line_length": 144, "num_lines": 114, "path": "/screens/piano_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom questions.choise_question import Choice_Question\r\nfrom screens.piano_tutorial_screen import Piano_tutorial_screen\r\nfrom questions.piano_question import Piano_Question\r\nimport random\r\n\r\n\r\nclass Piano_screen():\r\n def __init__(self, app):\r\n self.checked = 0\r\n self.app = app\r\n self.click = False\r\n self.q = Piano_Question(self.app, 0, \"choice\")\r\n self.tutorial_icon = pygame.image.load('icons/tutorial.jpg')\r\n self.piano_tutorial_screen = Piano_tutorial_screen(self.app)\r\n self.count = 0\r\n self.started = 0\r\n self.case = 0\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n\r\n def hover_photo(self):\r\n if self.case == 1:\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn_hov.jpg')\r\n else:\r\n self.next_submit_icon = pygame.image.load('icons/play_hov.jpg')\r\n\r\n def unhover_photo(self):\r\n if self.case == 1:\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn.jpg')\r\n else:\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n\r\n def piano(self):\r\n\r\n running = True\r\n next_button = pygame.Rect(250, 450, 400, 50)\r\n piano_button = pygame.Rect(775, 25, 100, 100)\r\n while running:\r\n click = False\r\n for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n\r\n self.app.screen.fill((0, 0, 0))\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n if piano_button.collidepoint((mx, my)):\r\n self.app.draw_text('explore & learn', self.app.font, (255, 255, 255), self.app.screen, 500, 40)\r\n if click:\r\n print(\"piano_tutorial\")\r\n self.piano_tutorial_screen.piano_tutorial()\r\n\r\n # self.app.draw_text('piano', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n\r\n if next_button.collidepoint((mx, my)):\r\n self.hover_photo()\r\n if click:\r\n if (self.started == 0):\r\n self.checked = 1\r\n self.started = 1\r\n self.next_submit_icon = pygame.image.load('icons/submit_btn.jpg')\r\n self.case = 1\r\n if self.checked == 1:\r\n n = self.count % 3\r\n if (n == 1):\r\n self.q = Piano_Question(self.app, 0, \"listen\")\r\n elif (n == 2):\r\n self.q = Piano_Question(self.app, 0, \"read\")\r\n else:\r\n self.q = Piano_Question(self.app, 0, \"choice\")\r\n self.q.set_next()\r\n self.count += 1\r\n self.checked = 0\r\n else:\r\n self.q.check_answer()\r\n self.checked = 1\r\n self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n 
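# case 0 switches hover_photo/unhover_photo back to the 'continue' artwork\r\n                        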
self.case = 0\r\n else:\r\n self.unhover_photo()\r\n if click:\r\n received = self.q.receive_answer(mx, my)\r\n\r\n # pygame.draw.rect(self.app.screen, (255, 162, 193), next_button)\r\n self.app.screen.blit(self.next_submit_icon, (250, 450))\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n self.app.screen.blit(self.tutorial_icon, (775, 25))\r\n self.q.display()\r\n x = 60\r\n y = 40\r\n if (self.started == 0):\r\n self.app.draw_text(\"Press 'continue' to start the test\", self.app.font, (255, 255, 255),\r\n self.app.screen, 150 + x, 120 + y)\r\n self.app.draw_text(\r\n \"Press the hint icon to explore\",\r\n self.app.font, (255, 255, 255), self.app.screen, 150 + x, 160 + y)\r\n self.app.draw_text(\r\n \"the piano's simulator \",\r\n self.app.font, (255, 255, 255), self.app.screen, 326 + x, 200 + y)\r\n self.app.draw_text(\r\n \"& the beginner's manual\",\r\n self.app.font, (255, 255, 255), self.app.screen, 280 + x, 240 + y)\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.5483092069625854, "avg_line_length": 18.799999237060547, "blob_id": "b12c81b38d6adc8adad86bcc722faa2e4b905f56", "content_id": "9c180594b47cf4f2a4d1164f510eb7e7b96e7e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 35, "num_lines": 20, "path": "/useful_classes/piano_sound.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "from pygame import mixer\r\n\r\nclass Piano_sound():\r\n def __init__(self,sound):\r\n self.sound=sound\r\n mixer.init()\r\n mixer.music.load(sound)\r\n mixer.music.set_volume(0.8)\r\n\r\n def set_note(self,note):\r\n mixer.music.load(note)\r\n\r\n def print(self):\r\n print(self.sound)\r\n\r\n def play(self):\r\n mixer.music.play()\r\n\r\n def stop(self):\r\n mixer.music.stop()" }, { "alpha_fraction": 0.5148355960845947, "alphanum_fraction": 0.5527933835983276, "avg_line_length": 41.52325439453125, "blob_id": "cbdfd3659ac21c34bdd41a20e6743d9b6a240c95", "content_id": "4aff9bfba305731c6dbce0ddef97a31f1acd0606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3741, "license_type": "no_license", "max_line_length": 146, "num_lines": 86, "path": "/screens/startup_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame,sys\r\nfrom pygame.locals import *\r\nfrom screens.login_screen import Login_screen\r\nfrom screens.signup_screen import Signup_screen\r\nfrom useful_classes.database import *\r\n\r\nclass StartUp():\r\n def __init__(self):\r\n pygame.init()\r\n self.color=(255, 162, 193)\r\n pygame.display.set_caption('Museek')\r\n self.screen = pygame.display.set_mode((900, 600), 0, 32)\r\n self.database_handler = Database(\r\n \"mongodb+srv://test:[email protected]/test?retryWrites=true&w=majority\")\r\n self.current_user=None\r\n self.font = pygame.font.SysFont('inkfree', 32)\r\n self.click=False\r\n self.login_screen=Login_screen(self)\r\n self.signup_screen = Signup_screen(self)\r\n self.mainClock = pygame.time.Clock()\r\n self.logo = pygame.image.load('icons/museek.jpg')\r\n self.bg = pygame.image.load('icons/art.jpg')\r\n self.bg1 = pygame.image.load('icons/art_flipped.jpg')\r\n self.signup_btn = pygame.image.load('icons/signup_btn.jpg')\r\n self.signup_btn_hov = 
pygame.image.load('icons/signup_btn_hov.jpg')\r\n self.login_btn = pygame.image.load('icons/login_btn.jpg')\r\n self.login_btn_hov = pygame.image.load('icons/login_btn_hov.jpg')\r\n program_Icon = pygame.image.load('icons/prgic.png')\r\n pygame.display.set_icon(program_Icon)\r\n\r\n def set_user(self,username):\r\n self.current_user=username\r\n\r\n def draw_text(self,text, font, color, surface, x, y):\r\n textobj = font.render(text, 1, color)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)\r\n\r\n def main_menu(self):\r\n while True:\r\n click = False\r\n for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n\r\n self.screen.fill((0, 0, 0))\r\n #self.draw_text(\"main menu\", self.font, (255, 255, 255), self.screen, 20,20)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n\r\n\r\n button_1 = pygame.Rect(250, 150, 400, 50)\r\n button_2 = pygame.Rect(250, 250, 400, 50)\r\n if button_1.collidepoint((mx, my)):\r\n self.screen.blit(self.login_btn_hov, (250, 150))\r\n if click:\r\n self.login_screen.login()\r\n #self.signup_screen.signup()\r\n else:\r\n self.screen.blit(self.login_btn, (250, 150))\r\n\r\n if button_2.collidepoint((mx, my)):\r\n self.screen.blit(self.signup_btn_hov, (250, 250))\r\n if click:\r\n self.signup_screen.signup()\r\n else:\r\n self.screen.blit(self.signup_btn, (250, 250))\r\n #pygame.draw.rect(self.screen, (255, 162, 193), button_1)\r\n #pygame.draw.rect(self.screen, (255, 162, 193), button_2)\r\n self.screen.blit(self.logo, (250, 20))\r\n self.screen.blit(self.bg, (20, 50))\r\n self.screen.blit(self.bg1, (700, 50))\r\n\r\n\r\n self.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n pygame.display.update()\r\n self.mainClock.tick(60)" }, { "alpha_fraction": 0.5457360148429871, "alphanum_fraction": 0.5717445015907288, "avg_line_length": 49.61643981933594, "blob_id": "161b0d5add139dc2b3c1fba6a9a97ce65c3c5118", "content_id": "3c725578f23f8f1cf651151a4d27aa852b886963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11304, "license_type": "no_license", "max_line_length": 144, "num_lines": 219, "path": "/screens/my_account_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom datetime import date\r\nfrom useful_classes.prediction_algorithm import *\r\n\r\n\r\nclass My_account_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.click = False\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n self.p_progress = 0\r\n self.p_regress = 0\r\n self.chords_progress = 0\r\n self.chords_regress = 0\r\n self.gn_progress = 0\r\n self.gn_regress = 0\r\n self.total_piano = 50\r\n self.total_chords = 20\r\n self.total_gn = 40\r\n self.p_days_left = 63\r\n self.chords_days_left = 35\r\n self.gn_days_left = 35\r\n self.o_day = None\r\n self.o_month = None\r\n self.o_year = None\r\n\r\n def date_difference(self, cd, cm, cy, od, om, 
oy):\r\n        # rough difference in days between the two dates, approximating every month as 30 days\r\n        mfy = 0\r\n        d = 0\r\n        if (cm > om):\r\n            mfy = (cy - oy) * 12 + cm - om - 1\r\n        else:\r\n            mfy = (cy - 1 - oy) * 12 + 12 - om - 1 + cm\r\n        if (cd > od):\r\n            mfy += 1\r\n            d = cd - od\r\n        else:\r\n            d = 30 - od + cd\r\n        d += mfy * 30\r\n        if (d == 0):\r\n            return 1\r\n        else:\r\n            return d\r\n\r\n    def get_progress(self):\r\n        print(\"gettin\")\r\n        self.p_progress = 0\r\n        self.database_handler.database_init(\"users\")\r\n        self.mycol = self.database_handler.set_collection(\"users_data\")\r\n        print(self.app.current_user)\r\n        user = self.database_handler.exists(\"username\", self.app.current_user)\r\n\r\n        if user:\r\n            self.o_day = self.database_handler.get(\"username\", self.app.current_user, \"day\")\r\n            self.o_month = self.database_handler.get(\"username\", self.app.current_user, \"month\")\r\n            self.o_year = self.database_handler.get(\"username\", self.app.current_user, \"year\")\r\n            self.p_progress += self.database_handler.get(\"username\", self.app.current_user, \"piano_c_s\")\r\n            self.p_progress += self.database_handler.get(\"username\", self.app.current_user, \"piano_l_s\")\r\n            self.p_progress += self.database_handler.get(\"username\", self.app.current_user, \"piano_r_s\")\r\n            self.p_regress += self.database_handler.get(\"username\", self.app.current_user, \"piano_c_f\")\r\n            self.p_regress += self.database_handler.get(\"username\", self.app.current_user, \"piano_l_f\")\r\n            self.p_regress += self.database_handler.get(\"username\", self.app.current_user, \"piano_r_f\")\r\n            self.chords_progress = self.database_handler.get(\"username\", self.app.current_user, \"chords_s\")\r\n            self.chords_regress = self.database_handler.get(\"username\", self.app.current_user, \"chords_f\")\r\n            self.gn_progress = self.database_handler.get(\"username\", self.app.current_user, \"gen_c_s\")\r\n            self.gn_regress = self.database_handler.get(\"username\", self.app.current_user, \"gen_c_f\")\r\n            print(\"progress\")\r\n            print(self.chords_progress)\r\n            print(\"progress\")\r\n            print(self.p_progress)\r\n\r\n    def get_mean(self, vector):\r\n        sum = 0\r\n        count = 0\r\n        for element in vector:\r\n            sum += element\r\n            count += 1\r\n        return float(sum / count)\r\n\r\n    def graduation_expect(self):\r\n        if (self.p_progress > 0 and self.p_regress > 0):\r\n            left = self.total_piano - self.p_progress  # the number of questions still left to complete\r\n            vector = []\r\n            for topic in range(11):\r\n                vector.append([1, 0, 0, 0, 0, topic])\r\n            for topic in range(11):\r\n                vector.append([0, 1, 0, 0, 0, topic])\r\n            for topic in range(11):\r\n                vector.append([0, 0, 1, 0, 0, topic])\r\n\r\n            n = Prediction_Algorithm(self.app)  # train a model on the progress data we already have\r\n            n.train_model()  # uses an SVM, see the Prediction_Algorithm class\r\n            prob = n.get_probabillity(vector)  # the probability of answering future questions correctly,\r\n            # by question type and topic\r\n\r\n            today = date.today()\r\n            c_day = int(today.strftime('%d'))\r\n            c_month = int(today.strftime('%m'))\r\n            c_year = int(today.strftime('%y'))\r\n\r\n            days = self.date_difference(c_day, c_month, c_year, self.o_day, self.o_month, self.o_year)\r\n            # use the account creation date to estimate how often the user\r\n            # uses the app\r\n            frequency = float(self.p_progress / days)\r\n\r\n            mean = self.get_mean(prob)  # the average probability of answering one question correctly\r\n
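            # e.g. with 30 questions left, 2 answers/day and mean correctness 0.6: 30 / (2 * 0.6) = 25 days\r\n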
            self.p_days_left = int(left / float(frequency * mean))\r\n            # days until graduation: the remaining questions divided by\r\n            # the predicted frequency per day times the predicted probability of a correct answer\r\n\r\n    def chord_graduation_expect(self):\r\n        if (self.chords_progress > 0 and self.chords_regress > 0):\r\n            left = self.total_chords - self.chords_progress  # the number of questions still left to complete\r\n            vector = []\r\n            for topic in [0, 2, 4, 5, 7, 9, 11]:\r\n                vector.append([0, 0, 0, 0, 1, topic])\r\n\r\n            n = Prediction_Algorithm(self.app)  # train a model on the progress data we already have\r\n            n.train_model()  # uses an SVM, see the Prediction_Algorithm class\r\n            prob = n.get_probabillity(vector)  # the probability of answering future questions correctly,\r\n            # by question type and topic\r\n\r\n            today = date.today()\r\n            c_day = int(today.strftime('%d'))\r\n            c_month = int(today.strftime('%m'))\r\n            c_year = int(today.strftime('%y'))\r\n\r\n            days = self.date_difference(c_day, c_month, c_year, self.o_day, self.o_month, self.o_year)\r\n            # use the account creation date to estimate how often the user\r\n            # uses the app\r\n            frequency = float(self.chords_progress / days)\r\n\r\n            mean = self.get_mean(prob)  # the average probability of answering one question correctly\r\n            self.chords_days_left = int(left / float(frequency * mean))\r\n            # days until graduation: the remaining questions divided by\r\n            # the predicted frequency per day times the predicted probability of a correct answer\r\n\r\n    def gn_graduation_expect(self):\r\n        if (self.gn_progress > 0 and self.gn_regress > 0):\r\n            left = self.total_gn - self.gn_progress  # the number of questions still left to complete\r\n            vector = []\r\n            for topic in range(11):\r\n                vector.append([0, 0, 0, 1, 0, topic])\r\n\r\n            n = Prediction_Algorithm(self.app)  # train a model on the progress data we already have\r\n            n.train_model()  # uses an SVM, see the Prediction_Algorithm class\r\n            prob = n.get_probabillity(vector)  # the probability of answering future questions correctly,\r\n            # by question type and topic\r\n\r\n            today = date.today()\r\n            c_day = int(today.strftime('%d'))\r\n            c_month = int(today.strftime('%m'))\r\n            c_year = int(today.strftime('%y'))\r\n\r\n            days = self.date_difference(c_day, c_month, c_year, self.o_day, self.o_month, self.o_year)\r\n            # use the account creation date to estimate how often the user\r\n            # uses the app\r\n            frequency = float((self.gn_progress + self.gn_regress) / days)\r\n\r\n            mean = self.get_mean(prob)  # the average probability of answering one question correctly\r\n            self.gn_days_left = int(left / float(frequency * mean))\r\n            print(\"GN\")\r\n            print(self.gn_days_left)\r\n            # days until graduation: the remaining questions divided by\r\n            # the predicted frequency per day times the predicted probability of a correct answer\r\n\r\n    def my_account(self):\r\n        running = True\r\n        while running:\r\n            click = False\r\n            for event in pygame.event.get():  #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n                if event.type == QUIT:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n                if event.type == KEYDOWN:\r\n                    if event.key == K_ESCAPE:\r\n                        running = False\r\n                if event.type == MOUSEBUTTONDOWN:\r\n                    if event.button == 1:\r\n                        click = True\r\n            self.app.screen.fill((0, 0, 0))  #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n            self.app.draw_text('Progress', self.app.font, self.app.color, 
self.app.screen, 330, 70)\r\n self.app.draw_text('Graduation', self.app.font, self.app.color, self.app.screen, 630, 70)\r\n self.app.draw_text('my account', self.app.font, self.app.color, self.app.screen, 20, 20)\r\n\r\n p_progress = int(self.p_progress * 200 / self.total_piano)\r\n chord_progress = int(self.chords_progress * 200 / self.total_chords)\r\n gn_progress = int(self.gn_progress * 200 / self.total_gn)\r\n\r\n pygame.draw.rect(self.app.screen, self.app.color, pygame.Rect(300, 150, p_progress, 30))\r\n pygame.draw.line(self.app.screen, (255, 162, 193), (350 - 8, 150),\r\n (350 - 8, 178))\r\n pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(300, 150, 200, 30), 1)\r\n self.app.draw_text('Piano basics', self.app.font, self.app.color, self.app.screen, 100, 145)\r\n self.app.draw_text(str(self.p_days_left*2) + ' days', self.app.font, self.app.color, self.app.screen, 650,\r\n 145)\r\n\r\n pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(300, 250, chord_progress, 30))\r\n pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(300, 250, 200, 30), 1)\r\n self.app.draw_text('Piano Chords', self.app.font, self.app.color, self.app.screen, 100, 245)\r\n self.app.draw_text(str(self.chords_days_left*3) + ' days', self.app.font, self.app.color, self.app.screen,\r\n 650, 245)\r\n\r\n pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(300, 350, gn_progress, 30))\r\n pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(300, 350, 200, 30), 1)\r\n self.app.draw_text('General', self.app.font, self.app.color, self.app.screen, 100, 345)\r\n self.app.draw_text(str(self.gn_days_left) + ' days', self.app.font, self.app.color, self.app.screen, 650,\r\n 345)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.6125186085700989, "alphanum_fraction": 0.6631892919540405, "avg_line_length": 31.549999237060547, "blob_id": "cc90852405b4472de1b147224bde7bed6c2243f9", "content_id": "a101127892731e466a5665abc0193ccf0bfdea1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 113, "num_lines": 40, "path": "/useful_classes/encryption.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "#code reused from my personal SI project (IIIrd year)\r\n\r\ndef split_blocks(message):\r\n return [message[i:i + 16] for i in range(0, len(message), 16)]\r\n\r\n#code borrowed from: https://stackoverflow.com/questions/66493774/how-to-calculate-the-xor-of-two-bytes-in-python\r\ndef XoR(t1, t2):\r\n return bytes([_a ^ _b for _a, _b in zip(t1, t2)])\r\n\r\ndef CBCencode(plain_text, key, initVector):\r\n whole_cypher_text = b''\r\n plain_blocks = split_blocks(plain_text)\r\n for plain_block in plain_blocks:\r\n plain_block = plain_block.encode()\r\n ciphertext = XoR(initVector, key)\r\n ciphertext = XoR(plain_block, ciphertext)\r\n whole_cypher_text += ciphertext[0:16]\r\n initVector = ciphertext\r\n print(whole_cypher_text)\r\n return whole_cypher_text\r\n\r\n\r\n# CBC decoding algorithm\r\ndef CBCdecode(whole_cypher_text, key, initVector):\r\n wholePlainText = ''\r\n whole_cypher_text = split_blocks(whole_cypher_text)\r\n for cipher in whole_cypher_text:\r\n plain = XoR(initVector, key)\r\n plain = XoR(cipher, plain)\r\n initVector = cipher\r\n wholePlainText += plain.decode()\r\n return wholePlainText\r\n\r\n\r\ndef 
encrypt(text):\r\n return CBCencode(text, b'1234567890123456', b'ddddcccc11118888')\r\n\r\n\r\ndef decrypt(text):\r\n return CBCdecode(text, b'1234567890123456', b'ddddcccc11118888')\r\n" }, { "alpha_fraction": 0.47540491819381714, "alphanum_fraction": 0.5287942290306091, "avg_line_length": 47.02941131591797, "blob_id": "fcb4db789eca8db28b0e33e75a3ef7f92e58aef6", "content_id": "210f7440555fbe6af0bef82ab849d5ae01312fc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3334, "license_type": "no_license", "max_line_length": 143, "num_lines": 68, "path": "/screens/piano_tutorial_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom screens.piano_notes_screen import Piano_notes_screen\r\nfrom screens.play_the_piano_screen import Play_the_piano_screen\r\n\r\n\r\nclass Piano_tutorial_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.click = False\r\n self.notes_icon = pygame.image.load('icons/piano-help-bt.jpg')\r\n self.piano_notes_screen = Piano_notes_screen(self.app)\r\n self.piano_play_screen = Play_the_piano_screen(self.app)\r\n self.play_icon = pygame.image.load('icons/piano_play-bt.jpg')\r\n\r\n def piano_tutorial(self):\r\n running = True\r\n notes_button = pygame.Rect(550, 525, 100, 50)\r\n play_piano_button = pygame.Rect(250, 525, 100, 50)\r\n while running:\r\n click = False\r\n for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n self.app.screen.fill((0, 0, 0))\r\n # self.app.draw_text('piano tutorial screen', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n mx, my = pygame.mouse.get_pos()\r\n if notes_button.collidepoint((mx, my)):\r\n self.app.draw_text(\"beginner's manual\", self.app.font, (255, 255, 255), self.app.screen, 460, 470)\r\n if click:\r\n print(\"piano_notes\")\r\n self.piano_notes_screen.piano_notes()\r\n if play_piano_button.collidepoint((mx, my)):\r\n self.app.draw_text('play the piano', self.app.font, (255, 255, 255), self.app.screen, 200, 470)\r\n if click:\r\n print(\"piano_play\")\r\n self.piano_play_screen.play_the_piano()\r\n\r\n x = 60\r\n y = 50\r\n self.app.draw_text(\"Play our game today...\", self.app.font, (255, 255, 255), self.app.screen,\r\n 135 + x, 120 + y)\r\n self.app.draw_text(\r\n \"...to play the piano tomorrow\",\r\n self.app.font, (255, 255, 255), self.app.screen, 244 + x, 160 + y)\r\n self.app.draw_text(\r\n \"Do you need help?\",\r\n self.app.font, (255, 255, 255), self.app.screen, 326 - 60 + x, 250 + y)\r\n self.app.draw_text(\r\n \"Do you need to practice?\",\r\n self.app.font, (255, 255, 255), self.app.screen, 280 - 60 + x, 300 + y)\r\n\r\n self.app.screen.blit(self.notes_icon, (550, 525))\r\n self.app.screen.blit(self.play_icon, (250, 525))\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n\r\n self.app.screen.blit(self.app.logo, (250, 20))\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.5051681995391846, "alphanum_fraction": 
0.5480172634124756, "avg_line_length": 52.29591751098633, "blob_id": "3e3b1ddcdfd6a805aa1706c4ebee62f2f7eb0d2b", "content_id": "78403ed0ece39560756bdbfe6d07602c44a737a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5321, "license_type": "no_license", "max_line_length": 118, "num_lines": 98, "path": "/questions/listen_question.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom useful_classes.piano_sound import Piano_sound\r\nimport random\r\n\r\n\r\nclass Listen_Question():\r\n def __init__(self, app, question_no):\r\n self.checked = 0\r\n self.mx = None\r\n self.my = None\r\n self.question_no = question_no\r\n self.database_handler = app.database_handler\r\n self.received_question = None\r\n self.received_answer = None\r\n self.app = app\r\n self.question = \"Press the note that you hear\"\r\n self.right_ans = 0\r\n self.sound = Piano_sound(\"sounds/piano-c.wav\")\r\n self.piano_sound = Piano_sound(\"sounds/piano-c.wav\")\r\n\r\n self.piano_icon = 'icons/play_icon.jpg'\r\n self.font = pygame.font.SysFont('inkfree', 32)\r\n self.a_buttons = []\r\n self.buttons_coord = [(450, 324, 30, 200), (466, 200, 18, 122), (484, 324, 30, 200),\r\n (501, 200, 18, 122), (518, 324, 30, 200), (534, 324, 30, 200),\r\n (550, 200, 18, 122), (567, 324, 30, 200), (582, 200, 18, 122),\r\n (600, 324, 30, 200), (615, 200, 18, 122), (632, 324, 30, 200)]\r\n self.sounds_list = [\"sounds/piano-c.wav\", \"sounds/piano-cd.wav\", \"sounds/piano-d.wav\", \"sounds/piano-eb.wav\",\r\n \"sounds/piano-e.wav\", \"sounds/piano-f.wav\", \"sounds/piano-fd.wav\", \"sounds/piano-g.wav\",\r\n \"sounds/piano-gd.wav\", \"sounds/piano-a.wav\", \"sounds/piano-bb.wav\", \"sounds/piano-b.wav\"]\r\n self.notes_icon_list = ['icons/piano_notes/do.jpg', 'icons/piano_notes/dod.jpg', 'icons/piano_notes/re.jpg',\r\n 'icons/piano_notes/mib.jpg', 'icons/piano_notes/mi.jpg', 'icons/piano_notes/fa.jpg',\r\n 'icons/piano_notes/fad.jpg', 'icons/piano_notes/sol.jpg',\r\n 'icons/piano_notes/lab.jpg', 'icons/piano_notes/la.jpg', 'icons/piano_notes/sib.jpg',\r\n 'icons/piano_notes/si.jpg']\r\n self.right_icons = ['icons/right_piano/do.jpg', 'icons/right_piano/dod.jpg', 'icons/right_piano/re.jpg',\r\n 'icons/right_piano/mib.jpg', 'icons/right_piano/mi.jpg', 'icons/right_piano/fa.jpg',\r\n 'icons/right_piano/fad.jpg', 'icons/right_piano/sol.jpg',\r\n 'icons/right_piano/lab.jpg', 'icons/right_piano/la.jpg', 'icons/right_piano/sib.jpg',\r\n 'icons/right_piano/si.jpg']\r\n self.play_button = pygame.Rect(250, 250, 100, 100)\r\n for btn in self.buttons_coord:\r\n b = pygame.Rect(btn)\r\n self.a_buttons.append(b)\r\n\r\n self.note_icon1 = pygame.image.load('icons/play_icon.jpg')\r\n self.piano_icon1 = pygame.image.load('icons/pianoo.jpg')\r\n\r\n def receive_answer(self, mx, my):\r\n for i in range(12):\r\n if (self.a_buttons[i].collidepoint((mx, my))):\r\n print(i)\r\n self.piano_sound.set_note(self.sounds_list[i])\r\n self.received_answer = i\r\n self.piano_icon1 = pygame.image.load(self.notes_icon_list[i])\r\n self.piano_sound.play()\r\n self.mx = mx\r\n self.my = my\r\n if self.play_button.collidepoint((mx, my)):\r\n self.sound.set_note(self.sounds_list[self.right_ans])\r\n self.sound.play()\r\n\r\n def check_answer(self):\r\n self.checked = 1\r\n self.piano_icon1 = pygame.image.load(self.right_icons[self.right_ans])\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n if 
self.right_ans == self.received_answer:\r\n self.database_handler.increment_database(\"username\", self.app.current_user, \"piano_l_s\", 1)\r\n self.database_handler.database_init(\"users_progress\")\r\n self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n self.database_handler.insert(\r\n {\"piano_c\": 0, \"piano_l\": 1, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 0,\r\n \"topic\": self.right_ans, \"result\": 1})\r\n else:\r\n self.checked = -1\r\n self.database_handler.increment_database(\"username\", self.app.current_user, \"piano_l_f\", 1)\r\n self.database_handler.database_init(\"users_progress\")\r\n self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n self.database_handler.insert(\r\n {\"piano_c\": 0, \"piano_l\": 1, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 0,\r\n \"topic\": self.right_ans, \"result\": 0})\r\n\r\n def set_random(self):\r\n i = random.randint(0, 11)\r\n self.right_ans = i\r\n self.piano_sound.set_note(self.sounds_list[i])\r\n\r\n\r\n def display(self):\r\n self.app.draw_text(self.question, self.font, (255, 255, 255), self.app.screen, 250, 50)\r\n self.app.screen.blit(self.note_icon1, (250, 250))\r\n self.app.screen.blit(self.piano_icon1, (450, 200))\r\n\r\n\r\n if self.mx and self.checked == -1:\r\n pygame.draw.line(self.app.screen, (255, 1, 1), (self.mx - 10, self.my + 10), (self.mx + 10, self.my - 10))\r\n pygame.draw.line(self.app.screen, (255, 1, 1), (self.mx - 10, self.my - 10), (self.mx + 10, self.my + 10))\r\n" }, { "alpha_fraction": 0.5637149214744568, "alphanum_fraction": 0.5669546723365784, "avg_line_length": 27.870967864990234, "blob_id": "cd2818e6bc7047015bf37d38de3be3415fd30964", "content_id": "29f47c5d14bfb523ccc991fe04905be347db611b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 56, "num_lines": 31, "path": "/questions/piano_question.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom questions.choise_question import Choice_Question\r\nfrom questions.read_question import Read_Question\r\nfrom questions.listen_question import Listen_Question\r\n\r\n\r\nclass Piano_Question():\r\n def __init__(self, app, question_no, type):\r\n self.app = app\r\n self.type = type\r\n if type == \"choice\":\r\n self.q = Choice_Question(self.app, 0)\r\n if type == \"read\":\r\n self.q = Read_Question(self.app, 0)\r\n if type == \"listen\":\r\n self.q = Listen_Question(self.app, 0)\r\n\r\n def set_next(self):\r\n if self.type == \"choice\":\r\n self.q.set_next()\r\n if self.type == \"read\" or self.type == \"listen\":\r\n self.q.set_random()\r\n\r\n def display(self):\r\n self.q.display()\r\n\r\n def receive_answer(self, mx, my):\r\n self.q.receive_answer(mx, my)\r\n\r\n def check_answer(self):\r\n self.q.check_answer()\r\n" }, { "alpha_fraction": 0.45239293575286865, "alphanum_fraction": 0.48513853549957275, "avg_line_length": 28.045454025268555, "blob_id": "70b5e9232dfd0354b629eecdd1b262769ef5d8e2", "content_id": "96dd9ab9d4c249288e200943d4435cf6a9907326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1985, "license_type": "no_license", "max_line_length": 118, "num_lines": 66, "path": "/useful_classes/prediction_algorithm.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "from math import sqrt\r\n\r\nfrom sklearn import svm\r\n\r\n\r\nclass Prediction_Algorithm:\r\n def 
__init__(self, app):\r\n self.app = app\r\n self.click = False\r\n self.database_handler = self.app.database_handler\r\n self.trained_model = None\r\n\r\n def train_model(self):\r\n self.database_handler.database_init(\"users_progress\")\r\n self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n a = self.mycol.find()\r\n vectorX = []\r\n vectorY = []\r\n for instance in a:\r\n vectorX.append(\r\n [instance[\"piano_c\"], instance[\"piano_l\"], instance[\"piano_r\"], instance[\"gen_c\"], instance[\"chords\"],\r\n instance[\"topic\"]])\r\n vectorY.append(instance[\"result\"])\r\n clf = svm.SVC(probability=True)\r\n clf.fit(vectorX, vectorY)\r\n self.trained_model = clf\r\n # self.predict([0,0,0,1,0,1])\r\n\r\n def predict(self, type):\r\n a = self.trained_model.predict(type)\r\n print(a)\r\n\r\n def get_probabillity(self, type):\r\n a = self.trained_model.predict_proba(type)[:, 1]\r\n print(a)\r\n return a\r\n\r\n def gauss(self, x):\r\n g_x = (2.0 / (1.0 * sqrt(2.0 * 3.14))) * pow(2.718, -(x * x / 300.0))\r\n return 1 / g_x\r\n\r\n def get_biggest(self, type):\r\n a = self.get_probabillity(type)\r\n\r\n total = [10, 30, 20, 20, 30]\r\n done = [1, 3, 7, 5, 2]\r\n probs = [-1, -1, -1, -1, -1]\r\n\r\n for j in range(3):\r\n avg = 0.0\r\n count = 0\r\n for i in range(j * 11, 11 + 11 * j):\r\n avg += a[i]\r\n count += 1\r\n probs[j] = avg / count\r\n\r\n maxi = -999999.0\r\n maxi_el = None\r\n for i in range(5):\r\n probs[i] = probs[i] * self.gauss(2 * done[i] - total[i])\r\n if probs[i] > maxi:\r\n maxi = probs[i]\r\n maxi_el = i\r\n\r\n print(maxi_el)\r\n return maxi_el\r\n\r\n" }, { "alpha_fraction": 0.38518011569976807, "alphanum_fraction": 0.4473375082015991, "avg_line_length": 52.63101577758789, "blob_id": "beaa7c3d9364348a2cb7e919fb9b9faa0707eda6", "content_id": "29786232fb59e8d1e0806e7c8fefb7447a39b351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10216, "license_type": "no_license", "max_line_length": 143, "num_lines": 187, "path": "/screens/play_the_piano_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom useful_classes.piano_sound import Piano_sound\r\nfrom useful_classes.hands_free import *\r\n\r\n\r\nclass Play_the_piano_screen():\r\n def __init__(self, app):\r\n self.text_audio = None\r\n self.app = app\r\n self.click = False\r\n self.point_x = 0\r\n self.point_y = 0\r\n self.mic_icon = pygame.image.load('icons/mic_on.jpg')\r\n self.mic_icon_off = pygame.image.load('icons/mic_off.jpg')\r\n self.note_icon1 = pygame.image.load('icons/pianoFUL.jpg')\r\n self.notex_title_list = ['C (Do)', 'C# (Do#)', 'D (Re)', 'Eb (Mi b)', 'E (Mi)', 'F (Fa)', 'F (Fa#)', 'G (Sol)',\r\n 'Ab (La b)', 'A (La)', 'Bb (Si b)', 'B (Si)']\r\n self.notes_icon_list1 = ['icons/piano_notes/do1.jpg', 'icons/piano_notes/dod1.jpg', 'icons/piano_notes/re1.jpg',\r\n 'icons/piano_notes/mib1.jpg', 'icons/piano_notes/mi1.jpg', 'icons/piano_notes/fa1.jpg',\r\n 'icons/piano_notes/fad1.jpg', 'icons/piano_notes/sol1.jpg',\r\n 'icons/piano_notes/lab1.jpg', 'icons/piano_notes/la1.jpg',\r\n 'icons/piano_notes/sib1.jpg', 'icons/piano_notes/si1.jpg']\r\n self.notes_icon_list = ['icons/piano_notes/do.jpg', 'icons/piano_notes/dod.jpg', 'icons/piano_notes/re.jpg',\r\n 'icons/piano_notes/mib.jpg', 'icons/piano_notes/mi.jpg', 'icons/piano_notes/fa.jpg',\r\n 'icons/piano_notes/fad.jpg', 'icons/piano_notes/sol.jpg',\r\n 'icons/piano_notes/lab.jpg', 
'icons/piano_notes/la.jpg', 'icons/piano_notes/sib.jpg',\r\n                                'icons/piano_notes/si.jpg']\r\n        self.piano_sound = Piano_sound(\"sounds/piano-c.wav\")\r\n        self.voice_notes = (\r\n            \"play note c\", \"play note c sharp\", \"play note d\", \"play note e flat\", \"play note e\", \"play note f\",\r\n            \"play note f sharp\", \"play note g\", \"play note g sharp\", \"play note a\", \"play note b flat\", \"play note b\")\r\n        self.sounds_list = [\"sounds/piano-c.wav\", \"sounds/piano-cd.wav\", \"sounds/piano-d.wav\", \"sounds/piano-eb.wav\",\r\n                            \"sounds/piano-e.wav\", \"sounds/piano-f.wav\", \"sounds/piano-fd.wav\", \"sounds/piano-g.wav\",\r\n                            \"sounds/piano-ab.wav\", \"sounds/piano-a.wav\", \"sounds/piano-bb.wav\", \"sounds/piano-b.wav\"]\r\n        self.button_coordinates_list = [(450, 200, 30, 200), (466, 200, 18, 122), (484, 200, 18, 200),\r\n                                        (501, 200, 18, 122), (518, 200, 30, 200), (534, 200, 30, 200),\r\n                                        (550, 200, 18, 122), (567, 200, 18, 200), (582, 200, 18, 122),\r\n                                        (600, 200, 25, 200), (615, 200, 18, 122), (632, 200, 18, 200)]\r\n        self.new_button_list = [(100, 348, 30, 82), (118, 200, 18, 150), (133, 348, 30, 82), (161, 200, 18, 150),\r\n                                (166, 348, 30, 82), (200, 348, 30, 82),\r\n                                (220, 200, 18, 150), (234, 348, 30, 82), (257, 200, 18, 150), (266, 348, 30, 82),\r\n                                (292, 200, 18, 150), (300, 348, 30, 82),\r\n                                (334, 348, 30, 82), (354, 200, 18, 150), (367, 348, 30, 82), (392, 200, 18, 150),\r\n                                (401, 348, 30, 82), (434, 348, 30, 82),\r\n                                (451, 200, 18, 150), (467, 348, 30, 82), (490, 200, 18, 150), (501, 348, 30, 82),\r\n                                (528, 200, 18, 150), (534, 348, 30, 82),\r\n                                (567, 348, 30, 82), (586, 200, 18, 150), (601, 348, 30, 82), (625, 200, 18, 150),\r\n                                (635, 348, 30, 82), (667, 348, 30, 82),\r\n                                (686, 200, 18, 150), (700, 348, 30, 82), (724, 200, 18, 150), (734, 348, 30, 82),\r\n                                (761, 200, 18, 150), (769, 348, 30, 82), ]\r\n        self.sound_piano_button = pygame.Rect(450, 200, 30, 200)\r\n        self.buttons = []\r\n        for coord in self.new_button_list:\r\n            self.buttons.append(pygame.Rect(coord))\r\n        self.hands_free_button = pygame.Rect(0, 0, 100, 100)\r\n\r\n    def text_to_command(self, text):\r\n        selected = -1\r\n        greetings = [\"hello\", \"hi\", \"good morning\", \"good evening\", \"good afternoon\", \"goodnight\", \"have a great day\"]\r\n        negations = [\"don't\", \"do not\", \"avoid\"]\r\n        grateful = [\"thank you\", \"thanks\", \"grateful\", \"appreciate\"]\r\n        convo = [\"how are you\", \"how you doing\", \"how is your\", \"how was your\", \"how do you\", \"you alright\"]\r\n        notes = [\"note c\", \"c sharp\", \"note d\", \"e flat\", \"note e\", \"note f\", \"f sharp\", \"note g\", \"g sharp\", \"note a\",\r\n                 \"b flat\", \"note b\"]\r\n        aprobs = [\"play\", \"sound\", \"hear\", \"listen to\"]\r\n\r\n        for greet in greetings:\r\n            if text.find(greet) >= 0:\r\n                self.text_audio = greet + \", \" + self.app.current_user + \"!\"\r\n                return -1\r\n        for question in convo:\r\n            if text.find(question) >= 0:\r\n                self.text_audio = \"I am pretty fine, thank you, \" + self.app.current_user\r\n                return -1\r\n        for thank in grateful:\r\n            if text.find(thank) >= 0:\r\n                self.text_audio = \"It really was my pleasure, \" + self.app.current_user\r\n                return -1\r\n        for i in range(11):\r\n            if text.find(notes[i]) >= 0:\r\n                selected = i\r\n                print(\"found note\")\r\n        for i in range(3):\r\n            if text.find(negations[i]) >= 0:\r\n                if i == 2:\r\n                    self.text_audio = \"Ok, we'll avoid to\"\r\n                else:\r\n                    for j in range(4):\r\n                        if text.find(aprobs[j]) >= 0:\r\n                            print(aprobs[j])\r\n                            print(j)\r\n                            if j < 2:\r\n                                self.text_audio = \"Ok, we won't play it\"\r\n
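                            # for 'hear'/'listen to' the spoken verb is echoed back in the refusal\r\n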
                            else:\r\n                                self.text_audio = \"Ok \" + self.app.current_user + \", you won't \" + aprobs[j] + \" it\"\r\n                            # pygame.display.update()\r\n                            return -1\r\n        for i in aprobs:\r\n            if text.find(i) >= 0:\r\n                if (selected > -1):\r\n                    self.text_audio = \"...playing \" + notes[selected]\r\n                else:\r\n                    self.text_audio = \"I'm sorry, I didn't understand that\"\r\n                return selected\r\n        return -1\r\n\r\n    def play_the_piano(self):\r\n        running = True\r\n        playing = True\r\n        while running:\r\n            click = False\r\n            for event in pygame.event.get():  #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n                if event.type == QUIT:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n                if event.type == KEYDOWN:\r\n                    if event.key == K_ESCAPE:\r\n                        running = False\r\n                if event.type == MOUSEBUTTONDOWN:\r\n                    if event.button == 1:\r\n                        click = True  #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n            self.app.screen.fill((0, 0, 0))\r\n            # self.app.draw_text('play_the_piano', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n            mx, my = pygame.mouse.get_pos()\r\n\r\n            if self.hands_free_button.collidepoint((mx, my)):\r\n                self.app.screen.blit(self.mic_icon_off, (0, 0))\r\n                self.text_audio = \"...listening...\"\r\n                if click:\r\n\r\n                    n = Hands_Free()\r\n                    text = n.voice_input()\r\n                    nott = self.text_to_command(text)\r\n\r\n                    if nott != -1:\r\n                        if (playing):\r\n                            self.piano_sound.set_note(self.sounds_list[nott])\r\n                            self.piano_sound.play()\r\n                            playing = False\r\n                        else:\r\n                            self.piano_sound.set_note(self.sounds_list[nott])\r\n                            self.piano_sound.play()\r\n                            playing = True\r\n\r\n            else:\r\n                if self.text_audio == \"...listening...\":\r\n                    self.text_audio = None\r\n                self.app.screen.blit(self.mic_icon, (0, 0))\r\n                # self.mic_icon = pygame.image.load('icons/mic_on.jpg')\r\n\r\n            yes = -5\r\n            if click:\r\n                yes = 0\r\n                print(str(mx) + ' ' + str(my))\r\n            for i in range(36):\r\n                if self.buttons[i].collidepoint((mx, my)):\r\n\r\n                    if click:\r\n                        yes = 1\r\n                        self.point_x = mx\r\n                        self.point_y = my\r\n                        self.text_audio = None\r\n                        # self.login_screen.login()\r\n                        if (playing):\r\n                            print(i)\r\n                            self.piano_sound.set_note(self.sounds_list[i % 12])\r\n                            # self.note_icon1=pygame.image.load(self.notes_icon_list[i])\r\n                            self.piano_sound.play()\r\n                            playing = False\r\n                        else:\r\n                            self.piano_sound.set_note(self.sounds_list[i % 12])\r\n                            # self.note_icon1 = pygame.image.load(self.notes_icon_list[i])\r\n                            self.piano_sound.play()\r\n                            playing = True\r\n            if yes == 0:\r\n                self.point_x = 0\r\n            self.app.draw_text(self.text_audio, self.app.font, (255, 255, 255), self.app.screen, 120, 20)\r\n\r\n            self.app.screen.blit(self.note_icon1, (100, 200))\r\n\r\n            self.app.screen.blit(pygame.image.load('icons/hand.png'), (mx - 25, my - 10))\r\n\r\n            if self.point_x:\r\n                self.app.screen.blit(pygame.image.load('icons/ball.png'), (self.point_x - 25, self.point_y - 10))\r\n\r\n            pygame.display.update()\r\n" }, { "alpha_fraction": 0.4462047815322876, "alphanum_fraction": 0.4762355089187622, "avg_line_length": 41.15306091308594, "blob_id": "1b33b9a430a0802f248f4241b8aa7d76347b6cc6", "content_id": "742ab58229e2a4d4077050889c9720b105ce043e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4229, "license_type": "no_license", "max_line_length": 146, "num_lines": 98, "path": "/screens/recommended_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\n
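# the level type served below is picked by the trained SVM predictor (Prediction_Algorithm.get_biggest)\r\n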
Choice_Question\r\nfrom questions.gen_kno_question import Gen_Kno_Question\r\nfrom screens.piano_tutorial_screen import Piano_tutorial_screen\r\nfrom questions.piano_question import Piano_Question\r\nimport random\r\n\r\nfrom useful_classes.prediction_algorithm import Prediction_Algorithm\r\n\r\n\r\nclass Recommended_screen():\r\n    def __init__(self, app):\r\n        self.checked = 0\r\n        self.app = app\r\n        self.click = False\r\n        self.q = Piano_Question(self.app, 0, \"choice\")\r\n        self.piano_tutorial_screen = Piano_tutorial_screen(self.app)\r\n        self.count = 0\r\n        self.started = 0\r\n        self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n\r\n    def recommended(self):\r\n\r\n        running = True\r\n        next_button = pygame.Rect(250, 450, 400, 50)\r\n        while running:\r\n            click = False\r\n            for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n                if event.type == QUIT:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n                if event.type == KEYDOWN:\r\n                    if event.key == K_ESCAPE:\r\n                        running = False\r\n                if event.type == MOUSEBUTTONDOWN:\r\n                    if event.button == 1:\r\n                        click = True #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n\r\n            self.app.screen.fill((0, 0, 0))\r\n\r\n            mx, my = pygame.mouse.get_pos()\r\n\r\n            # self.app.draw_text('piano', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n\r\n            if next_button.collidepoint((mx, my)):\r\n\r\n                if click:\r\n                    vector = []\r\n                    for topic in range(11):\r\n                        vector.append([1, 0, 0, 0, 0, topic])\r\n                    for topic in range(11):\r\n                        vector.append([0, 1, 0, 0, 0, topic])\r\n                    for topic in range(11):\r\n                        vector.append([0, 0, 1, 0, 0, topic])\r\n                    n = Prediction_Algorithm(self.app)  # train a model with the data we have about the user's progress\r\n                    n.train_model()\r\n                    n = n.get_biggest(vector)\r\n\r\n                    if (self.started == 0):\r\n                        self.checked = 1\r\n                        self.started = 1\r\n                        self.next_submit_icon = pygame.image.load('icons/submit_btn.jpg')\r\n                    if self.checked == 1:\r\n                        # n=self.count%5\r\n                        if (n == 1):\r\n                            self.q = Piano_Question(self.app, 0, \"listen\")\r\n                        elif (n == 2):\r\n                            self.q = Piano_Question(self.app, 0, \"read\")\r\n                        elif (n == 3):\r\n                            self.q = Piano_Question(self.app, 0, \"choice\")\r\n                        else:\r\n                            self.q = Gen_Kno_Question(self.app, 0)\r\n                        self.q.set_next()\r\n                        self.count += 1\r\n                        self.checked = 0\r\n                    else:\r\n                        self.q.check_answer()\r\n                        self.checked = 1\r\n                        self.next_submit_icon = pygame.image.load('icons/play.jpg')\r\n            else:\r\n                if click:\r\n                    received = self.q.receive_answer(mx, my)\r\n\r\n            # pygame.draw.rect(self.app.screen, (255, 162, 193), next_button)\r\n            self.app.screen.blit(self.next_submit_icon, (250, 450))\r\n            self.app.screen.blit(self.app.bg, (20, 50))\r\n            self.app.screen.blit(self.app.bg1, (700, 50))\r\n            self.q.display()\r\n            x = 60\r\n            y = 40\r\n            if (self.started == 0):\r\n                self.app.draw_text(\"Press 'continue' to start the test\", self.app.font, (255, 255, 255),\r\n                                   self.app.screen, 150 + x, 120 + y)\r\n\r\n            self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n            pygame.display.update()\r\n" }, { "alpha_fraction": 0.4632934629917145, "alphanum_fraction": 0.4890524446964264, "avg_line_length": 39.80769348144531, "blob_id": "dfcd23457533ac4ce21e45a30207e9e32bb8fb90", "content_id": "b65ef21d9765b6d20bc6871b80b936e518eb2273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5435, "license_type": "no_license", "max_line_length": 139, "num_lines": 130, "path": 
"/screens/login_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom screens.menu_screen import Menu_screen\r\nfrom useful_classes.inputBox import InputBox\r\nfrom useful_classes.encryption import *\r\nfrom screens.forgot_password import *\r\n\r\n\r\nclass Login_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.menu_screen = Menu_screen(self.app)\r\n self.forgot_password_screen = Forgot_password_screen(self.app)\r\n self.click = False\r\n self.icon = pygame.image.load('icons/login.jpg')\r\n self.play_icon = pygame.image.load('icons/play.jpg')\r\n self.play_icon_hov = pygame.image.load('icons/play_hov.jpg')\r\n self.u = None\r\n self.p = None\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users\")\r\n self.complete_fields = 0\r\n self.succes = None\r\n self.my_account_icon = pygame.image.load('icons/my_account.jpg')\r\n self.my_account_button = pygame.Rect(370, 525, 159, 25)\r\n self.clickky = 0\r\n\r\n def database_check(self):\r\n print(\"hai verilor\")\r\n self.clickky = 0\r\n q = self.database_handler.exists(\"username\", self.u)\r\n if q:\r\n password = self.database_handler.get(\"username\", self.u, \"password\")\r\n if password == self.p:\r\n self.succes = 1\r\n self.app.set_user(self.u)\r\n else:\r\n self.succes = 0\r\n\r\n\r\n else:\r\n self.succes = 0\r\n\r\n def login(self):\r\n running = True\r\n username_input = InputBox(250, 200, 400, 50, \"username\")\r\n password_input = InputBox(250, 300, 400, 50, \"password\")\r\n input_boxes = [username_input, password_input]\r\n menu_button = pygame.Rect(250, 450, 400, 50)\r\n\r\n while running:\r\n click = False\r\n self.app.screen.fill((0, 0, 0)) # start\r\n for event in pygame.event.get(): #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.key == K_RETURN: # code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n u = username_input.handle_event(event)\r\n p = password_input.handle_event(event, True)\r\n if u != None:\r\n self.u = u\r\n if p:\r\n self.p = encrypt(p)\r\n\r\n print(self.u)\r\n print(self.p)\r\n if self.u and self.p:\r\n self.complete_fields = 1\r\n\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n\r\n username_input.handle_event(event)\r\n password_input.handle_event(event, True)\r\n for box in input_boxes:\r\n box.update()\r\n\r\n if (self.succes == 0):\r\n if (self.clickky == 0):\r\n text = 'Wrong username or password'\r\n if click:\r\n text = ' '\r\n self.clickky = 1\r\n self.app.draw_text(text, self.app.font, (255, 1, 1), self.app.screen, 250,\r\n 350)\r\n # self.app.draw_text('login', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n mx, my = pygame.mouse.get_pos()\r\n if menu_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.play_icon_hov, (250, 450))\r\n if click and self.complete_fields:\r\n self.database_check()\r\n if (self.succes == 1):\r\n self.menu_screen.get_user_score()\r\n self.menu_screen.menu()\r\n else:\r\n self.app.screen.blit(self.play_icon, (250, 450))\r\n\r\n if self.my_account_button.collidepoint((mx, my)):\r\n if click:\r\n self.forgot_password_screen.forgot_password()\r\n 
self.app.screen.blit(self.icon, (250, 20))\r\n\r\n for box in input_boxes:\r\n box.draw(self.app.screen)\r\n\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n\r\n if self.my_account_button.collidepoint((mx, my)):\r\n self.app.draw_text('Forgot your password?', pygame.font.SysFont('inkfree', 16, bold=True),\r\n self.app.color,\r\n self.app.screen, 370,\r\n 525)\r\n else:\r\n self.app.draw_text('Forgot your password?', pygame.font.SysFont('inkfree', 16), self.app.color,\r\n self.app.screen, 370,\r\n 525)\r\n\r\n if click:\r\n print(mx)\r\n print(my)\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n pygame.display.flip()\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.5462605953216553, "alphanum_fraction": 0.5790285468101501, "avg_line_length": 46.48598098754883, "blob_id": "ecf1b6b58e73e980f9a3006e6e57201491825249", "content_id": "85e2366c651b78af3cfc6306d3eac2755808a323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5188, "license_type": "no_license", "max_line_length": 111, "num_lines": 107, "path": "/questions/choise_question.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\n\r\n\r\nclass Choice_Question():\r\n def __init__(self, app, question_no):\r\n self.checked = 0\r\n self.question_no = question_no\r\n self.outcome = \"\"\r\n self.app = app\r\n self.image = \"None\"\r\n self.topic = None\r\n self.received_answer = -2\r\n self.question = \"\"\r\n self.answers = [\"\", \"\", \"\", \"\"]\r\n self.right_ans = -2\r\n self.font = pygame.font.SysFont('inkfree', 32)\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"questions\")\r\n self.mycol = self.database_handler.set_collection(\"piano_questions\")\r\n self.a_buttons = []\r\n self.buttons_coord = [(200, 150 - 5, 50 - 10, 50 - 10), (200, 225 - 5, 40, 40), (200, 300 - 5, 40, 40),\r\n (200, 375 - 5, 40, 40)]\r\n for i in range(4):\r\n b = pygame.Rect(self.buttons_coord[i])\r\n self.a_buttons.append(b)\r\n\r\n def get_question_no(self):\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n s_count = self.database_handler.get(\"username\", self.app.current_user, \"piano_c_s\")\r\n s_count += self.database_handler.get(\"username\", self.app.current_user, \"piano_c_f\")\r\n print(\"TOTAL\")\r\n print(s_count)\r\n self.question_no = s_count\r\n\r\n def set_next(self):\r\n\r\n self.get_question_no()\r\n self.database_handler.database_init(\"questions\")\r\n self.mycol = self.database_handler.set_collection(\"piano_questions\")\r\n question = self.database_handler.get(\"question_no\", self.question_no, \"question\")\r\n a1 = self.database_handler.get(\"question_no\", self.question_no, \"a1\")\r\n a2 = self.database_handler.get(\"question_no\", self.question_no, \"a2\")\r\n a3 = self.database_handler.get(\"question_no\", self.question_no, \"a3\")\r\n a4 = self.database_handler.get(\"question_no\", self.question_no, \"a4\")\r\n right_ans = self.database_handler.get(\"question_no\", self.question_no, \"correct\")\r\n self.image = self.database_handler.get(\"question_no\", self.question_no, \"image\")\r\n self.topic = self.database_handler.get(\"question_no\", self.question_no, \"topic\")\r\n answers = [a1, a2, a3, a4]\r\n self.question = question\r\n self.answers = answers\r\n self.right_ans = right_ans\r\n self.question_no += 
1\r\n\r\n    def receive_answer(self, mx, my):\r\n        print(mx)\r\n        print(my)\r\n        for i in range(4):\r\n            if (self.a_buttons[i].collidepoint((mx, my))):\r\n                self.received_answer = i\r\n                print(i)\r\n\r\n    def check_answer(self):\r\n        self.checked = 1\r\n        self.database_handler.database_init(\"users\")\r\n        self.mycol = self.database_handler.set_collection(\"users_data\")\r\n        if (self.right_ans == self.received_answer):\r\n            self.outcome = \"Congratulations!\"\r\n            self.database_handler.increment_database(\"username\", self.app.current_user, \"piano_c_s\", 1)\r\n            print(\"incremented\")\r\n            self.database_handler.database_init(\"users_progress\")\r\n            self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n            self.database_handler.insert(\r\n                {\"piano_c\": 1, \"piano_l\": 0, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 0,\r\n                 \"topic\": self.topic, \"result\": 1})\r\n\r\n        else:\r\n            self.outcome = \"I'm sorry! That was not correct!\"\r\n            self.database_handler.increment_database(\"username\", self.app.current_user, \"piano_c_f\", 1)\r\n            self.database_handler.database_init(\"users_progress\")\r\n            self.mycol = self.database_handler.set_collection(self.app.current_user)\r\n            self.database_handler.insert(\r\n                {\"piano_c\": 1, \"piano_l\": 0, \"piano_r\": 0, \"gen_c\": 0, \"chords\": 0,\r\n                 \"topic\": self.topic, \"result\": 0})\r\n\r\n        self.database_handler.database_init(\"questions\")\r\n        self.mycol = self.database_handler.set_collection(\"piano_questions\")\r\n\r\n    def display(self):\r\n        self.app.draw_text(self.question, self.font, (255, 255, 255), self.app.screen, 250, 50)\r\n\r\n        for i in range(4):\r\n            self.app.draw_text(self.answers[i], self.font, (255, 255, 255), self.app.screen, 250, 150 + i * 75)\r\n\r\n        if self.received_answer > -1 and self.received_answer < 4:\r\n            pygame.draw.rect(self.app.screen, (255, 162, 193), self.buttons_coord[self.received_answer])\r\n\r\n        if self.question != \"\":\r\n            for i in range(4):\r\n                pygame.draw.rect(self.app.screen, (255, 162, 193), pygame.Rect(self.buttons_coord[i]), 1)\r\n            if self.image != \"None\":\r\n                self.app.screen.blit(pygame.image.load(self.image), (450, 200))\r\n\r\n        if self.checked == 1 and self.received_answer > -1:\r\n            self.app.draw_text(self.outcome, self.font, (255, 255, 255), self.app.screen, 250, 100)\r\n            pygame.draw.rect(self.app.screen, (255, 1, 1), self.buttons_coord[self.received_answer])\r\n            pygame.draw.rect(self.app.screen, (1, 255, 1), self.buttons_coord[self.right_ans])\r\n" }, { "alpha_fraction": 0.455826073884964, "alphanum_fraction": 0.506260871887207, "avg_line_length": 52.24528121948242, "blob_id": "1dde370cdf6ac24849c90d1666fd4f9463e4b91d", "content_id": "0a759a25233c40635309bd3ee47a2477a1e68639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5750, "license_type": "no_license", "max_line_length": 143, "num_lines": 106, "path": "/screens/piano_notes_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom useful_classes.piano_sound import Piano_sound\r\n\r\n\r\nclass Piano_notes_screen():\r\n    def __init__(self, app):\r\n\r\n        self.notex_title_list = ['C (Do)', 'C# (Do#)', 'D (Re)', 'Eb (Mi b)', 'E (Mi)', 'F (Fa)', 'F# (Fa#)', 'G (Sol)',\r\n                                 'Ab (La b)', 'A (La)', 'Bb (Si b)', 'B (Si)']\r\n        self.notes_icon_list1 = ['icons/piano_notes/do1.jpg', 'icons/piano_notes/dod1.jpg', 'icons/piano_notes/re1.jpg',\r\n                                 'icons/piano_notes/mib1.jpg', 'icons/piano_notes/mi1.jpg', 
'icons/piano_notes/fa1.jpg',\r\n                                 'icons/piano_notes/fad1.jpg', 'icons/piano_notes/sol1.jpg',\r\n                                 'icons/piano_notes/lab1.jpg', 'icons/piano_notes/la1.jpg',\r\n                                 'icons/piano_notes/sib1.jpg', 'icons/piano_notes/si1.jpg']\r\n        self.notes_icon_list = ['icons/piano_notes/do.jpg', 'icons/piano_notes/dod.jpg', 'icons/piano_notes/re.jpg',\r\n                                'icons/piano_notes/mib.jpg', 'icons/piano_notes/mi.jpg', 'icons/piano_notes/fa.jpg',\r\n                                'icons/piano_notes/fad.jpg', 'icons/piano_notes/sol.jpg',\r\n                                'icons/piano_notes/lab.jpg', 'icons/piano_notes/la.jpg', 'icons/piano_notes/sib.jpg',\r\n                                'icons/piano_notes/si.jpg']\r\n        self.sounds_list = [\"sounds/piano-c.wav\", \"sounds/piano-cd.wav\", \"sounds/piano-d.wav\", \"sounds/piano-eb.wav\",\r\n                            \"sounds/piano-e.wav\", \"sounds/piano-f.wav\", \"sounds/piano-fd.wav\", \"sounds/piano-g.wav\",\r\n                            \"sounds/piano-gd.wav\", \"sounds/piano-a.wav\", \"sounds/piano-bb.wav\", \"sounds/piano-b.wav\"]\r\n        self.button_coordinates_list = [(450, 200, 30, 200), (466, 200, 18, 122), (484, 200, 18, 200),\r\n                                        (501, 200, 18, 122), (518, 200, 30, 200), (534, 200, 30, 200),\r\n                                        (550, 200, 18, 122), (567, 200, 18, 200), (582, 200, 18, 122),\r\n                                        (600, 200, 25, 200), (615, 200, 18, 122), (632, 200, 18, 200)]\r\n        self.app = app\r\n        self.click = False\r\n        self.count = 0\r\n        self.piano_sound = Piano_sound(\"sounds/piano-c.wav\")\r\n        self.font = pygame.font.SysFont('inkfree', 32)\r\n        self.note_icon1 = pygame.image.load('icons/piano_notes/do.jpg')\r\n        self.note_icon2 = pygame.image.load('icons/piano_notes/do1.jpg')\r\n        self.note_title = \"C (Do)\"\r\n        # self.sound_play_button = pygame.Rect(250, 150, 400, 50)\r\n        self.sound_piano_button = pygame.Rect(450, 200, 30, 200)\r\n        self.next_icon = pygame.image.load('icons/next.jpg')\r\n        self.next_button = pygame.Rect(400, 450, 100, 50)\r\n\r\n    def next_note(self):\r\n        if self.count == 11:\r\n            self.count = 0\r\n        else:\r\n            self.count += 1\r\n        self.note_title = self.notex_title_list[self.count]\r\n        self.note_icon1 = pygame.image.load(self.notes_icon_list[self.count])\r\n        self.note_icon2 = pygame.image.load(self.notes_icon_list1[self.count])\r\n        self.sound_piano_button = pygame.Rect(self.button_coordinates_list[self.count])\r\n        self.piano_sound.set_note(self.sounds_list[self.count])\r\n        print(\"next\")\r\n\r\n    def piano_notes(self):\r\n        running = True\r\n\r\n        playing = True\r\n        while running:\r\n            click = False\r\n            mx, my = pygame.mouse.get_pos()\r\n\r\n            for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n                if event.type == QUIT:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n                if event.type == KEYDOWN:\r\n                    if event.key == K_ESCAPE:\r\n                        running = False\r\n                if event.type == MOUSEBUTTONDOWN:\r\n                    if event.button == 1:\r\n                        click = True\r\n            self.app.screen.fill((0, 0, 0)) #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n            # self.app.draw_text('piano notes screen', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n\r\n            if self.sound_piano_button.collidepoint((mx, my)):\r\n                if click:\r\n                    # self.login_screen.login()\r\n                    if (playing):\r\n                        print(\"playing\")\r\n                        self.piano_sound.play()\r\n                        playing = False\r\n                    else:\r\n                        print(\"not playing\")\r\n                        self.piano_sound.play()\r\n                        playing = True\r\n\r\n                    print(\"clicked\")\r\n                pass\r\n\r\n            if click:\r\n                print(mx)\r\n                print(my)\r\n            if self.next_button.collidepoint((mx, my)):\r\n                self.app.draw_text(\"next note\", self.app.font, (255, 255, 255), self.app.screen, 397, 500)\r\n                if click:\r\n                    
self.next_note()\r\n\r\n self.app.screen.blit(self.note_icon1, (450, 200))\r\n self.app.screen.blit(self.note_icon2, (250, 250))\r\n self.app.screen.blit(self.next_icon, (400, 450))\r\n self.app.draw_text(self.note_title, self.font, (255, 255, 255), self.app.screen, 400, 50)\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n # pygame.draw.rect(self.app.screen, (255, 162, 193), self.sound_play_button)\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.484836220741272, "alphanum_fraction": 0.5103113651275635, "avg_line_length": 44.080745697021484, "blob_id": "d8ded79e07d7309900b8b64c0635bb0ea21f1a57", "content_id": "52db6f8ea1528d1e16e70985b0b98966c8451dbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7419, "license_type": "no_license", "max_line_length": 148, "num_lines": 161, "path": "/screens/signup_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom screens.menu_screen import Menu_screen\r\nfrom useful_classes.inputBox import InputBox\r\nfrom datetime import date\r\nfrom useful_classes.encryption import *\r\n\r\n\r\nclass Signup_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.menu_screen = Menu_screen(self.app)\r\n self.click = False\r\n self.icon = pygame.image.load('icons/signup.jpg')\r\n self.play_icon = pygame.image.load('icons/play.jpg')\r\n self.play_icon_hov = pygame.image.load('icons/play_hov.jpg')\r\n self.u = None\r\n self.p = None\r\n self.n = None\r\n self.font = pygame.font.SysFont('inkfree', 22)\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users\")\r\n self.complete_fields = 0\r\n self.already_exists = 0\r\n self.success = 0\r\n self.recovery_questions = [\"your first dog\", \"your first crush\", \"your favourite flavour\",\r\n \"your favourite uncle\", \"your secret talent\"]\r\n self.recovery_index = 0\r\n self.recovery_question = \"your first dog\"\r\n\r\n def insert_into_database(self, name, username, password):\r\n print(\"insert into database new user\")\r\n q = self.database_handler.exists(\"username\", self.u)\r\n if (q):\r\n print(\"the username already exists\")\r\n self.already_exists = 1\r\n else:\r\n self.already_exists = 0\r\n self.database_handler.insert({\"username\": self.u, \"password\": self.p,\r\n \"recovery_question\": self.recovery_questions[self.recovery_index],\r\n \"recovery_answer\": self.n})\r\n self.create_user_data()\r\n self.success = 1\r\n self.app.set_user(self.u)\r\n\r\n def create_user_data(self):\r\n print(\"here\")\r\n today = date.today()\r\n day = today.strftime('%d')\r\n month = today.strftime('%m')\r\n year = today.strftime('%y')\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users_data\")\r\n self.database_handler.insert(\r\n {\"username\": self.u, \"piano_c_s\": 0, \"piano_c_f\": 0, \"piano_l_s\": 0, \"piano_l_f\": 0, \"piano_r_s\": 0,\r\n \"piano_r_f\": 0, \"chords_s\": 0, \"chords_f\": 0, \"gen_c_s\": 0, \"gen_c_f\": 0, \"day\": int(day),\r\n \"month\": int(month), \"year\": int(year)})\r\n self.database_handler.database_init(\"users\")\r\n self.mycol = self.database_handler.set_collection(\"users\")\r\n print(\"here\")\r\n\r\n def 
signup(self):\r\n running = True\r\n nickname_input = InputBox(250, 150, 400, 50, \"your recovery answer\")\r\n username_input = InputBox(250, 250, 400, 50, \"username\")\r\n password_input = InputBox(250, 350, 400, 50, \"password\")\r\n input_boxes = [nickname_input, username_input, password_input]\r\n menu_button = pygame.Rect(250, 450, 400, 50)\r\n next_button = pygame.Rect(660, 110, 30, 30)\r\n back_next_button = pygame.Rect(210, 110, 30, 30)\r\n next_icon = pygame.image.load(\"icons/little_next.jpg\")\r\n back_next_icon = pygame.image.load(\"icons/little_backwards_next.jpg\")\r\n\r\n while running:\r\n click = False\r\n self.app.screen.fill((0, 0, 0))\r\n for event in pygame.event.get(): #start: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.key == K_RETURN:\r\n u = username_input.handle_event(event)\r\n if u != None:\r\n self.u = u\r\n p = password_input.handle_event(event, True)\r\n if p:\r\n self.p = encrypt(p)\r\n n = nickname_input.handle_event(event)\r\n if n:\r\n self.n = n\r\n print(self.n)\r\n print(self.u)\r\n print(self.p)\r\n if self.u and self.n and self.p:\r\n self.complete_fields = 1\r\n\r\n if event.type == MOUSEBUTTONDOWN: #end: code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.button == 1:\r\n click = True\r\n\r\n nickname_input.handle_event(event)\r\n username_input.handle_event(event)\r\n password_input.handle_event(event, True)\r\n for box in input_boxes:\r\n box.update()\r\n\r\n # self.app.draw_text('signup', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n if (self.already_exists):\r\n self.app.draw_text('The username already exists', self.app.font, (255, 255, 255), self.app.screen, 20,\r\n 500)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n\r\n if next_button.collidepoint((mx, my)):\r\n if click:\r\n if self.recovery_index == 4:\r\n self.recovery_index = 0\r\n else:\r\n self.recovery_index += 1\r\n self.recovery_question = self.recovery_questions[self.recovery_index]\r\n\r\n if back_next_button.collidepoint((mx, my)):\r\n if click:\r\n if self.recovery_index == 0:\r\n self.recovery_index = 4\r\n else:\r\n self.recovery_index -= 1\r\n self.recovery_question = self.recovery_questions[self.recovery_index]\r\n\r\n if menu_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.play_icon_hov, (250, 450))\r\n if click and self.complete_fields:\r\n print(\"insert\")\r\n self.insert_into_database(self.n, self.u, self.p)\r\n if self.success:\r\n self.menu_screen.menu()\r\n else:\r\n self.app.screen.blit(self.play_icon, (250, 450))\r\n self.app.screen.blit(self.icon, (250, 20))\r\n\r\n nickname_input.draw(self.app.screen)\r\n for box in input_boxes:\r\n if box != nickname_input:\r\n box.draw(self.app.screen)\r\n\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n self.app.screen.blit(next_icon, (660, 110))\r\n self.app.screen.blit(back_next_icon, (210, 110))\r\n self.app.draw_text(\"recovery question: \" + self.recovery_question, self.font, self.app.color,\r\n self.app.screen,\r\n 250, 110)\r\n\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n pygame.display.flip()\r\n # pygame.draw.rect(self.app.screen, (255, 162, 193), menu_button)\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.7285714149475098, 
"alphanum_fraction": 0.7285714149475098, "avg_line_length": 16, "blob_id": "8cbddb1782ff21602640f497b870872d5a8e88b3", "content_id": "2bbc0ab0b8504b6eedbbeca9822fc6316ecea120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/main.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "from startup_screen import StartUp\r\n\r\napp = StartUp()\r\napp.main_menu()" }, { "alpha_fraction": 0.4748775362968445, "alphanum_fraction": 0.5051084756851196, "avg_line_length": 40.529762268066406, "blob_id": "028aeccd8f5a1fbb44014fd14d66df723eae61d8", "content_id": "ec84c305ee92a7bca9a7554ae9431d4cfc39384b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7145, "license_type": "no_license", "max_line_length": 154, "num_lines": 168, "path": "/screens/feedback_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom useful_classes.inputBox import InputBox\r\nfrom useful_classes.encryption import *\r\n\r\n\r\n# from screens.forgot_password import Forgot_password_screen\r\n\r\nclass Feedback_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.click = False\r\n self.rate0 = pygame.image.load('icons/filled_note.jpg')\r\n self.rate1 = pygame.image.load('icons/empty_note.jpg')\r\n self.current_collide_rate = -1\r\n self.play_icon = pygame.image.load('icons/send_icon.jpg')\r\n self.font = pygame.font.SysFont('inkfree', 20)\r\n self.u = None\r\n self.database_handler = self.app.database_handler\r\n self.database_handler.database_init(\"feedback\")\r\n self.mycol = self.database_handler.set_collection(\"feedbacks\")\r\n self.feedbacks = [\"Amazing\", \"Wonderful\", \"Fun and useful\"]\r\n self.setted_rating = -1\r\n self.complete_fields = 0\r\n self.succes = None\r\n self.avg_rating = 5.00\r\n self.rating_buttons = []\r\n x = 350\r\n y = 50\r\n for i in range(5):\r\n self.rating_buttons.append(pygame.Rect(x + i * 35, y, 35, 60))\r\n\r\n def get_rating_fromdb(self):\r\n self.database_handler.database_init(\"feedback\")\r\n self.mycol = self.database_handler.set_collection(\"ratings\")\r\n ratings = self.mycol.find()\r\n sum = 0.0\r\n count = 0\r\n for rate in ratings:\r\n sum += rate[\"rating\"]\r\n count += 1\r\n self.avg_rating = sum / count\r\n\r\n def rate(self):\r\n\r\n self.database_handler.database_init(\"feedback\")\r\n self.mycol = self.database_handler.set_collection(\"ratings\")\r\n q = self.database_handler.exists(\"username\", self.app.current_user)\r\n if q:\r\n print(\"da\")\r\n self.mycol.update(\r\n {\"username\": self.app.current_user},\r\n {\"username\": self.app.current_user, \"rating\": self.setted_rating + 1})\r\n else:\r\n self.database_handler.insert(\r\n {\"username\": self.app.current_user, \"rating\": self.setted_rating + 1})\r\n self.get_rating_fromdb()\r\n\r\n def database_check(self):\r\n print(\"hai verilor\")\r\n self.database_handler.database_init(\"feedback\")\r\n self.mycol = self.database_handler.set_collection(\"feedbacks\")\r\n self.database_handler.insert(\r\n {\"username\": self.u, \"feedback\": self.u})\r\n\r\n def get_from_database(self):\r\n self.database_handler.database_init(\"feedback\")\r\n self.mycol = self.database_handler.set_collection(\"feedbacks\")\r\n feedbacks = self.mycol.find()\r\n m = self.mycol.find()\r\n cn = 0\r\n for c in m:\r\n cn += 1\r\n 
self.feedbacks[0] = feedbacks[cn - 3][\"feedback\"]\r\n self.feedbacks[1] = feedbacks[cn - 2][\"feedback\"]\r\n self.feedbacks[2] = feedbacks[cn - 1][\"feedback\"]\r\n self.get_rating_fromdb()\r\n\r\n def feedback_screen(self):\r\n running = True\r\n username_input = InputBox(250, 475, 400, 50, \"feedback\")\r\n # menu_button = pygame.Rect(250, 450, 400, 50)\r\n menu_button = pygame.Rect(650, 475, 50, 50)\r\n\r\n while running:\r\n click = False\r\n self.app.screen.fill((0, 0, 0)) #beginning\r\n for event in pygame.event.get(): #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n if event.key == K_RETURN:\r\n u = username_input.handle_event(event) #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if u != None: #end\r\n self.u = u\r\n if self.u:\r\n self.complete_fields = 1\r\n\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n\r\n username_input.handle_event(event)\r\n\r\n username_input.update()\r\n\r\n if (self.succes == 0):\r\n self.app.draw_text('Wrong username or password', self.app.font, (255, 255, 255), self.app.screen, 20,\r\n 500)\r\n # self.app.draw_text('login', self.app.font, (255, 255, 255), self.app.screen, 20, 20)\r\n mx, my = pygame.mouse.get_pos()\r\n if menu_button.collidepoint((mx, my)):\r\n if click and self.complete_fields:\r\n print(\"click\")\r\n self.database_check()\r\n username_input = InputBox(250, 475, 400, 50, \"feedback\")\r\n self.get_from_database()\r\n if (self.succes == 1):\r\n pass\r\n # self.menu_screen.menu()\r\n self.current_collide_rate = -1\r\n for i in range(5):\r\n if self.rating_buttons[i].collidepoint((mx, my)):\r\n self.current_collide_rate = i\r\n if click:\r\n print(self.current_collide_rate)\r\n self.setted_rating = self.current_collide_rate\r\n self.rate()\r\n\r\n self.app.screen.blit(self.play_icon, (650, 475))\r\n\r\n username_input.draw(self.app.screen)\r\n\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n self.app.draw_text(\"Avg rating: \" + str(self.avg_rating), self.font, (255, 255, 255), self.app.screen, 350,\r\n 115)\r\n if (self.setted_rating > -1):\r\n self.app.draw_text(\"Your rating: \" + str(self.setted_rating + 1) + '.0', self.font, (255, 255, 255),\r\n self.app.screen,\r\n 350,\r\n 145)\r\n\r\n for i in range(3):\r\n self.app.draw_text('\"' + self.feedbacks[i] + '\"', self.app.font, (255, 255, 255), self.app.screen, 200,\r\n 225 + 50 * i)\r\n\r\n x = 350\r\n y = 50\r\n\r\n if self.current_collide_rate == -1:\r\n self.current_collide_rate = self.setted_rating\r\n if self.current_collide_rate != -1:\r\n for i in range(self.current_collide_rate + 1):\r\n self.app.screen.blit(self.rate0, (x + i * 35, y))\r\n for i in range(self.current_collide_rate + 1, 5):\r\n self.app.screen.blit(self.rate1, (x + i * 35, y))\r\n else:\r\n for i in range(5):\r\n self.app.screen.blit(self.rate1, (x + i * 35, y))\r\n\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n pygame.display.flip()\r\n\r\n pygame.display.update()\r\n" }, { "alpha_fraction": 0.5653382539749146, "alphanum_fraction": 0.5736793279647827, "avg_line_length": 36.53571319580078, "blob_id": "de926150e29caabb0f2fd091a1abebd4a4a01ddc", "content_id": "813403e2700da832ade2a33c41e675698fb045cb", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 129, "num_lines": 28, "path": "/useful_classes/hands_free.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import speech_recognition as sr\r\n\r\n# code inspired by: https://www.thepythoncode.com/article/using-speech-recognition-to-convert-speech-to-text-python\r\nclass Hands_Free():\r\n def __init__(self):\r\n self.r = sr.Recognizer()\r\n\r\n #beginning: code inspired by: https://www.thepythoncode.com/article/using-speech-recognition-to-convert-speech-to-text-python\r\n def voice_input(self):\r\n run=1\r\n while (run):\r\n try:\r\n with sr.Microphone() as source2:\r\n self.r.adjust_for_ambient_noise(source2, duration=0.2)\r\n audio2 = self.r.listen(source2)\r\n\r\n text = self.r.recognize_google(audio2)\r\n text = text.lower()\r\n\r\n run=0\r\n return text\r\n\r\n except sr.RequestError as e:\r\n print(\"error\".format(e))\r\n\r\n except sr.UnknownValueError:\r\n print(\"error\")\r\n #end: code inspired by: https://www.thepythoncode.com/article/using-speech-recognition-to-convert-speech-to-text-python\r\n" }, { "alpha_fraction": 0.5034779906272888, "alphanum_fraction": 0.5368670225143433, "avg_line_length": 45.6026496887207, "blob_id": "ecf286426c70e3025d5304b04233caee70e13539", "content_id": "56ab12b0783488fc193648bfa8f06b9e24b8887e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7188, "license_type": "no_license", "max_line_length": 143, "num_lines": 151, "path": "/screens/menu_screen.py", "repo_name": "costinaioana18/Museek", "src_encoding": "UTF-8", "text": "import pygame, sys\r\nfrom pygame.locals import *\r\nfrom screens.piano_screen import Piano_screen\r\nfrom screens.general_kno_screen import General_kno_screen\r\nfrom screens.recommended_screen import Recommended_screen\r\nfrom screens.my_account_screen import My_account_screen\r\nfrom screens.piano_chords_screen import Piano_chords_screen\r\nfrom screens.feedback_screen import Feedback_screen\r\n\r\n\r\nclass Menu_screen():\r\n def __init__(self, app):\r\n self.app = app\r\n self.click = False\r\n self.piano_screen = Piano_screen(self.app)\r\n self.chords_screen = Piano_chords_screen(self.app)\r\n self.general_kno_screen = General_kno_screen(self.app)\r\n self.recommended_screen = Recommended_screen(self.app)\r\n self.my_account_screen = My_account_screen(self.app)\r\n self.piano_icon = pygame.image.load('icons/piano.jpg')\r\n self.guitar_icon = pygame.image.load('icons/guitar.jpg')\r\n self.recommended_icon = pygame.image.load('icons/recommended.jpg')\r\n self.general_kno_icon = pygame.image.load('icons/general_kno.jpg')\r\n self.my_account_icon = pygame.image.load('icons/my_account.jpg')\r\n self.piano_icon_hov = pygame.image.load('icons/piano_hov.jpg')\r\n self.guitar_icon_hov = pygame.image.load('icons/guitar_hov.jpg')\r\n self.recommended_icon_hov = pygame.image.load('icons/recommended_hov.jpg')\r\n self.general_kno_icon_hov = pygame.image.load('icons/general_kno_hov.jpg')\r\n self.my_account_icon = pygame.image.load('icons/my_account.jpg')\r\n self.locked_chord_icon = pygame.image.load('icons/locked_chord.jpg')\r\n self.feedback_screen = Feedback_screen(self.app)\r\n self.user_score = 0\r\n\r\n def get_user_score(self):\r\n if self.app.current_user:\r\n database_handler = self.app.database_handler\r\n database_handler.database_init(\"users\")\r\n mycol = database_handler.set_collection(\"users_data\")\r\n user = 
database_handler.exists(\"username\", self.app.current_user)\r\n user_score = 0\r\n if user:\r\n user_score += database_handler.get(\"username\", self.app.current_user, \"piano_c_s\")\r\n user_score += database_handler.get(\"username\", self.app.current_user, \"piano_l_s\")\r\n user_score += database_handler.get(\"username\", self.app.current_user, \"piano_r_s\")\r\n self.user_score = user_score\r\n return user_score\r\n\r\n def menu(self):\r\n\r\n running = True\r\n while running:\r\n click = False\r\n\r\n for event in pygame.event.get(): #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n self.app.screen.fill((0, 0, 0)) #code borrowed and improved from source: https://www.youtube.com/watch?v=0RryiSjpJn0&t=386s\r\n #end\r\n if click:\r\n self.get_user_score()\r\n # self.app.draw_text( \"Nice to C you, \"+self.app.current_user, self.app.font, (255, 255, 255), self.app.screen, 20, 10)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n piano_button = pygame.Rect(250, 150, 400, 50)\r\n guitar_button = pygame.Rect(250, 250, 400, 50)\r\n recommended_button = pygame.Rect(250, 350, 400, 50)\r\n general_kno_button = pygame.Rect(250, 450, 400, 50)\r\n my_account_button = pygame.Rect(400, 525, 100, 25)\r\n feedback_button = pygame.Rect(400, 555, 100, 25)\r\n\r\n if piano_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.piano_icon_hov, (250, 150))\r\n if click:\r\n self.piano_screen.piano()\r\n else:\r\n self.app.screen.blit(self.piano_icon, (250, 150))\r\n\r\n if guitar_button.collidepoint((mx, my)):\r\n if click:\r\n if self.get_user_score() > 10:\r\n self.chords_screen.play_the_piano()\r\n\r\n if recommended_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.recommended_icon_hov, (250, 350))\r\n if click:\r\n self.recommended_screen.recommended()\r\n else:\r\n self.app.screen.blit(self.recommended_icon, (250, 350))\r\n\r\n if general_kno_button.collidepoint((mx, my)):\r\n self.app.screen.blit(self.general_kno_icon_hov, (250, 450))\r\n if click:\r\n self.general_kno_screen.general_kno()\r\n else:\r\n self.app.screen.blit(self.general_kno_icon, (250, 450))\r\n\r\n if my_account_button.collidepoint((mx, my)):\r\n if click:\r\n self.my_account_screen.get_progress()\r\n self.my_account_screen.graduation_expect()\r\n self.my_account_screen.gn_graduation_expect()\r\n self.my_account_screen.chord_graduation_expect()\r\n print(\"here\")\r\n self.my_account_screen.my_account()\r\n else:\r\n self.app.draw_text(\"My account\", pygame.font.SysFont('inkfree', 16, bold=True), (255, 168, 176),\r\n self.app.screen,\r\n 408, 525)\r\n\r\n else:\r\n self.app.draw_text(\"My account\", pygame.font.SysFont('inkfree', 16), (255, 168, 176),\r\n self.app.screen,\r\n 408, 525)\r\n\r\n if feedback_button.collidepoint((mx, my)):\r\n\r\n self.app.draw_text(\"Feedback\", pygame.font.SysFont('inkfree', 16, bold=True), (255, 168, 176),\r\n self.app.screen,\r\n 413, 555)\r\n if click:\r\n self.feedback_screen.get_from_database()\r\n self.feedback_screen.feedback_screen()\r\n else:\r\n\r\n self.app.draw_text(\"Feedback\", pygame.font.SysFont('inkfree', 16), (255, 168, 176), self.app.screen,\r\n 413, 555)\r\n\r\n if self.user_score < 11:\r\n self.app.screen.blit(self.locked_chord_icon, (250, 250))\r\n else:\r\n if guitar_button.collidepoint((mx, 
my)):\r\n self.app.screen.blit(self.guitar_icon_hov, (250, 250))\r\n else:\r\n self.app.screen.blit(self.guitar_icon, (250, 250))\r\n\r\n self.app.screen.blit(self.app.bg, (20, 50))\r\n self.app.screen.blit(self.app.bg1, (700, 50))\r\n\r\n self.app.screen.blit(self.app.logo, (250, 20))\r\n\r\n self.app.screen.blit(pygame.image.load('icons/mouse.png'), (mx - 25, my - 25))\r\n\r\n pygame.display.update()\r\n" } ]
26
TuneKid/ConnectFour
https://github.com/TuneKid/ConnectFour
4be9111f532127f23ab06a8566d50e3b7d2bc8fe
2d540fbe21075f24b7b9e1c173ab1f31acc57233
255bb62ada4ca4f02c5ba27bce83dbcb29ae9419
refs/heads/master
2023-04-05T11:16:22.383528
2021-04-14T23:56:57
2021-04-14T23:56:57
344,299,287
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5627819299697876, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 27.514522552490234, "blob_id": "281d9762c139c077d8536e3f9de8d3978ac81a44", "content_id": "21fa74da3010d09d835e2630fd7366bc10c52613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6873, "license_type": "no_license", "max_line_length": 138, "num_lines": 241, "path": "/main.py", "repo_name": "TuneKid/ConnectFour", "src_encoding": "UTF-8", "text": "N_COLS = 7\nN_ROWS = 6\n\ndef init():\n board = [[\".\"]*N_COLS for i in range(N_ROWS)]\n return board\n\ndef print_board(board):\n print(\"1 2 3 4 5 6 7\")\n print(\"_\"*13)\n for row in board:\n print(\"|\".join(row))\n\n\ndef get_move(board, player):\n col = int(input(\"Player {} enter a column:\".format(player)))\n \n\n if not is_valid_col(col):\n print(\"Invalid Input! Please enter input between 1 and {}\".format(N_COLS))\n col = get_move(board, player)\n\n col -= 1\n return col\n ''''\n for r in range(len(board) - 1, -1, -1):\n if (not placed) and board[r][col] == '.':\n board[r][col] = player\n placed = True\n '''\ndef make_move(board, player, col):\n r = len(board)-1\n placed = False\n while not placed and r >= 0:\n if board[r][col] == '.':\n board[r][col]=player\n placed = True\n print(placed)\n r -= 1\n if not placed:\n print(\"Invalid Input! This column is full!\")\n col = get_move(board, player)\n return board\n\ndef is_valid_col(col):\n return col > 0 and col <= N_COLS\n\ndef has_won(board, player):\n return checkRow(board, player) or checkCol(board,player) or checkLeftDiag(board,player) or checkRightDiag(board, player)\ndef checkRow(board,player):\n numConsecutiveRow = 0\n rRow = 0\n #loop through rows\n #Start at the beginning of each row, and set a counter for each character to the right\n won = False\n while rRow < N_ROWS and not won:\n \n cRow= 0\n numConsecutiveRow = 0\n while cRow < N_COLS and not won: \n if board[rRow][cRow] == player:\n numConsecutiveRow += 1\n print(\"Detected player Row: {} at r: {} c: {} n: {}\".format(player, rRow, cRow, numConsecutiveRow))\n print(\"__\")\n else:\n numConsecutiveRow = 0\n won = numConsecutiveRow >= 4\n if won:\n print(\"HooOooOoOOoOOoOoOOoOooOOoorRrRRRRrrrrRrrrRrRrrrrRrrrAAyAYAYYYyyyYYYyYYyYY player {} won!\".format(player))\n\n cRow += 1\n rRow += 1\n return won\n\ndef checkCol(board, player):\n \n numConsecutiveCol = 0\n rCol = 0\n #loop through rows\n #Start at the beginning of each row, and set a counter for each character to the right\n wonCol = False\n while rCol < N_COLS and not wonCol:\n \n cCol= 0\n numConsecutiveCol = 0\n while cCol < N_ROWS and not wonCol: \n if board[cCol][rCol] == player:\n numConsecutiveCol += 1\n \n else:\n numConsecutiveCol = 0\n wonCol = numConsecutiveCol >= 4\n if wonCol:\n print(\"HooOooOoOOoOOoOoOOoOooOOoorAAyAYAYYYyyyYYYyYYyY player {} won!\".format(player))\n\n cCol += 1\n rCol += 1\n return wonCol\n \n\ndef checkLeftDiag(board,player):\n rLeftDiag = 0\n i = 0\n #loop through rows\n #Start at the beginning of each row, and set a counter for each character to the right\n won = False\n while rLeftDiag < N_ROWS and not won:\n \n cLeftDiag= 0\n numConsecutiveRightDiag = 0\n while cLeftDiag < N_COLS and not won and rLeftDiag < N_ROWS: \n if board[rLeftDiag][cLeftDiag] == player:\n numConsecutiveRightDiag += 1\n print(\"Detected player Right Diag: {} at r: {} c: {} n: {}\".format(player, rLeftDiag, cLeftDiag, numConsecutiveRightDiag))\n print(\"__\")\n \n else:\n numConsecutiveRightDiag = 0\n 
won = numConsecutiveRightDiag >=4\n            if won:\n                #Check diag logic goes here\n                print(\"HooOooOoOOoOOoOoOOoOooOOoorRrRRRRrrrrRrrrRrRrrrrRrrrAAyAYAYYYyyyYYYyYYyYY player {} won!\".format(player))\n            cLeftDiag += 1\n            rLeftDiag += 1\n            \n\n        i += 1\n        rLeftDiag = i\n\n\n\n    cLeftDiag = 1\n    i = cLeftDiag\n\n    while cLeftDiag < N_COLS and not won:\n        \n        rLeftDiag = 0\n        numConsecutiveRightDiag = 0\n        while cLeftDiag < N_COLS and not won and rLeftDiag < N_ROWS: \n            if board[rLeftDiag][cLeftDiag] == player:\n                numConsecutiveRightDiag += 1\n                print(\"Detected player Right Diag: {} at r: {} c: {} n: {}\".format(player, rLeftDiag, cLeftDiag, numConsecutiveRightDiag))\n                print(\"__\")\n                \n            else:\n                numConsecutiveRightDiag = 0\n            won = numConsecutiveRightDiag >=4\n            if won:\n                #Check diag logic goes here\n                print(\"HooOooOoOOoOOoOoOOoOooOOoorRrRRRRrrrrRrrrRrRrrrrRrrrAAyAYAYYYyyyYYYyYYyYY player {} won!\".format(player))\n            cLeftDiag += 1\n            rLeftDiag += 1\n            \n\n        i += 1\n        cLeftDiag = i\n\n    return won \ndef checkRightDiag(board, player):\n    rRightDiag = len(board) - 1\n    i = 0\n    \n    won = False\n    while rRightDiag >= 0 and not won:\n        \n        cRightDiag= 0\n        numConsecutiveRightDiag = 0\n        while cRightDiag < N_COLS and not won and rRightDiag >= 0: \n            if board[rRightDiag][cRightDiag] == player:\n                numConsecutiveRightDiag += 1\n                \n                print(\"__\")\n                \n            else:\n                numConsecutiveRightDiag = 0\n            won = numConsecutiveRightDiag >=4\n            if won:\n                print(\"HooOooOoOOoOOoOoOOoOooOOoorRrRRRRrrrrRrrrRrRrrrrRrrrAAyAYAYYYyyyYYYyYYyYY player {} won!\".format(player))\n            cRightDiag += 1\n            rRightDiag -= 1\n            \n\n        i += 1\n        rRightDiag = N_ROWS-1-i\n\n\n    cRightDiag = 1\n    i = cRightDiag\n\n    #loop through rows\n    #Start at the beginning of each row, and set a counter for each character to the right\n    while cRightDiag< N_COLS and not won:\n        \n        rRightDiag = N_ROWS-1\n        numConsecutiveRightDiag = 0\n        while cRightDiag < N_COLS and not won and rRightDiag >= 0: \n            if board[rRightDiag][cRightDiag] == player:\n                numConsecutiveRightDiag += 1\n                \n                print(\"__\")\n                \n            else:\n                numConsecutiveRightDiag = 0\n            won = numConsecutiveRightDiag >=4\n            if won:\n                #Check diag logic goes here\n                print(\"HooOooOoOOoOOoOoOOoOooOOoorRrRRRRrrrrRrrrRrRrrrrRrrrAAyAYAYYYyyyYYYyYYyYY player {} won!\".format(player))\n            cRightDiag += 1\n            rRightDiag -= 1\n            \n\n        i += 1\n        cRightDiag = i\n    return won\n\n\n    \n    \n    \ndef main():\n\n    player = '#'\n    board = init()\n    print_board(board)\n    num_turns = 1\n\n    game_over = False\n    while not game_over and num_turns <= N_ROWS * N_COLS:\n        col = get_move(board, player)\n        board = make_move(board, player, col)\n        game_over = has_won(board, player)\n        player = \"@\" if player == \"#\" else \"#\"\n        num_turns += 1\n\n\n    print_board(board)\n    if not game_over:\n        print(\"Tie Game\")\n    else:\n        print(\"Player {} wins!\".format(\"@\" if player == \"#\" else \"#\"))\nmain()\n\n" } ]
1
dvelazquez/DriverDetect
https://github.com/dvelazquez/DriverDetect
63e0faa209ee536518cdc126cbb85ca124b67844
bba6728c36ae460e88a89257386cb32a7b9c98eb
c9958d3db905cb6822cc3d51460b7f5f61ba05c9
refs/heads/master
2020-03-18T16:29:12.399305
2018-07-22T03:05:37
2018-07-22T03:05:37
134,967,990
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5136457681655884, "alphanum_fraction": 0.6054959297180176, "avg_line_length": 35.390411376953125, "blob_id": "8b24d3e3f3df898c9575385cd3b27273917ac6bb", "content_id": "2e0adf47ef0f2fa9e9d763b424f52407ec1d95db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5313, "license_type": "no_license", "max_line_length": 195, "num_lines": 146, "path": "/DriverDetect.py", "repo_name": "dvelazquez/DriverDetect", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\nDaniel Velazquez - May 2018\nDriver Detection and Sleepy Warning\nbased on OpenCV Sample facedetect.py\n\nface detection using haar cascades\n\nUSAGE:\n facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]\n'''\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\nfrom timeit import default_timer as timer\n\n\nimport numpy as np\nimport cv2\n\n# local modules\nfrom video import create_capture\nfrom common import clock, draw_str\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# Warning signs\nTriangle = cv2.imread('Warning.svg')\n\nFrameCount= 1.0\nDriverAway= 1.0\nDriverPresent= 1.0\nDriverSleepy= 1.0\n\nVertical= 0\n\nGeneralCounter_start = timer()\nDriverOKCounter_start = timer()\nSleepyCounter_start = timer()\nDriverNGCounter_start = timer()\nGeneralCounter_end = timer()\nDriverOKCounter_end = timer()\nSleepyCounter_end = timer()\nDriverNGCounter_end = timer()\n\n\ndef detect(img, cascade):\n rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE)\n if len(rects) == 0:\n return []\n rects[:,2:] += rects[:,:2]\n return rects\n\ndef draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)\n\ndef ScanFace(img, rects):\n\twhile Vertical < y2:\n\t\tcv2.line(img,(x1,y1),(x2,Vertical),(255,255,255),1)\n\t\tVertical += 1\n\n\n\nif __name__ == '__main__':\n import sys, getopt\n print(__doc__)\n\n args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])\n try:\n video_src = video_src[0]\n except:\n video_src = 0\n args = dict(args)\n cascade_fn = args.get('--cascade', \"data/haarcascade_frontalface_alt.xml\")\n nested_fn = args.get('--nested-cascade', \"data/haarcascade_eye.xml\")\n\n cascade = cv2.CascadeClassifier(cascade_fn)\n nested = cv2.CascadeClassifier(nested_fn)\n\n cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')\n\n while True:\n ret, img = cam.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n\n rects = detect(gray, cascade)\n vis = img.copy()\n output = vis.copy()\n overlay = vis.copy()\n draw_rects(vis, rects, (0, 255, 0))\t\t\t\t\t# GREEN Square on Face BGR\n for x1, y1, x2, y2 in rects:\n\t\t\tVertical=y1\n\t\t\twhile Vertical < y2:\n\t\t\t\tcv2.line(vis,(x1,Vertical),(x2,Vertical),(255,0,0),1)\n\t\t\t\tVertical+=5\n if (len(rects)) == 0:\n\t\t\tDriverAway=DriverAway+1.0\n\t\t\tcv2.putText(vis,'Driver Away',(10,60), font, 2,(255,255,255),2,cv2.LINE_AA)\n\n if not nested.empty():\n for x1, y1, x2, y2 in rects:\n #cv2.rectangle(overlay, (0,0),(1200,y1), (0, 255, 0), -1) # TOP GREEN Line\n #cv2.rectangle(overlay, (0,y2),(1200,600), (0, 255, 0), -1) # GREEN RED Line\n #cv2.rectangle(overlay, (0,0),(x1,600), (0, 255, 0), -1) # LEFT GREEN Line\n #cv2.rectangle(overlay, (x2,0),(1200,600), (0, 255, 0), -1) # RIGHT GREEN Line\n cv2.addWeighted(overlay, 0.2, vis, 1 - 0.0, 0, vis)\n roi = 
gray[y1:y2, x1:x2]\n vis_roi = vis[y1:y2, x1:x2]\n subrects = detect(roi.copy(), nested)\n #draw_rects(vis_roi, subrects, (255, 0, 0))\n cv2.putText(vis,'Driver Present',(10,60), font, 2,(255,255,255),2,cv2.LINE_AA)\n DriverPresent=DriverPresent+1.0\n if DriverPresent>=15:\n\t\t\t\t\tif (len(subrects)) < 2:\n\t\t\t\t\t\tcv2.putText(vis,'Driver Sleepy',(10,120), font, 2,(255,255,255),2,cv2.LINE_AA)\n\t\t\t\t\t\tcv2.rectangle(overlay, (x1,y1),(x2,y2), (0, 0, 255), -1) # RED Square\n\t\t\t\t\t\tcv2.addWeighted(overlay, 0.5, vis, 1, 1, vis)\n\t\t\t\t\t\tDriverSleepy=DriverSleepy+1.0\n\t\t\t\t\t\tDriverPresent=DriverPresent-1.0\n\n\t\t\n\t\t\n FrameCount=FrameCount+1\n #print(\"Frame Count\",FrameCount)\n #print(\"Driver Away\",DriverAway)\n #print(\"Driver Present\",DriverPresent)\n #print(\"Driver Sleepy\",DriverSleepy)\n #cv2.putText(vis,'Frame Count',(785,490), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis,'Driver Away',(785,505), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis,'Driver Present',(785,520), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis,'Driver Sleepy',(785,535), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n #cv2.putText(vis, \"{:.0f}\".format(FrameCount),(910,490), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis, \"{:.1f}\".format((DriverAway/FrameCount)*100.0),(910,505), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis, \"{:.1f}\".format(((DriverPresent-1)/FrameCount)*100.0),(910,520), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(vis, \"{:.1f}\".format((DriverSleepy/FrameCount)*100.0),(910,535), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n #cv2.putText(vis,'Total:',(785,475), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n #cv2.putText(vis, \"{:.1f}\".format( ((DriverSleepy/FrameCount)*100.0)+((DriverPresent/FrameCount)*100.0)+((DriverAway/FrameCount)*100.0) ),(910,475), font, 0.5,(255,255,255),1,cv2.LINE_AA)\n \n cv2.imshow('Driver Detection', vis)\n if cv2.waitKey(5) == 27:\n break\n cv2.destroyAllWindows()\n" } ]
1
rocksonchang/Fringe-Reduction
https://github.com/rocksonchang/Fringe-Reduction
18fd218863d131d61ffd5daf6b822b0d46f9717c
daf3219df23c935f9e87406ce31b1774f66d612c
6a56c8331e3fe9b6915b3dd22970cb278bdd59f2
refs/heads/master
2020-04-02T03:22:35.297967
2018-04-22T21:49:21
2018-04-22T21:49:21
60,921,975
0
0
null
2016-06-11T18:32:28
2016-06-12T22:51:42
2018-04-22T21:49:22
Jupyter Notebook
[ { "alpha_fraction": 0.728735625743866, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 42.400001525878906, "blob_id": "2a93b61e1e7e24a0e00aaf6ae0a63a233e1a61f1", "content_id": "543155681ca82b48e6b68cc8c95173551422d27b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 435, "license_type": "no_license", "max_line_length": 108, "num_lines": 10, "path": "/README.md", "repo_name": "rocksonchang/Fringe-Reduction", "src_encoding": "UTF-8", "text": "# Fringe-Reduction\nRepository for implementation of fringe reduction alogirthm\n\nThis project has been coded in Python and presented as an iPython notebook. It contains the following files\n\n`Fringe-Reduction-v1p2.ipynb` - ipython notebook\n\n`SchmidtDecomp.py` - python module containing functions for the algorithm\n\n`Data` - test data set, consisting of .tif image pairs (absorption and reference)\n\n" }, { "alpha_fraction": 0.5977542996406555, "alphanum_fraction": 0.616908848285675, "avg_line_length": 54.925926208496094, "blob_id": "805356e26bba9b3c011ae94454334a6df13c34d6", "content_id": "d5da953f08a99f676af977a8dd69480fcdf6476c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/SchmidtDecomp.py", "repo_name": "rocksonchang/Fringe-Reduction", "src_encoding": "UTF-8", "text": "import numpy as np\n\n# function to generate basis of noise vectors. \n# Uses Schmidt decomposition of the array of reference images. \n# INPUT: imgRef are the reference images (3D array -- a 1D array of 2D images). \n# OUTPUT: refBasis are the generated basis images (3D array -- a 1D array of 2D images).\ndef genBasis(imgRef): \n refBasis=np.zeros((len(imgRef),255, 256))\n for i in range(0,len(imgRef)):\n refBasis[i,:,:]=imgRef[i,:,:]/np.sqrt(np.sum(imgRef[i,:,:]*imgRef[i,:,:]))\n for j in range(0,i): \n coeff = np.sum(refBasis[i,:,:]*refBasis[j,:,:]) \n refBasis[i,:,:]=refBasis[i,:,:]-coeff*refBasis[j,:,:]\n refBasis[i,:,:]=refBasis[i,:,:]/np.sqrt(np.sum(refBasis[i,:,:]*refBasis[i,:,:]))\n return refBasis \n\n# function to generate matched reference images for absorption images\n# Projects the raw absorption image onto the basis of noise vectors\n# INPUT: imgAbs are the raw absorption images (3D array -- a 1D array of 2D images). \n# INPUT: refBasis is the basis of noise vectors (3D array -- a 1D array of 2D images). \n# OUTPUT: modRef are the generated matched reference images (3D array -- a 1D array of 2D images).\ndef genReference(imgAbs, refBasis): \n modRef = np.zeros((len(imgAbs),255,256))\n for i in range(len(imgAbs)): \n for j in range(len(refBasis)): \n modRef[i,:,:] = modRef[i,:,:] + np.sum(imgAbs[i,:,:]*refBasis[j,:,:]) *refBasis[j,:,:] \n return modRef\n " } ]
2
zhouly008/HogwartsSDET11
https://github.com/zhouly008/HogwartsSDET11
1b9a37d889f4bade54924cf09db2354774106575
d99b18e2154478403c2f3167223db420156c7f4a
2590a289909ba18f12e82f567da91e712b1e1998
refs/heads/master
2021-03-15T15:46:41.992918
2020-05-03T15:43:44
2020-05-03T15:43:44
246,862,537
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5790441036224365, "alphanum_fraction": 0.5808823704719543, "avg_line_length": 24.809524536132812, "blob_id": "07f748b2ac30f84a784677c34821da1a8200784e", "content_id": "33d62d7250025dc6458b608d553feeb891c8d4c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 71, "num_lines": 21, "path": "/test_requests/test_wework/testcase/test_wework.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "\nfrom test_requests.test_wework.api.wework import WeWork\n\n\nclass TestWeWork:\n # secret = \"3XBa77sS_W304tGdt-Sc-YManyJ5sKlwq4dSzrIzE_g\"\n\n @classmethod\n def setup_class(cls):\n # 调用密钥,每个部门都有自己的密钥 #\n cls.token= WeWork.get_token()\n\n def test_get_token(self):\n # r = requests.get(\n # self.token_url,\n # params={\"corpid\": self.corpid, \"corpsecret\": self.secret}\n # )\n r = WeWork.get_access_token(WeWork.secret)\n assert r[\"errcode\"] == 0\n\n def test_get_token_exist(self):\n assert self.token is not None\n\n" }, { "alpha_fraction": 0.5880614519119263, "alphanum_fraction": 0.5951536893844604, "avg_line_length": 32.84000015258789, "blob_id": "33d80dad673e7c15c812d88acd350ce5c95176ee", "content_id": "015e944870b6b2d3beb7feaafdea79861678b83a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 109, "num_lines": 50, "path": "/test_appium/page/app.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "from appium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom test_appium.page.base_page import BasePage\nfrom test_appium.page.main import Main\n\n\nclass App(BasePage):\n _package = \"com.xueqiu.android\"\n _activity = \"view.WelcomeActivityAlias\"\n\n def start(self):\n # 判断如果为空才会创建,不会空则重用,绕过appium重新启动的过程\n if self._driver is None:\n caps = {}\n caps[\"platformName\"] = \"android\"\n caps[\"deviceName\"] = \"hogwarts\"\n # caps[\"appPackage\"] = \"com.xueqiu.android\"\n caps[\"appPackage\"] = self._package\n # caps[\"appActivity\"] = \".view.WelcomeActivityAlias\"\n caps[\"appActivity\"] = self._activity\n caps[\"udid\"]= os.get\n caps[\"noReset\"] = True\n # caps[\"dontStopAppOnReset\"] = True\n # caps[\"unicodeKeyboard\"] = True\n # caps[\"resetKeyboard\"] = True\n # caps[\"skipServerInstallation\"] = True\n # caps[\"chromedriverExecutableDir\"]=\"/Users/seveniruby/projects/chromedriver/all\"\n caps[\"chromedriverExecutable\"] = \"/Users/zhoulingyun/projects/chromedriver/all/chromedriver_2.20\"\n\n # caps['avd'] = 'Pixel_2_API_23'\n\n self._driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\n self._driver.implicitly_wait(30)\n else:\n print(self._driver)\n # todo : kill app start app,确保从首页开始进入\n self._driver.start_activity(self._package, self._activity)\n\n return self\n\n def restart(self):\n pass\n\n def stop(self):\n pass\n\n def main(self) -> Main:\n # todo: wait mian page\n return Main(self._driver)\n" }, { "alpha_fraction": 0.5844748616218567, "alphanum_fraction": 0.5890411138534546, "avg_line_length": 18.909090042114258, "blob_id": "9abfb4cebe9e1f7be56246ab919643cf96331b14", "content_id": "663d046ba1a43ab97d5cb525ce34dc15945fb03a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": 
"/test_requests/test_wework/testcase/test_tag.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "from test_requests.test_wework.api.tag import Tag\n\n\nclass TestTag:\n @classmethod\n def setup_class(cls):\n cls.tag = Tag()\n\n def test_get(self):\n r = self.tag.get()\n assert r['errcode'] == 0\n" }, { "alpha_fraction": 0.5729537606239319, "alphanum_fraction": 0.5829181671142578, "avg_line_length": 30.954545974731445, "blob_id": "bd5bf730f0c3b4e3c2ccec4e4ecd8ed4888d5057", "content_id": "503057e6577bd79c1662cc1eb790ed1adfb3015a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 108, "num_lines": 44, "path": "/test_appium/page/app1.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "from appium import webdriver\nfrom test_appium.page.base_page1 import BasePage\nfrom test_appium.page.main1 import Main\n\n\nclass App(BasePage):\n _package = \"com.xueqiu.android\"\n _activity = \".view.WelcomeActivityAlias\"\n\n def start(self):\n if self._driver is None:\n caps = {}\n caps[\"platformName\"] = \"android\"\n caps[\"deviceName\"] = \"hogwarts\"\n caps[\"appPackage\"] = self._package\n caps[\"appActivity\"] = self._activity\n caps[\"noReset\"] = True\n # caps[\"dontStopAppOnReset\"] = True\n # caps[\"unicodeKeyboard\"] = True\n # caps[\"resetKeyboard\"] = True\n # caps[\"skipServerInstallation\"] = True\n # caps[\"chromedriverExecutableDir\"]=\"/Users/seveniruby/projects/chromedriver/all\"\n caps[\"chromedriverExecutable\"] = \"/Users/seveniruby/projects/chromedriver/all/chromedriver_2.20\"\n\n # caps['avd'] = 'Pixel_2_API_23'\n\n self._driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\n self._driver.implicitly_wait(30)\n else:\n print(self._driver)\n # todo:\n self._driver.start_activity(self._package, self._activity)\n\n return self\n\n def restart(self):\n pass\n\n def stop(self):\n pass\n\n def main(self) -> Main:\n # todo: wait man page\n return Main(self._driver)" }, { "alpha_fraction": 0.6053131222724915, "alphanum_fraction": 0.6204933524131775, "avg_line_length": 26.789474487304688, "blob_id": "5ed22ef7b6bcfcb807d7c19d44078343aec13330", "content_id": "897b22669ef1f07d466e98d6a09a79e967e96b7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 83, "num_lines": 19, "path": "/test_appium/testcase/test_search.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom test_appium.page.app import App\n\n\nclass TestSearch:\n def setup(self):\n self.main = App().start().main()\n\n def test_search(self):\n assert self.main.goto_search_page.search(\"alibaba\").get_price(\"BABA\") > 190\n\n # 参数化,三个参数的名字\n @pytest.mark.parametrize(\"key, stock_type, price\", [\n (\"alibaba\", \"BABA\", 190),\n (\"JD\", \"JD\", 20)\n ])\n def test_search_data(self, key, stock_type, price):\n assert self.main.goto_search_page.search(key).get_price(stock_type) > price" }, { "alpha_fraction": 0.6200787425041199, "alphanum_fraction": 0.6328740119934082, "avg_line_length": 30.75, "blob_id": "a5a1adda3e3cd4a085df5c45a283c53785883a6e", "content_id": "206546f2077803c4995ce9cd58c8219771fbc59f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1202, "license_type": "no_license", "max_line_length": 68, "num_lines": 32, "path": 
"/test_requests/test_wework/testcase/test_groupchat.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "from test_requests.test_wework.api.groupchat import GroupChat\nfrom test_requests.test_wework.api.wework import WeWork\n\n\nclass TestWeWork:\n @classmethod\n def setup_class(cls):\n cls.groupchat = GroupChat()\n # 调用密钥,每个部门都有自己的密钥,# 在case的内部的groupchat搞定了token的获取\n # cls.token = WeWork.get_token(cls.groupchat.secret)\n\n # 把稳定的具有业务含义的功能封装到细节里,入参决定要测试的数据,返回值决定要断言的数据,case是\n # 稳定的,po是封装的思想\n def test_groupchat_get(self):\n r = self.groupchat.list()\n assert r['errcode'] == 0\n\n def test_groupchat_get_status(self):\n r = self.groupchat.list(offset=0, limit=10, status_filter=1)\n assert r[\"errcode\"] == 0\n\n # 查询群聊的人员信息,直接返回json\n def test_groupchat_detail(self):\n r = self.groupchat.list(offset=0, limit=10, )\n assert r['errcode'] == 0\n\n chat_id = r['group_chat_list'][0]['chat_id']\n r = self.groupchat.get(chat_id)\n\n # print(r.json())\n assert r['errcode'] == 0\n assert len(r['group_chat']['member_list']) > 0\n" }, { "alpha_fraction": 0.5471698045730591, "alphanum_fraction": 0.5471698045730591, "avg_line_length": 20.15999984741211, "blob_id": "f367c2fad668bf979636b3c2379642c53d5d4e21", "content_id": "d485ebadec57960d1a32f0a1c3a8c172808a821a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 86, "num_lines": 25, "path": "/test_requests/test_wework/api/tag.py", "repo_name": "zhouly008/HogwartsSDET11", "src_encoding": "UTF-8", "text": "import requests\n\nfrom test_requests.test_wework.api.wework import WeWork\n\n\nclass Tag(WeWork):\n def get(self):\n url = \"https://qyapi.weixin.qq.com/cgi-bin/externalcontact/get_corp_tag_list?\"\n r = requests.post(\n url,\n params={'access_token': self.get_token(self.secret)},\n json={\"tag_id\": []}\n )\n # 打印一下输出,返回json\n self.format(r)\n return r.json()\n\n def add(self):\n pass\n\n def update(self):\n pass\n\n def delete(self):\n pass\n\n" } ]
7
alexandra-blaine/AssignmentsPPZNK
https://github.com/alexandra-blaine/AssignmentsPPZNK
e9736fff362147b8131e186601e09abe1e227106
866c460375808c43596caaf442c326eac7b5d1dc
5ce373538baa99cd8f8d3bb9e8970e67e9da61ec
refs/heads/master
2020-05-07T08:44:16.119638
2019-04-15T16:14:48
2019-04-15T16:14:48
180,344,035
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.35153019428253174, "alphanum_fraction": 0.5128205418586731, "avg_line_length": 22.081632614135742, "blob_id": "78c9ac7f88ecca40267e858b4f48ba4fda1c620b", "content_id": "ab03b0957b2d127499b22946b943e9d86501f698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 90, "num_lines": 49, "path": "/initials.pyde", "repo_name": "alexandra-blaine/AssignmentsPPZNK", "src_encoding": "UTF-8", "text": "def setup():\r\n size(750,500)\r\n textSize(125)\r\n textAlign(CENTER)\r\n\r\n \r\ndef draw():\r\n \r\n if(((mouseX>=460 and mouseX<=540)and(mouseY>=25 and mouseY<=150))or keyPressed==True):\r\n fill(255,15,2)\r\n text(\"A\",100,125)\r\n else:\r\n fill(127,7,196)\r\n text(\"A\",100,125)\r\n \r\n text(\" \",300,125)\r\n \r\n if(((mouseX>=60 and mouseX<=140)and(mouseY>=25 and mouseY<=150))or keyPressed==True):\r\n fill(255,175,2)\r\n text(\"B\",500,125)\r\n else:\r\n fill(255,2,196)\r\n text(\"B\",500,125)\r\n \r\n noFill()\r\n beginShape()\r\n curveVertex(45, 350)\r\n curveVertex(45, 350)\r\n curveVertex(100, 370)\r\n curveVertex(150, 380)\r\n curveVertex(200, 390)\r\n curveVertex(250, 400)\r\n curveVertex(300, 410)\r\n curveVertex(350, 420)\r\n curveVertex(400, 430)\r\n curveVertex(450, 440)\r\n curveVertex(500, 450)\r\n curveVertex(550, 460)\r\n curveVertex(600, 470)\r\n curveVertex(600, 470)\r\n endShape()\r\n \r\ndef keyPressed():\r\n if key==CODED:\r\n if keyCode==RIGHT: \r\n fill(255,2,196)\r\n text(\"B\",100,300)\r\n fill(127,7,196)\r\n text(\"A\",500,300)\r\n \r\n\r\n\r\n \r\n" } ]
1
basketdump/horoscope
https://github.com/basketdump/horoscope
76742575f9f4fc6516ecb32f0fb3fb4b97b46aed
8fba5310b99a528da40bba353dab3b9aad3394ef
1da869635fbf9dbd8c9b4f45f66246080c2bd76f
refs/heads/master
2018-11-24T05:52:19.347509
2018-09-04T16:36:56
2018-09-04T16:36:56
113,056,732
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6560846567153931, "alphanum_fraction": 0.6560846567153931, "avg_line_length": 22.625, "blob_id": "6a6311b979e2fd6f1af2f059e250548b71b1cbd2", "content_id": "8d3edb6ad0940ebc2f07c5ba23546a773648e828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/main.py", "repo_name": "basketdump/horoscope", "src_encoding": "UTF-8", "text": "'''horoscope grabber service'''\nimport Horoscope\n\n\ndef write_to_file(my_horoscope, path):\n '''write to target file'''\n horoscope_page = open(path, 'w')\n horoscope_page.write(my_horoscope)\n horoscope_page.close()\n\n\n\ndef main():\n '''please change sign, day, and html path to fit your needs'''\n sign = 'aquarius'\n day = 'today' # can be 'today', 'yesterday', or 'tomorrow'\n file_path = '/home/minty/horoscope.html'\n\n my_horoscope = Horoscope.get_horoscope(sign, day)\n print(my_horoscope)\n write_to_file(my_horoscope, file_path)\n\n\nmain()\n" }, { "alpha_fraction": 0.679721474647522, "alphanum_fraction": 0.6805918216705322, "avg_line_length": 31.828571319580078, "blob_id": "e3b5d995359be49b032aca76a963fcdf971110e3", "content_id": "56db805b9d1c09f617baf3f6ba6cf93af4d2501f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 100, "num_lines": 35, "path": "/Horoscope.py", "repo_name": "basketdump/horoscope", "src_encoding": "UTF-8", "text": "'''\nThis module helps pull daily horoscope from\nhttps://www.astrology.com/horoscope/daily/gemini.html\n'''\nimport urllib.request\nimport bs4 as bs\n\n\ndef setup_sauce(sign, day):\n '''setup our sauce. if today horoscope is chosen, it actually doesn't add anything to the url'''\n target_url = 'https://www.astrology.com/horoscope/daily/'\n if day.lower() != 'today':\n target_url += day.lower() + '/' + sign.lower() + '.html'\n else:\n target_url += sign.lower() + '.html'\n return urllib.request.urlopen(target_url).read()\n\n\ndef setup_soup(sign, day):\n '''using lxml for our bs object and return it to grab_paragraph for grabbing horoscope text'''\n sauce = setup_sauce(sign, day)\n soup = bs.BeautifulSoup(sauce, 'lxml')\n return soup\n\n\ndef grab_paragraph(sign, day):\n '''scrapes the appropriate paragraph from astrology.com using sign and day'''\n soup = setup_soup(sign, day)\n return soup.p.span.next.next\n\n\ndef get_horoscope(sign, day='today'):\n '''will grab dpriaily horoscope from astrology.com. day must be yesterday, today, or tomorrow'''\n horoscope = grab_paragraph(sign, day)\n return horoscope\n" } ]
2
Ting-Kim/master-py
https://github.com/Ting-Kim/master-py
7300a4a4fcd2bff7cac2c2099e249b7e3576f8db
d4116e87f9d12640ffab1f161c37dfbecc2ebff8
ed0a4a1727626cb81968f9ff94b11fba6cf4c167
refs/heads/master
2022-12-25T04:11:56.736797
2020-09-25T09:30:01
2020-09-25T09:30:01
275,519,152
0
0
null
2020-06-28T06:08:33
2020-06-28T05:31:26
2020-06-28T05:31:24
null
[ { "alpha_fraction": 0.5668523907661438, "alphanum_fraction": 0.5779944062232971, "avg_line_length": 18.94444465637207, "blob_id": "4eaa35b28d276fa2fad447e11c72ea01f67e25c9", "content_id": "4716e6a2c06f573594778fb146b768fca50c6bfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 44, "num_lines": 36, "path": "/algorithm/1_basic/2_linked_list/n1158.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# 요세푸스 순열 구하는 함수 정의\ndef yosePus(tolerance, list):\n answer = []\n # 초기 인덱스\n index = tolerance-1\n answer.append(list.pop(index))\n while(len(list) != 0):\n size = len(list)\n index += (tolerance-1)\n # list의 size를 초과하지 않도록 0~size 값 부여\n index = index % size\n answer.append(list.pop(index))\n return answer\n\n\nlength, start = map(int, input().split())\n\n# 입력값 Check\n# print(length)\n# print(start)\n\ngroup = []\nfor i in range(length):\n group.append(i+1)\n\nanswer = yosePus(start, group)\n\n# 값을 문자열로 포맷팅\nanswerP = \"<\"\nfor j in range(length):\n if j != length-1:\n answerP += \"{0}, \".format(answer[j])\n else:\n answerP += \"{0}>\".format(answer[j])\n\nprint(answerP)\n" }, { "alpha_fraction": 0.5255306959152222, "alphanum_fraction": 0.5708548426628113, "avg_line_length": 20.518518447875977, "blob_id": "ab28d56071369b3f003aeaf891024c4ea05d28c1", "content_id": "152eade6474dd1a6db8299362d280b6901f83173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1917, "license_type": "no_license", "max_line_length": 55, "num_lines": 81, "path": "/algorithm/1_basic/2_linked_list/N1021.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# acmicpc.net/problem/1021\n\n\n# 입력 리스트 요소 -1 기능(왼쪽 move, delete 시)\ndef minusList(list1, list2):\n size_list1 = len(list1)\n size_list2 = len(list2)\n for i in range(size_list1):\n if list1[i] <= 1:\n list1[i] = size_list2\n else:\n list1[i] -= 1\n\n\n# 입력 리스트 요소 +1 기능(오른쪽 move시)\ndef plusList(list1, list2):\n size_list1 = len(list1)\n size_list2 = len(list2)\n for i in range(size_list1):\n if list1[i] > size_list2-1:\n list1[i] = 1\n else:\n list1[i] += 1\n\n\n# a1 삭제 기능\ndef deleteInitial(list1, list2):\n del list2[0]\n # print(\"현재 list2의 길이 : \", len(list2))\n minusList(list1, list2)\n\n\n# a(index)를 a1자리까지 왼쪽으로 이동\ndef leftMove(list1, index, list2):\n global result\n for i in range(index):\n a = list2[0]\n del list2[0]\n list2.append(a)\n result += 1\n minusList(list1, list2)\n\n\n# a(index)를 a1자리까지 오른쪽으로 이동\ndef rightMove(list1, index, list2):\n size_list2 = len(list2)\n global result\n for i in range(len(list2)-index):\n a = list2[size_list2-1]\n del list2[size_list2-1]\n list2.insert(0, a)\n plusList(list1, list2)\n result += 1\n\n\nn, m = map(int, input().split())\nlist = list(map(int, input().split()))\n\n# print(n, m)\n# print(list)\n\nglobal result\nresult = 0\nlList = []\n# n의 크기만큼 리스트에 1~n 값 삽입\nfor i in range(n):\n lList.append(i+1)\n\n\n# 연산처리 해야하는 element 마다 값에 따라 연산 수행\nfor i in range(len(list)):\n if list[i] == 1:\n deleteInitial(list, lList)\n elif list[i] <= (len(lList)+1)//2 and list[i] != 1:\n leftMove(list, list[i]-1, lList)\n deleteInitial(list, lList)\n else:\n rightMove(list, list[i]-1, lList)\n deleteInitial(list, lList)\n\nprint(result)\n" }, { "alpha_fraction": 0.3380816578865051, "alphanum_fraction": 0.45773980021476746, "avg_line_length": 22.399999618530273, "blob_id": "c547c1b1753cde02247db4d6553dd6833a8916b2", "content_id": 
"bfef713679046f7c768da535a108658fecb230e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 95, "num_lines": 45, "path": "/MIT_Algorithm_with_py/searchMax2D.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# 2차원 배열에서의 극대값 찾기.\n\n# i ,j = 탐색을 시작할 세로, 가로 인덱스값\n\n\ndef searchMax2D(i, array):\n\n rowMax = max(array[i])\n x = array[i].index(rowMax)\n\n # 중앙에서부터 시작해서 행이 모서리에 위치하였을 때는 반환.\n\n if i <= 0:\n if array[i][x] >= array[i+1][x]:\n return array[i][x]\n else:\n return None\n\n if i >= len(array)-1:\n if array[i][x] >= array[i-1][x]:\n return array[i][x]\n else:\n return None\n\n if array[i][x] < array[i-1][x]:\n result = searchMax2D(i-1, array)\n return result\n elif array[i][x] < array[i+1][x]:\n result = searchMax2D(i+1, array)\n return result\n else:\n return rowMax\n\n\narray = [[63, 57, 50, 37, 44],\n [6, 30, 10, 24, 42],\n [30, 29, 10, 20, 43],\n [7, 8, 49, 48, 44],\n [32, 55, 54, 53, 4],\n [33, 28, 6, 54, 74]]\n# [[63, 57, 50, 37, 44], [6, 89, 74, 24, 50],\n# [30, 29, 10, 87, 89], [75, 8, 70, 56, 79], [99, 55, 54, 98, 4], [33, 28, 6, 89, 74]]\n\nprint(searchMax2D((len(array)-1)//2, array)\n )\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 9, "blob_id": "0cbca2620cb75d3289a83ff3df760e36886f4fdd", "content_id": "3464c0e06a4fcecb8aa036b1e1c3e60d37d3836a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "no_license", "max_line_length": 11, "num_lines": 2, "path": "/README.md", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# master-py\n김바드 학습용\n" }, { "alpha_fraction": 0.5728155374526978, "alphanum_fraction": 0.582524299621582, "avg_line_length": 16.16666603088379, "blob_id": "94d5e76b283ffef59412b0a4864b5bb7d6b3846d", "content_id": "734ea341cb70176026759565a2a215d0b7aa796b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/MIT_Algorithm_with_py/test.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split())\nanswer = 0\n# arr = list(map(int, input().split()))\n\nprint(a)\nprint(b)\n" }, { "alpha_fraction": 0.5783132314682007, "alphanum_fraction": 0.6204819083213806, "avg_line_length": 19.625, "blob_id": "b8ff7e422a81dcfef694a9c87a5254e76dfbd907", "content_id": "6548e9172553d16900be80cf6c2de4f4ed16aed1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/algorithm/1_basic/1_brute_force/N15728_2.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "n, k = list(map(int, input().split()))\ncommon = list(map(int, input().split()))\nteam = list(map(int, input().split()))\n\nanswer = -1000001\n\ncommon.sort()\nteam.sort()\n\n" }, { "alpha_fraction": 0.5885714292526245, "alphanum_fraction": 0.6257143020629883, "avg_line_length": 17.421052932739258, "blob_id": "80c4853f5a59cf163c7885e055fbd1f3a30ac94a", "content_id": "c53bc0cbaad4e3fdff678aa24a7f7eedeb194526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 67, "num_lines": 19, "path": 
"/algorithm/1_basic/2_linked_list/N2346.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# https://www.acmicpc.net/problem/2346\n# 파이썬 List가 Linked List 기능 모두 가지고 있다길래 next랑 prev,remove 쓰려는데 안된다..\n\n\nn = 10\ninputList = [1, 2, 3, 4, 5]\ninputIter = iter(inputList)\n# print(inputList)\nindex = 0\ndata = inputList[index]\n\nfor i in inputList:\n if data > 0:\n for item in range(data):\n\n print(inputList.\n\n else:\n pass\n" }, { "alpha_fraction": 0.5314401388168335, "alphanum_fraction": 0.5801216959953308, "avg_line_length": 29.8125, "blob_id": "98de2d99d95ea970c3e4dc041195655969ee67e5", "content_id": "1b0ffaf9786aaaa4d24a6baadc1533151a69032a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/MIT_Algorithm_with_py/searchMax.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "def searchMax(start, end, array):\n middle = (start+end)//2\n if middle <= start or middle >= end:\n return array[middle]\n\n if array[middle-1] > array[middle]:\n return searchMax(start, middle, array)\n elif array[middle] < array[middle+1]:\n return searchMax(middle+1, end, array)\n else:\n return array[middle]\n\n\ntest = [1, 2, 3, 4, 5, 3, 2, 1, 6, 7, 8, 7, 5, 3, 3, 3, 7, 8]\nprint(\"극대값을 찾을 배열 : \", test)\nprint(\"찾은 극대값 : \", searchMax(0, len(test)-1, test))\n" }, { "alpha_fraction": 0.460966557264328, "alphanum_fraction": 0.5204461216926575, "avg_line_length": 17.55172348022461, "blob_id": "9c5273b79e953b8ea6143aa4f88fc1c99d8b840b", "content_id": "f3d1a052f4766a175e2da185ebf1bd19b2afb394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 40, "num_lines": 29, "path": "/algorithm/1_basic/1_brute_force/N15728.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "# N, K : 5 2\n# 공유 숫자 카드 : -1 2 3 4 5\n# 팀 숫자 카드 : -1 0 2 3 4\n# 우리팀이 얻을 수 있는 최대 점수 출력\n\nn, k = list(map(int, input().split()))\ncommon = list(map(int, input().split()))\nteam = list(map(int, input().split()))\nanswer = -100000001\nmax_team = team[0]\n\nfor item in range(k+1):\n for x in common:\n for y in team:\n if x*y > answer:\n answer = x*y\n max_team = y\n\n if item != k:\n team.remove(max_team)\n answer = -100000001\n\n\nprint(answer)\n\n\n# print(n)\n# print(k)\n# print(common, team)\n" }, { "alpha_fraction": 0.5165165066719055, "alphanum_fraction": 0.5405405163764954, "avg_line_length": 17.5, "blob_id": "1e0aa9bca2a929b8e86531bcb78024268e9f2a87", "content_id": "cb450fb134b280e764913a86da046683c7d7155e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 38, "num_lines": 18, "path": "/algorithm/1_basic/1_brute_force/N1182.py", "repo_name": "Ting-Kim/master-py", "src_encoding": "UTF-8", "text": "def part_list(sum, index):\n global answer\n if index == a:\n if sum == b:\n answer += 1\n return\n part_list(sum+arr[index], index+1)\n part_list(sum, index+1)\n\na, b = map(int, input().split())\nanswer = 0\narr = list(map(int, input().split()))\n\npart_list(0, 0)\n\nif b == 0:\n answer -= 1\nprint(answer)\n" } ]
10
RobScales06/LinkScraper
https://github.com/RobScales06/LinkScraper
a4a825effcdf629a0c3aa8b3718bf69b71054ff5
a59c3eefc02a8ca4978fcf07cd990597702a4ddf
d3fe6fea5748b694be16fffef1e6740ed0644583
refs/heads/main
2023-01-21T14:55:42.549974
2020-11-29T10:47:23
2020-11-29T10:47:23
316,928,534
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7823529243469238, "alphanum_fraction": 0.7823529243469238, "avg_line_length": 27.33333396911621, "blob_id": "5f96bfbd0fc494939ac19c52a009d7e51db060d9", "content_id": "22bdfbc6a88d3bec2c52fed1b53d8909b66da089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "no_license", "max_line_length": 98, "num_lines": 12, "path": "/README.md", "repo_name": "RobScales06/LinkScraper", "src_encoding": "UTF-8", "text": "# LinkScraper\nSimple tool for scraping all http/https links embedded in a website and writing them to a csv file\n\ninput link to website you want to extract from\n\ninput output file name (without extension)\n\nif no name is provided, the links will only be printed, not written to file\n\nrequired libraries: requests, csv, os\n\nwritten by Rob Scales\n" }, { "alpha_fraction": 0.5587443709373474, "alphanum_fraction": 0.5704035758972168, "avg_line_length": 27.394737243652344, "blob_id": "379b89a1158083186794202e59a31c4395ceb6ae", "content_id": "6491891794211ebb473b206b11ad42d714dfa1ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 99, "num_lines": 38, "path": "/LinkScraper.py", "repo_name": "RobScales06/LinkScraper", "src_encoding": "UTF-8", "text": "#Created by Rob Scales\r\n#11/29/2020\r\n#LinkScraper\r\n#input a link and a filename to get a csv of all of the http or https links embedded in the website\r\n\r\n#necessary libraries: requests, csv, os:\r\nimport requests\r\nimport csv\r\nimport os\r\n\r\nprint(\"Input website URL:\")\r\nwebLink = input()\r\nprint(\"Name of output CSV (blank if none):\")\r\noutfile = input() + \".csv\"\r\n\r\noutfileTxt = \"\"\r\n\r\nurl = requests.get(webLink)\r\nhtmltext = url.text\r\n\r\nwith open(outfile, 'w') as csvfile: #open csv for writing\r\n for i in range(len(htmltext)):\r\n if(htmltext[i]=='h' and htmltext[i+1]=='t' and htmltext[i+2]=='t' and htmltext[i+3]=='p'):\r\n j=0\r\n while(htmltext[j+i] != \"\\\"\"):\r\n print(htmltext[j+i], end=\"\")\r\n outfileTxt += htmltext[j+i]\r\n j+=1\r\n outfileTxt +=\"\\n\"\r\n print(\"\\n\")\r\n csvfile.write(outfileTxt) #write links to csv\r\n#finish program\r\n\r\nif(outfile == \".csv\"): #eliminate file if no name\r\n os.remove(\".csv\")\r\n print(\"***\" + webLink + \" processed!***\")\r\nelse:\r\n print(\"***\" + webLink + \" processed to \" + outfile + \"!***\")" } ]
2
racquesta/powerball_viz
https://github.com/racquesta/powerball_viz
41dbc0cfad17a7e9f2abee53631aab3dc76917b7
2fe4c48ce575405ffa407b3814bd38ca92eebd6f
b3bdbb787b87014d72b509286c16b8c7493c86c7
refs/heads/master
2021-09-03T14:00:51.542928
2018-01-09T16:03:37
2018-01-09T16:03:37
112,858,482
1
1
null
2017-12-02T16:37:55
2017-12-05T03:54:28
2018-01-09T16:03:38
Jupyter Notebook
[ { "alpha_fraction": 0.4763731360435486, "alphanum_fraction": 0.4889276325702667, "avg_line_length": 35.075469970703125, "blob_id": "ee45ab541e8947c55a300d6c3638af0c223ba87a", "content_id": "34ff6242557ec886ae4a7f2ccac4709f7f0fb8f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5735, "license_type": "no_license", "max_line_length": 204, "num_lines": 159, "path": "/app.py", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "#mLab login\n# from mLab import username, password\n\n#flask setup\nfrom flask import Flask, render_template, redirect, jsonify\napp = Flask(__name__)\n\n#Mongo DB connection with mLab\nfrom pymongo import MongoClient\nimport pandas as pd\n\nclient = MongoClient(\"mongodb://%s:%[email protected]:33816/heroku_sdvkxt9m\" % (\"admin\", \"powerballdata\")) \n\ndb = client.heroku_sdvkxt9m\n\ntotal_collection = db.total_collection\nwinners_collection = db.winners_collection\njackpots_collection = db.jackpots_collection\n\nimport pandas as pd\nimport numpy as np\n# functions in other files for routes imported here\n\n\[email protected](\"/\")\ndef home():\n # scrape_dict = total_collection.find({}, {'year':1, '_id':0})\n # list_years =[x['year'] for x in scrape_dict]\n # print(list_years)\n return render_template(\"index.html\")\n\[email protected](\"/numbers\")\ndef nums():\n results = total_collection.find({}, {\"date_format\": 1,\n \"numbers\": 1,\n \"powerball\": 1,\n \"_id\": 0\n })\n results_list = [x for x in results]\n return jsonify(results_list)\n \[email protected](\"/years\")\ndef years():\n pipe = [{'$match': {'year': {'$in': [2010, \n 2011, \n 2012, \n 2013, \n 2014, \n 2015]}}}, \n {'$group': {'_id': '$year', 'year': {'$first': '$year'}}}, {'$sort': {'_id': 1}}]\n results = total_collection.aggregate(pipeline=pipe)\n results_list = [x['year'] for x in results]\n return jsonify(results_list)\n\[email protected](\"/jackpots/<chosen_year>/<data_point>/<dep_var>\")\ndef jackpots(chosen_year, data_point, dep_var): \n results = jackpots_collection.find({}, {data_point: 1, \n dep_var: 1, \n 'jackpot_run_id': 1,\n 'year': 1,\n 'date_format': 1,\n 'drawings_since_jackpot': 1,\n 'jackpot':1,\n '_id': 0 })\n df = pd.DataFrame(list(results))\n if chosen_year != 'all':\n df = df[df['year'] == int(chosen_year)]\n df_dict = df.to_dict(orient = 'list')\n return jsonify(df_dict)\n\[email protected](\"/jackpot\")\ndef jackpot():\n pipe = [{'$group': {'_id': '$date_format', 'jackpot': {'$avg': '$jackpot'}, 'total_tickets_sold': {\"$sum\": '$ticket_sales'}}}]\n results = total_collection.aggregate(pipeline=pipe)\n # jackpot = [x['jackpot'] for x in results]\n # total_tick_sales = [x['total_tickets_sold'] for x in results]\n jackpot = []\n total_tick_sales = []\n for x in results:\n jackpot.append(x['jackpot'])\n total_tick_sales.append(x['total_tickets_sold'])\n \n # print(total_tick_sales)\n jackpots_dict = {\n \"jackpots\": jackpot,\n \"ticket_sales\": total_tick_sales\n }\n return jsonify(jackpots_dict)\n\[email protected](\"/jackpots_all\")\ndef jackpots_all():\n results = total_collection.find({}, {'jackpot': 1, \n 'ticket_sales': 1, \n '_id': 0 }, {'$sort': {'ticket_sales': 1}})\n # jackpot = [x['jackpot'] for x in results]\n # total_tick_sales = [x['total_tickets_sold'] for x in results]\n jackpot = []\n total_tick_sales = []\n for x in results:\n jackpot.append(x['jackpot'])\n total_tick_sales.append(x['ticket_sales'])\n \n # print(total_tick_sales)\n jackpots_dict = {\n \"jackpots\": 
jackpot,\n \"ticket_sales\": total_tick_sales\n }\n return jsonify(jackpots_dict)\n\[email protected](\"/sales_data/<year>\")\ndef sales_data(year):\n year = int(year)\n pipe = [{'$match': {'year': year}},{'$group': {'_id': '$states', 'norm_draw_sales': {'$sum': '$norm_draw_sale_by_state'}, 'norm_pp_sales': {\"$sum\": '$norm_pp_sale_by_state'} }}, {'$sort': {'_id': 1}}]\n results = total_collection.aggregate(pipeline=pipe) \n \n df = pd.DataFrame(list(results))\n\n df.dropna(inplace = True)\n df['sum_sales']=df['norm_draw_sales']+df['norm_pp_sales']\n df.sort_values('sum_sales', ascending = False, inplace = True)\n sales_dict = df.to_dict(orient = 'list')\n \n return jsonify(sales_dict)\n\n\[email protected](\"/soc_data/<chosen_year>/<data_point>/<dep_var>\")\ndef soc_data(chosen_year, data_point, dep_var):\n pipe = [{\"$match\": {\"$and\" : [{'year': {\"$in\": [2011, \n 2012, \n 2013, \n 2014,\n 2015]}}, \n {\"state_abbr\": {\"$ne\": \"VI\"}}]}}, \n {'$group': {'_id': {'year': '$year', 'state': '$states'},\n 'independent': {'$avg': (\"$\" + data_point)}, \n 'dependent': {'$sum': ('$' + dep_var)}}}]\n results = total_collection.aggregate(pipeline=pipe)\n\n df = pd.DataFrame(list(results))\n\n df['year'] = 0\n df['state'] = ''\n for index, row in df.iterrows():\n year = int(row[\"_id\"]['year'])\n state = row['_id']['state']\n df.set_value(index, 'year', year)\n df.set_value(index, 'state', state)\n if chosen_year != \"all\":\n df = df[df['year'] == int(chosen_year)]\n del df['_id']\n\n df.dropna(inplace = True)\n\n df_dict = df.to_dict(orient = 'list')\n return jsonify(df_dict)\n\n\nif __name__ == '__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.4397275447845459, "alphanum_fraction": 0.451924592256546, "avg_line_length": 29.7967472076416, "blob_id": "462fda22e2523b1275b3abbe4f2181f94afce377", "content_id": "9643fe3181576773f264d922c8d983aa18d96499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 18939, "license_type": "no_license", "max_line_length": 743, "num_lines": 615, "path": "/static/js/logic.js", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "states = ['Alabama','Alaska','American Samoa','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Dist. 
of Columbia','Federated States of Micronesia','Florida','Georgia','Guam','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Marshall Islands','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Northern Mariana Islands','Ohio','Oklahoma','Oregon','Palau','Pennsylvania','Puerto Rico','Rhode Island','South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virgin Island','Virginia','Washington','West Virginia','Wisconsin','Wyoming']\n// create a consistent array of color indices for us states\ncolor_num = {}\ncount = 1\n_.each(states, function(item){\n if(!color_num[item]){\n color_num[item] = count\n count += 11\n }\n})\nconsole.log(color_num)\n\nfunction jackpotReduce(jackpotarray){\n jackpot_reduced = []\n for(i=0; i<jackpotarray.length; i++){\n var value = jackpotarray[i]/15;\n jackpot_reduced.push(value)\n }\n return jackpot_reduced\n}\nfunction getStateColorIndex(statesArray){\n var colors = [];\n _.each(statesArray, function(item){\n console.log(color_num[item])\n colors.push(color_num[item])\n });\n return colors\n};\n\nfunction createHoverData(zarray, itemText, dateArray, dateText){\n hoverText = [];\n for(var i = 0; i<zarray.length; i++){\n var oneTool = itemText + zarray[i] + \"<br>\" + dateText + dateArray[i];\n hoverText.push(oneTool);\n };\n return hoverText\n}\n\nfunction colorIndex(arg){\n new_index = [arg[1]]\n for(i=0; i< arg.length; i++){\n var new_num = arg[i]/arg[1] *100\n new_index.push(new_num)\n }\n return new_index\n}\nconsole.log(\"js is working!\")\n\n// Set up the dropdown menu for the year of sales data\nvar $selDataset = document.getElementById(\"selDataset\");\nd3.json(\"/years\", function(error, response) {\n if (error) return console.log(error);\n var salesYears = response;\nfor (var i = 0; i < salesYears.length; i++) {\n var saleYear = salesYears[i];\n var $option = document.createElement(\"option\");\n $option.setAttribute(\"value\", saleYear);\n $option.innerHTML = saleYear;\n $selDataset.appendChild($option);\n };\n});\n\nfunction init(){\n var $bar = document.getElementById(\"bar\");\n var dataUrl = \"/sales_data/2010\"; \n d3.json(dataUrl, function(error, response) {\n if (error) return console.log(error);\n var salesData=[];\n for (i = 0; i < response._id.length; i++) { \n salesData.push({'state':response._id[i],'draw_sales':response.norm_draw_sales[i],'pp_sales':response.norm_pp_sales[i]});\n };\n var state_names = salesData.map(ayy => ayy.state);\n var state_draw_sales = salesData.map(ayy => ayy.draw_sales);\n var state_pp_sales = salesData.map(ayy => ayy.pp_sales);\n // console.log(state_pp_sales);\n var year= dataUrl;\n var trace_draw = {\n x: state_names,\n y: state_draw_sales,\n name: 'Base/Draw Ticket Sales',\n type: 'bar',\n marker: {color: \"#c61633\", opacity: .75}\n \n \n };\n var trace_pp = {\n x: state_names,\n y: state_pp_sales,\n name: 'Power Play Sales',\n type: 'bar',\n marker: {color: \"darkslateblue\",\n opacity: .75}\n };\n var data = [trace_draw, trace_pp];\n var layout = {\n title: \"2010 Ticket Sales Per Capita\",\n barmode: 'stack',\n showlegend: true,\n xaxis: {\n tickangle: -45\n },\n yaxis: {\n zeroline: false,\n gridwidth: 2\n },\n bargap :0.1,\n font: {\n family: \"Pacifico\",\n size: 13\n },\n margin: \n {\n top: 10,\n bottom: 2,\n right: 10,\n left: 10\n }\n }; \n Plotly.newPlot(\"bar\", data, layout); \n });\n\n 
d3.json(\"/jackpots/all/jackpot/ticket_sales\", function(error, response) {\n \n if (error) return console.warn(error);\n \n console.log(response);\n\n hoverInfo = createHoverData(response['drawings_since_jackpot'], \"Drawings Since Jackpot Winner: \", response['date_format'], \"Date: \")\n colors = colorIndex(response['jackpot_run_id'])\n jackpot_size = jackpotReduce(response['jackpot'])\n\n console.log(jackpot_size)\n var trace1 = {\n x: response['jackpot'],\n y: response['ticket_sales'],\n mode: 'markers',\n marker: {\n colorscale: 'RdBu',\n color: colors,\n opacity: .75,\n size: jackpot_size\n },\n name: response['date_format'],\n text: hoverInfo,\n type: 'scatter',\n };\n\n \n var layout = {\n hovermode: 'closest',\n hoverinfo: 'name+text',\n showlegend: false,\n height: 600,\n // width: 1200\n margin: \n {\n top: 10,\n bottom: 10,\n right: 10,\n left: 10\n },\n title: \"Total Ticket Sales versus Jackpot Value <br> 2010-2015\",\n font: {\n family: \"Pacifico\",\n size: 15,\n \n },\n xaxis: {\n title: \"Jackpot Value\",\n showline: true,\n zeroline: false,\n autorange: true\n },\n yaxis: {\n title: \"Ticket Sale Per Capita\",\n showline: true,\n zeroline: false,\n autorange: true\n },\n transition: {\n duration: 500,\n easing: 'cubic-in-out'\n }\n \n };\n var data = [trace1];\n \n Plotly.newPlot('graph', data, layout);\n \n });\n\n d3.json(\"/soc_data/all/Poverty Rate/norm_tick_sales\", function(error, response){\n\n if (error) return console.warn(error);\n\n console.log(response)\n \n \n colorArray = getStateColorIndex(response['state'])\n\n hoverInfo = createHoverData(response['state'], \"\", response['year'], \"\");\n\n console.log(hoverInfo)\n console.log(colorArray)\n // colors_list = []\n // for(var i=0; i<response[\"states\"].length; i++){\n // colors_list.push(color_dict[response[\"states\"][i]])\n // }\n //console.log(sizes)\n var trace1 = {\n x: response['independent'],\n y: response['dependent'],\n mode: 'markers',\n marker: {\n colorscale: \"RdBu\",\n color: colorArray,\n size: 15,\n opacity: .75\n },\n text: hoverInfo,\n type: \"scatter\"\n };\n \n var data = [trace1];\n \n var layout = {\n hovermode: 'closest',\n showlegend: false,\n height: 600,\n // width: 1200\n margin: \n {\n top: 10,\n bottom: 10,\n right: 10,\n left: 10\n },\n title: \"Ticket Sales Per Capita versus Poverty Rate <br> 2010-2015\",\n xaxis: {\n title: \"Poverty Rate (%)\",\n showline: true\n },\n yaxis: {\n title: \"Ticket Sale Per Capita\",\n showline: true\n },\n transition: {\n duration: 500,\n easing: 'cubic-in-out'\n },\n font: {\n family: \"Pacifico\",\n size: 15,\n \n },\n \n };\n \n \n Plotly.newPlot('soc_graph', data, layout);\n })\n };\n\nd3.select(\"#soc_graph_submit\").on(\"click\", function(){\n\n d3.event.preventDefault();\n\n var yaxis = d3.select(\"#depVar\").property(\"value\")\n var xaxis = d3.select(\"#indVar\").property(\"value\")\n var year = d3.select(\"#socYears\").property(\"value\")\n\n console.log(year)\n console.log(typeof year)\n switch (year){\n case \"2010-2015\":\n var chosen_year = \"all\";\n break;\n default:\n var chosen_year = year;\n };\n\n switch (yaxis){\n case \"Ticket Sales Per Capita\":\n var dep_var = \"norm_tick_sales\";\n var max_y = 26;\n break;\n default:\n var dep_var = \"norm_revenue\"\n var max_y = 40\n \n };\n\n switch (xaxis){\n case \"Median Income\":\n var data_point = \"Household Income\";\n break;\n default:\n var data_point = xaxis;\n }\n\n console.log(chosen_year)\n console.log(xaxis)\n console.log(dep_var)\n\n var route = \"/soc_data/\" + 
chosen_year + \"/\" + data_point + \"/\" + dep_var;\n console.log(route)\n d3.json(route, function(error, response){\n\n if (error) return console.warn(error);\n\n var new_x = response['independent'];\n var new_y = response['dependent'];\n var new_title = yaxis + \" Vs. \" + xaxis + \"<br>\" + year;\n var new_x_title = xaxis;\n var new_y_title = yaxis;\n var new_state_names = response['state']\n\n var new_colors = getStateColorIndex(response['state'])\n\n console.log(new_x)\n console.log(new_y)\n console.log(new_title)\n console.log(new_x_title)\n console.log(new_y_title)\n\n // Plotly.restyle('soc_graph', 'x', [new_x]);\n // Plotly.restyle('soc_graph', 'y', [new_y])\n // Plotly.restyle('soc_graph', 'markers.color', [new_colors])\n // Plotly.relayout('soc_graph', 'title', new_title)\n // Plotly.relayout('soc_graph', 'xaxis.title', new_x_title )\n\n function myArrayMax(arr) {\n var len = arr.length\n var max = -Infinity;\n while (len--) {\n if (arr[len] > max) {\n max = arr[len];\n }\n }\n return max;\n }\n \n\n function myArrayMin(arr) {\n var len = arr.length\n var min = Infinity;\n while (len--) {\n if (arr[len] < min) {\n min = arr[len];\n }\n }\n if (min < 5){\n min = 5;\n }\n return min;\n }\n\n var min_x = myArrayMin(new_x)\n var max_x = myArrayMax(new_x)\n \n var max_y_tickets = 25\n console.log(\"max\" + max_y_tickets)\n var max_y_dollars = 37\n console.log(\"max $\" + max_y_dollars)\n console.log(min_x)\n console.log(max_x)\n\n var buffer = 5;\n\n if (min_x >10000){\n buffer = 1000\n } ;\n if (min_x > 100000) {\n mix_x = 0\n };\n\n \n\n console.log(buffer)\n Plotly.animate('soc_graph', {\n data: [{x: new_x, \n y: new_y,\n text: new_state_names,\n markers: {\n color: new_colors\n }}],\n layout: {\n title: new_title,\n xaxis: {\n title: new_x_title,\n range: [min_x - buffer, max_x + buffer]\n },\n yaxis: {\n title: new_y_title,\n range: [0, max_y]\n }\n }\n }, {\n transition: {\n duration: 500,\n easing: 'cubic-in-out'\n }\n })\n })\n});\n\n\nd3.select(\"#jack_graph_submit\").on(\"click\", function(){\n \n d3.event.preventDefault();\n \n var yaxis = d3.select(\"#jackY\").property(\"value\")\n var xaxis = d3.select(\"#jackX\").property(\"value\")\n var year = d3.select(\"#jackYears\").property(\"value\")\n \n console.log(year)\n console.log(typeof year)\n switch (year){\n case \"2010-2015\":\n var chosen_year = \"all\";\n break;\n default:\n var chosen_year = year;\n };\n \n switch (yaxis){\n case \"Ticket Sales\":\n var dep_var = \"ticket_sales\";\n // var max_y = 26;\n break;\n case \"Jackpot Value\":\n var dep_var = \"jackpot\";\n // var max_y = 26;\n break;\n case \"Power Play Sales ($)\":\n var dep_var = \"state_pp_sales\";\n // var max_y = 26;\n break;\n case \"Base/Draw Ticket Sales ($)\":\n var dep_var = \"state_draw_sales\";\n // var max_y = 26;\n break;\n case \"Total Sales ($)\":\n var dep_var = \"revenue\";\n // var max_y = 26;\n break;\n default:\n var dep_var = \"revenue\"\n // var max_y = 40\n \n };\n \n switch (xaxis){\n case \"Jackpot Value\":\n var data_point = \"jackpot\";\n break;\n case \"Date\":\n var data_point = \"date_format\";\n break;\n default:\n var data_point = \"drawings_since_jackpot\";\n }\n \n console.log(chosen_year)\n console.log(xaxis)\n console.log(dep_var)\n \n var route = \"/jackpots/\" + chosen_year + \"/\" + data_point + \"/\" + dep_var;\n console.log(route)\n d3.json(route, function(error, response){\n \n if (error) return console.warn(error);\n \n var new_x = response[data_point];\n var new_y = response[dep_var];\n var new_title = yaxis + 
\" Vs. \" + xaxis + \"<br>\" + year;\n var new_x_title = xaxis;\n var new_y_title = yaxis;\n \n \n console.log(new_x)\n console.log(new_y)\n console.log(new_title)\n console.log(new_x_title)\n console.log(new_y_title)\n \n hoverInfo = createHoverData(response['drawings_since_jackpot'], \"Drawings Since Jackpot Winner: \", response['date_format'], \"Date: \")\n\n colors_new = colorIndex(response['jackpot_run_id'])\n console.log(colors_new) \n \n jackpot_size_new = jackpotReduce(response['jackpot'])\n console.log(jackpot_size_new)\n // \n var trace1 = {\n x: new_x,\n y: new_y,\n mode: 'markers',\n marker: {\n colorscale: 'RdBu',\n color: colors_new,\n opacity: .75,\n size: jackpot_size_new\n },\n name: response['date_format'],\n text: hoverInfo,\n type: 'scatter',\n };\n\n \n var layout = {\n hovermode: 'closest',\n hoverinfo: 'name+text',\n showlegend: false,\n height: 600,\n // width: 1200\n margin: \n {\n top: 10,\n bottom: 10,\n right: 10,\n left: 10\n },\n title: new_title,\n font: {\n family: \"Pacifico\",\n size: 15,\n \n },\n xaxis: {\n title: new_x_title,\n showline: true,\n zeroline: false,\n autorange: true\n },\n yaxis: {\n title: new_y_title,\n showline: true,\n zeroline: false,\n autorange: true\n },\n transition: {\n duration: 500,\n easing: 'cubic-in-out'\n }\n \n };\n var data = [trace1];\n Plotly.newPlot('graph', data, layout)\n // Plotly.restyle('graph', update, [0])\n // Plotly.restyle('graph', 'x', [new_x]);\n // Plotly.restyle('graph', 'y', [new_y])\n // Plotly.restyle('graph', 'text', [hoverInfo])\n // Plotly.restyle('graph', 'markers.color', [colors_new])\n // Plotly.relayout('graph', 'title', new_title)\n // Plotly.relayout('graph', 'xaxis.title', new_x_title)\n // Plotly.relayout('graph', 'yaxis.title', new_y_title)\n // Plotly.restyle('graph', 'markers.size', jackpot_size_new)\n \n });\n });\nfunction optionChanged(dataset) {\n var newdataUrl = `/sales_data/${dataset}`; \n d3.json(newdataUrl, function(error, response) {\n if (error) return console.log(error);\n var $salesData=[];\n for (i = 0; i < response._id.length; i++) { \n $salesData.push({'state':response._id[i],'draw_sales':response.norm_draw_sales[i],'pp_sales':response.norm_pp_sales[i]});\n };\n var $state_names = $salesData.map(ayy => ayy.state);\n var $state_draw_sales = $salesData.map(ayy => ayy.draw_sales);\n var $state_pp_sales = $salesData.map(ayy => ayy.pp_sales);\n var $year=`${dataset}`;\n var $trace_draw = {\n x: $state_names,\n y: $state_draw_sales,\n name: 'Base/Draw Ticket Sales',\n type: 'bar',\n marker: {color: \"#c61633\", opacity: .75}\n };\n var $trace_pp = {\n x: $state_names,\n y: $state_pp_sales,\n name: 'Power Play Sales',\n type: 'bar',\n marker: {color: \"darkslateblue\", opacity: .75}\n };\n \n var newData = [$trace_draw, $trace_pp];\n updatePlotly(newData, dataset);\n }); \n };\n \nfunction updatePlotly(newBardata, dataset) {\n var BAR= document.getElementById(\"bar\");\n var layout = {\n title: dataset + \" Ticket Sales Per Capita\",\n barmode: 'stack',\n showlegend: true,\n xaxis: {\n tickangle: -45\n },\n yaxis: {\n zeroline: false,\n gridwidth: 2\n },\n bargap :0.1,\n font: {\n family: \"Pacifico\",\n size: 15\n }\n };\n Plotly.newPlot(BAR, newBardata, layout);\n };\ninit();" }, { "alpha_fraction": 0.5233399271965027, "alphanum_fraction": 0.5368179082870483, "avg_line_length": 32.81111145019531, "blob_id": "ab4adb2706c4382078a9fe1e46d84365bfc48508", "content_id": "e44e0b66a2e56fe7116563c7eec93dc305fd955b", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3042, "license_type": "no_license", "max_line_length": 204, "num_lines": 90, "path": "/Deshan/app.py", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "#mLab login\n# from mLab import username, password\n\n#flask setup\nfrom flask import Flask, render_template, redirect, jsonify\napp = Flask(__name__)\n\n#Mongo DB connection with mLab\nfrom pymongo import MongoClient\nimport pandas as pd\n\nclient = MongoClient(\"mongodb://%s:%[email protected]:33816/heroku_sdvkxt9m\" % (\"admin\", \"powerballdata\")) \n\ndb = client.heroku_sdvkxt9m\n\ntotal_collection = db.total_collection\n\n# functions in other files for routes imported here\n\n\[email protected](\"/\")\ndef home():\n # scrape_dict = total_collection.find({}, {'year':1, '_id':0})\n # list_years =[x['year'] for x in scrape_dict]\n # print(list_years)\n return render_template(\"index.html\")\n\n\[email protected](\"/years\")\ndef years():\n pipe = [{'$group': {'_id': '$year', 'year': {'$first': '$year'}}}, {'$sort': {'_id': 1}}]\n results = total_collection.aggregate(pipeline=pipe)\n results_list = [x['year'] for x in results]\n return jsonify(results_list)\n\[email protected](\"/jackpot\")\ndef jackpot():\n pipe = [{'$group': {'_id': '$date_format', 'jackpot': {'$avg': '$jackpot'}, 'total_tickets_sold': {\"$sum\": '$ticket_sales'}}}]\n results = total_collection.aggregate(pipeline=pipe)\n # jackpot = [x['jackpot'] for x in results]\n # total_tick_sales = [x['total_tickets_sold'] for x in results]\n jackpot = []\n total_tick_sales = []\n for x in results:\n jackpot.append(x['jackpot'])\n total_tick_sales.append(x['total_tickets_sold'])\n \n # print(total_tick_sales)\n jackpots_dict = {\n \"jackpots\": jackpot,\n \"ticket_sales\": total_tick_sales\n }\n return jsonify(jackpots_dict)\n\[email protected](\"/sales_data/<year>\")\ndef sales_data(year):\n year = int(year)\n pipe = [{'$match': {'year': year}},{'$group': {'_id': '$states', 'norm_draw_sales': {'$sum': '$norm_draw_sale_by_state'}, 'norm_pp_sales': {\"$sum\": '$norm_pp_sale_by_state'} }}, {'$sort': {'_id': 1}}]\n results = total_collection.aggregate(pipeline=pipe) \n \n df = pd.DataFrame(list(results))\n\n df.dropna(inplace = True)\n df['sum_sales']=df['norm_draw_sales']+df['norm_pp_sales']\n df.sort_values('sum_sales', ascending = False, inplace = True)\n sales_dict = df.to_dict(orient = 'list')\n \n return jsonify(sales_dict)\n# undfinished route\n\[email protected](\"/bubble_data\")\ndef bubble_data():\n results = total_collection.find({'year': {'$in': [2011, 2012, 2013, 2014, 2015]} }, {'date_format': 1, \n 'jackpot': 1, \n 'norm_tick_sales': 1, \n 'state_abbr': 1,\n 'revenue': 1,\n 'norm_revenue': 1,\n 'Poverty Rate': 1,\n 'Unemployment Rate': 1,\n 'Household Income': 1,\n '-id': 0 })\n \n return results\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.49484536051750183, "alphanum_fraction": 0.7061855792999268, "avg_line_length": 15.25, "blob_id": "d38eb76b7497e231f21d18b1defc8d06f340cec6", "content_id": "50be0f16f1d7c1ebb33d0c0e0e4a125e901aba16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 194, "license_type": "no_license", "max_line_length": 22, "num_lines": 12, "path": "/requirements.txt", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": 
"click==6.7\nFlask==0.12.2\ngunicorn==19.7.1\nitsdangerous==0.24\nJinja2==2.9.6\nMarkupSafe==1.0\nmysqlclient==1.3.12\nnumpy==1.13.3\nWerkzeug==0.12.2\npython-dateutil==2.6.0\npymongo==3.5.1\npandas==0.20.1" }, { "alpha_fraction": 0.7833333611488342, "alphanum_fraction": 0.7833333611488342, "avg_line_length": 28, "blob_id": "6ab1d068f3cc068bb4526cc8b27332bcc08f76f6", "content_id": "33b6c6a382dbee779b4c13dde53da0e12b569648", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/README.md", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "# powerball_viz\nA webscraping/data viz dashboard project. \n" }, { "alpha_fraction": 0.5453255772590637, "alphanum_fraction": 0.5807977318763733, "avg_line_length": 26.824562072753906, "blob_id": "be0cd2d05caf4957fee2fb76e024b9f7fad2564f", "content_id": "941c4aa316fe20fda14b4c35edd4af47df4c1884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6343, "license_type": "no_license", "max_line_length": 126, "num_lines": 228, "path": "/Jamie/leaflet/leaflet.js", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "var map1 = L.map(\"map1\", {\n center: [35, -100],\n zoom: 4\n});\n\nvar map2 = L.map(\"map2\", {\n center: [35, -100],\n zoom: 4\n});\n\nL.tileLayer(\"https://api.mapbox.com/styles/v1/jamiejin91/cjb4aryzk6z6y2so0gnawkf6s/tiles/256/{z}/{x}/{y}?access_token=\" + \n\"pk.eyJ1IjoiamFtaWVqaW45MSIsImEiOiJjamFrYTFnbjEyZ2dvMzNxdTdkMHJ4cG1wIn0.uvp0QKa2TpxLA_-6JW1lIA\").addTo(map1);\nL.tileLayer(\"https://api.mapbox.com/styles/v1/jamiejin91/cjb4aryzk6z6y2so0gnawkf6s/tiles/256/{z}/{x}/{y}?access_token=\" + \n\"pk.eyJ1IjoiamFtaWVqaW45MSIsImEiOiJjamFrYTFnbjEyZ2dvMzNxdTdkMHJ4cG1wIn0.uvp0QKa2TpxLA_-6JW1lIA\").addTo(map2);\n\nvar link = \"https://raw.githubusercontent.com/racquesta/powerball_viz/master/Jamie/us_geojson_creator/us_territories.geojson\";\n\nvar acceptable = [\"num_of_jp_win\",\"jackpot_win_amount\",\"total_win_amount\",\"total_draw_sold\",\"total_draw_sales\"]\n\nfunction chooseColor(info,max_info) {\n if (info == 0) {\n return 'grey'\n }\n else if (info >= 0.9 * max_info) {\n return '#800026'\n }\n else if (info >= 0.75 * max_info) {\n return '#BD0026'\n }\n else if (info >= 0.5 * max_info) {\n return '#E31A1C'\n }\n else if (info >= 0.4 * max_info) {\n return '#FC4E2A'\n }\n else if (info >= 0.3 * max_info) {\n return '#FD8D3C'\n }\n else if (info >= 0.2 * max_info) {\n return '#FEB24C'\n }\n else if (info >= 0.1 * max_info) {\n return '#FED976'\n }\n else {\n return '#FFEDA0'\n }\n};\n\nfunction numberWithCommas(number) {\n number = Math.ceil(number)\n\n if (isNaN(number)) {\n return '';\n }\n\n var asString = '' + Math.abs(number),\n numberOfUpToThreeCharSubstrings = Math.ceil(asString.length / 3),\n startingLength = asString.length % 3,\n substrings = [],\n isNegative = (number < 0),\n formattedNumber,\n i;\n\n if (startingLength > 0) {\n substrings.push(asString.substring(0, startingLength));\n }\n\n for (i=startingLength; i < asString.length; i += 3) {\n substrings.push(asString.substr(i, 3));\n }\n\n formattedNumber = substrings.join(',');\n if (isNegative) {\n formattedNumber = '-' + formattedNumber;\n }\n\n return formattedNumber;\n};\n\nfunction chooseString(string) {\n switch (string) {\n case \"num_of_jp_win\":\n return \"# of Jackpot Winners: \";\n case \"jackpot_win_amount\":\n return \"Total 
Jackpot Amount Won: $\";\n case \"total_win_amount\":\n return \"Total Amount Won: $\";\n case \"total_draw_sold\":\n return \"# of Tickets Sold:\";\n case \"total_draw_sales\":\n return \"Total Sales Amount: $\";\n default:\n return;\n }\n};\n\nfunction findMax(data,param) {\n if (acceptable.includes(param)) {\n var param_data = [];\n for (i = 0; i < data.features.length; i++) {\n param_data.push(parseInt(data['features'][i]['properties'][param], 10))\n }\n var max_info = Math.max(...param_data)\n return(max_info)\n }\n else {\n return \"order_66\"\n }\n};\n\nfunction plot(map_x, param) {\n d3.json(link, function(data) {\n console.log()\n L.geoJson(data, {\n style: function(feature) {\n return {\n color: \"white\",\n fillColor: chooseColor(feature['properties'][param],findMax(data,param)),\n fillOpacity: 0.6,\n weight: 1.5\n };\n },\n onEachFeature: function(feature, layer) {\n layer.on({\n mouseover: function(event) {\n layer = event.target;\n layer.setStyle({\n fillOpacity: 0.9\n });\n },\n mouseout: function(event) {\n layer = event.target;\n layer.setStyle({\n fillOpacity: 0.5\n });\n },\n // click: function(event) {\n // map_x.fitBounds(event.target.getBounds());\n // }\n });\n\n if (param == 'jackpot_win_amount') {\n var temp_number = numberWithCommas(feature['properties'][param] * 1000000)\n var addin_str = \"</h1> <hr> <h2>\" + chooseString(param) + temp_number + \"</h2>\"\n }\n else if (param == \"order_66\") {\n var addin_str = \"\"\n }\n else {\n var temp_number = numberWithCommas(feature['properties'][param])\n var addin_str = \"</h1> <hr> <h2>\" + chooseString(param) + temp_number + \"</h2>\"\n }\n \n layer.bindPopup(\"<h1>\" + feature.properties.state_name + addin_str)\n }\n }).addTo(map_x);\n\n L.marker()\n });\n};\n\nfunction init() {\n var $dropDown1 = document.getElementById(\"selData1\")\n var $dropDown2 = document.getElementById(\"selData2\")\n\n for (var i=0; i< acceptable.length; i++){\n var $option1 = document.createElement(\"option\");\n $option1.innerHTML = chooseString(acceptable[i]).split(\":\")[0];\n $option1.setAttribute('id', acceptable[i]);\n $option1.setAttribute(\"value\", acceptable[i]);\n var $option2 = document.createElement(\"option\");\n $option2.innerHTML = chooseString(acceptable[i]).split(\":\")[0];\n $option2.setAttribute('id', acceptable[i]);\n $option2.setAttribute(\"value\", acceptable[i]);\n $dropDown1.appendChild($option1);\n $dropDown2.appendChild($option2);\n }\n \n var legend1 = L.control({position: 'bottomleft'});\n\n legend1.onAdd = function (map) {\n\n var div1 = L.DomUtil.create('div', 'legend'),\n grades1 = [1, 10, 20, 30, 40, 50, 75, 90],\n labels1 = ['<strong> Percentile </strong>'];\n for (var i = 0; i < grades1.length; i++) {\n div1.innerHTML += '<i style=\"background:' + chooseColor(grades1[i], 100) + '\"></i> ' +\n grades1[i] + (grades1[i + 1] ? '&ndash;' + grades1[i + 1] + '<br>' : '+');\n\n }\n\n return div1;\n };\n\n var legend2 = L.control({position: 'bottomleft'});\n\n legend2.onAdd = function (map) {\n\n var div2 = L.DomUtil.create('div', 'legend'),\n grades2 = [1, 10, 20, 30, 40, 50, 75, 90],\n labels2 = ['<strong> Percentile </strong>'];\n for (var i = 0; i < grades2.length; i++) {\n div2.innerHTML += '<i style=\"background:' + chooseColor(grades2[i], 100) + '\"></i> ' +\n grades2[i] + (grades2[i + 1] ? 
'&ndash;' + grades2[i + 1] + '<br>' : '+');\n }\n\n return div2;\n };\n\n legend1.addTo(map1);\n legend2.addTo(map2);\n plot(map1,'order_66');\n plot(map2,'order_66');\n};\n\ninit()\n\n// plot(map1, document.getElementById(\"selData1\").value)\ndocument.getElementById(\"selData1\").addEventListener(\"change\", changeDrop1);\ndocument.getElementById(\"selData2\").addEventListener(\"change\", changeDrop2);\n\nfunction changeDrop1() {\n plot(map1, document.getElementById(\"selData1\").value)\n};\nfunction changeDrop2() {\n plot(map2, document.getElementById(\"selData2\").value)\n};" }, { "alpha_fraction": 0.5465748310089111, "alphanum_fraction": 0.5770206451416016, "avg_line_length": 33.85714340209961, "blob_id": "5ddf8a57fa3cc54e8910dd0138be6a415fce1d23", "content_id": "8daae39a77465701a9248e19b205b4338f4a6228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2759, "license_type": "no_license", "max_line_length": 126, "num_lines": 77, "path": "/Jamie/bar_graph/test.js", "repo_name": "racquesta/powerball_viz", "src_encoding": "UTF-8", "text": "// In the actual plot, there would be the years 2010 ~ 2017\r\n// All states that we have data on will go in states_data, and null data would = 0\r\n// Data would be sales/population to simulate how much a person spent on average on power ball tickets that year in that state\r\n\r\nvar sales_data = [{'year':2010, 'states_data':[{'state':'New Jersey','draw_sales':56,'pp_sales':12},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'New York','draw_sales':44,'pp_sales':21},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Florida','draw_sales':61,'pp_sales':9},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Massachusetts','draw_sales':92,'pp_sales':18},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Texas','draw_sales':52,'pp_sales':7}]},\r\n\t\t\t\t{'year':2011, 'states_data':[{'state':'New Jersey','draw_sales':48,'pp_sales':14},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'New York','draw_sales':82,'pp_sales':21},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Florida','draw_sales':32,'pp_sales':11},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Massachusetts','draw_sales':73,'pp_sales':11},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Texas','draw_sales':62,'pp_sales':8}]},\r\n\t\t\t\t{'year':2012, 'states_data':[{'state':'New Jersey','draw_sales':58,'pp_sales':12},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'New York','draw_sales':35,'pp_sales':17},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Florida','draw_sales':66,'pp_sales':9},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Massachusetts','draw_sales':77,'pp_sales':23},\r\n\t\t\t\t\t\t\t\t\t\t\t{'state':'Texas','draw_sales':59,'pp_sales':4}]}\r\n\t\t\t\t];\r\n\r\nvar $selDataset = document.getElementById(\"selDataset\");\r\nd3.json(\"/years\", function(error, response) {\r\n if (error) return console.log(error);\r\n var salesYears = response;\r\nfor (var i = 0; i < saleYears.length; i++) {\r\n var saleYear = saleYears[i];\r\n var $option = document.createElement(\"option\");\r\n $option.setAttribute(\"value\", saleYear);\r\n $option.innerHTML = saleYear;\r\n $selDataset.appendChild($option);\r\n };\r\n\r\nfor (i = 0; i < sales_data.length; i++) { \r\n var state_names = sales_data[i]['states_data'].map(ayy => ayy.state);\r\n var state_draw_sales = sales_data[i]['states_data'].map(ayy => ayy.draw_sales);\r\n var state_pp_sales = sales_data[i]['states_data'].map(ayy => ayy.pp_sales);\r\n var year = sales_data[i]['year']\r\n\r\n var trace_draw = {\r\n\t x: state_names,\r\n\t y: state_draw_sales,\r\n\t name: 'draw_sales',\r\n\t type: 'bar'\r\n\t};\r\n\r\n\tvar 
trace_pp = {\r\n\t x: state_names,\r\n\t y: state_pp_sales,\r\n\t name: 'pp_sales',\r\n\t type: 'bar'\r\n\t};\r\n\r\n\tvar data = [trace_draw, trace_pp];\r\n\r\n\tvar layout = {\r\n\t title: year + \" data\",\r\n\t barmode: 'stack',\r\n\t showlegend: false,\r\n\t xaxis: {\r\n\t tickangle: -45\r\n\t },\r\n\t yaxis: {\r\n\t zeroline: false,\r\n\t gridwidth: 2\r\n\t },\r\n\t bargap :0.1\r\n\t};\r\n\t\r\n\tvar div = document.createElement('div');\r\n\tdocument.body.appendChild(div);\r\n\tvar div_id = year + \"_data\"\r\n\tdiv.id = div_id;\r\n\r\n\tPlotly.newPlot(div_id, data, layout);\r\n\r\n}" } ]
7
eversonatbeaver/EngineeringDesign
https://github.com/eversonatbeaver/EngineeringDesign
edc5eada6ce71c2cf8f7ff3c09c0b8f5e35de09a
984e08f3c66547ed67aec2cd5d6e569ad05dcfac
ce2ec908ea7b5590a129e9e8ec44b2c455bcd106
refs/heads/master
2020-04-10T13:29:46.759832
2019-02-28T14:43:10
2019-02-28T14:43:10
161,052,196
0
7
MIT
2018-12-09T15:16:43
2019-01-09T03:29:20
2019-01-09T21:43:57
Python
[ { "alpha_fraction": 0.5602218508720398, "alphanum_fraction": 0.6077654361724854, "avg_line_length": 33.10810852050781, "blob_id": "6bef181701157cde4ffd6e450bc0bfa75b814149", "content_id": "bf9caab47c0290cb49988af36c27aaa783bb63fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1262, "license_type": "permissive", "max_line_length": 76, "num_lines": 37, "path": "/PhilippeDivisors.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "number = int(input(\"Enter a number and see what its divisors are (1-10): \"))\nif (number % 2) == 0:\n print(\"{0} is divisible by 2\".format(number))\nelse:\n print(\"{0} is not disible by 2\".format(number))\nif (number % 3) == 0:\n print(\"{0} is divisible by 3\".format(number))\nelse:\n print(\"{0} is not disible by 3\".format(number))\nif (number % 4) == 0:\n print(\"{0} is divisible by 4\".format(number))\nelse:\n print(\"{0} is not disible by 4\".format(number))\nif (number % 5) == 0:\n print(\"{0} is divisible by 5\".format(number))\nelse:\n print(\"{0} is not disible by 5\".format(number))\nif (number % 6) == 0:\n print(\"{0} is divisible by 6\".format(number))\nelse:\n print(\"{0} is not disible by 6\".format(number))\nif (number % 7) == 0:\n print(\"{0} is divisible by 7\".format(number))\nelse:\n print(\"{0} is not disible by 7\".format(number))\nif (number % 8) == 0:\n print(\"{0} is divisible by 8\".format(number))\nelse:\n print(\"{0} is not disible by 8\".format(number))\nif (number % 9) == 0:\n print(\"{0} is divisible by 9\".format(number))\nelse:\n print(\"{0} is not disible by 9\".format(number))\nif (number % 10) == 0:\n print(\"{0} is divisible by 10\".format(number))\nelse:\n print(\"{0} is not disible by 10\".format(number))\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7631579041481018, "avg_line_length": 24.16666603088379, "blob_id": "a5dc85a4b3e9c1c014cc9ca5250dbf3d4bbe7d12", "content_id": "fd37d8a7bbf98ed22699c021f5b437889c57c237", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "permissive", "max_line_length": 65, "num_lines": 6, "path": "/README.md", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "# EngineeringDesign\n\nHey Team, I'm glad you've made it here. 
\nThis is our class repository where you can upload your build III.\n\nemail with questions!\n\n" }, { "alpha_fraction": 0.7489539980888367, "alphanum_fraction": 0.7615062594413757, "avg_line_length": 33.14285659790039, "blob_id": "1834a3144f2df2cd236ff7624531fdbbff0b3181", "content_id": "856c89b9618817aef6f9751f71850eecd5176c70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "permissive", "max_line_length": 73, "num_lines": 7, "path": "/LinaQuote.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "GGGGGGGGGGGGGGGGGG = 'GGGGGGGGGGGGGGGGGG'\n\nfor letter in GGGGGGGGGGGGGGGGGG:\n print(\"Good things come to those who wait\")\n\nfor GGGGGGGGGGGGGGGGGG in range(0, 15):\n print \"Things worth having don't come easy %d\" % (GGGGGGGGGGGGGGGGGG)\n" }, { "alpha_fraction": 0.6276445984840393, "alphanum_fraction": 0.6332862973213196, "avg_line_length": 21.15625, "blob_id": "8cb87be69709d1f4f4a883b197abd3e7e953b499", "content_id": "8becc9adaaf4913b42c12b0d2462eb3a6555da29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 709, "license_type": "permissive", "max_line_length": 107, "num_lines": 32, "path": "/brinkbuildthree.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "import random as rand\n#neckless_gerald = \" o \\n \\|/ \\n / \\\\\"\n# o\n#\\|/\n#/ \\\ndef GeraldExtender(neck_size):\n\n\n print(\" o\")\n for i in range(neck_size):\n print(\" |\")\n print(\" \\|/\")\n print(\" / \\\\\")\n\n\nprint(\"Hello, Gerald has a bizarre disease called Giraffian Neck Syndrome. His neck can grow on command.\")\nprint(\"how big should his neck be?\")\n\nuser_neck_size = int(input())\nGeraldExtender(user_neck_size)\n#print(user_input)\n\nprint(\"How many friends should Gerald have?\")\n\ngerald_no_friends = int(input())\n\nif gerald_no_friends > 0:\n for i in range(gerald_no_friends):\n friend_neck_size = rand.randint(0,10)\n GeraldExtender(friend_neck_size)\nelse:\n print(\"Poor Gerald!\")\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 21, "blob_id": "3adc292d8d48bc9fd238b80124e8a06b8dfa5455", "content_id": "6f6c6527f36e7fb0f0afdf47372f4b5a5a7899ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "permissive", "max_line_length": 21, "num_lines": 1, "path": "/christian.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "print (\"helloworld\")" }, { "alpha_fraction": 0.631205677986145, "alphanum_fraction": 0.6595744490623474, "avg_line_length": 20.69230842590332, "blob_id": "e008a2e62bc8e2541af22241bf118e6a81ce5bc6", "content_id": "500bfc822e240f3b77046c990c7de8736e770974", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "permissive", "max_line_length": 43, "num_lines": 13, "path": "/main.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "harder=1\nbetter=2\nfaster=3\nstronger=4\nDaft_Punk = str(raw_input(\"Pick_a_Number\"))\nif Daft_Punk == '1':\n print(\"Harder\")\nif Daft_Punk == '2':\n print(\"Harder_Better\")\nif Daft_Punk == '3':\n print(\"Harder_Better_Faster\")\nif Daft_Punk == '4':\n print(\"Harder_Better_Faster_Stronger\")\n" }, { "alpha_fraction": 0.5377358198165894, 
"alphanum_fraction": 0.5754716992378235, "avg_line_length": 27.266666412353516, "blob_id": "d059a268bd7c7905d7a680d9726be4317110cae1", "content_id": "753b9f7310d522c668046752a4d8fbe1ac0a2e24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "permissive", "max_line_length": 61, "num_lines": 15, "path": "/aws.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "import random\nn = random.randint(0, 100)\nguess = int(raw_input(\"Chose a number from 0-100: \"))\nwhile n != \"guess\":\n print\n if guess < n:\n print \"try a higher number\"\n guess = int(raw_input(\"Chose a number from 0-100: \"))\n elif guess > n:\n print \"try a lower number\"\n guess = int(raw_input(\"Chose a number from 0-100: \"))\n else:\n print \"that's correct!\"\n break\n print\n" }, { "alpha_fraction": 0.5847107172012329, "alphanum_fraction": 0.6280992031097412, "avg_line_length": 14.580645561218262, "blob_id": "c592c61131f989efc2da5b46f7ec1c9131e25d7d", "content_id": "d2abcd131ec7521d3b7fc9fd5d0c45a61bccc79a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "permissive", "max_line_length": 78, "num_lines": 31, "path": "/Phone Number Generator.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "\nimport random, time\nmin = 1000000000\nmax = 9999999999\n\n\nroll_again = \"again\"\n\nwhile roll_again == \"again\" or roll_again == \"ag\":\n print \"\"\"\n\n\n\n\n\n\n\n\n\nGenerating phone numbers to prank call...\n\n \"\"\"\n time.sleep(1)\n print \"\"\"The numbers are....\n\n \"\"\"\n print random.randint(min, max)\n print random.randint(min, max)\n print random.randint(min, max)\n print random.randint(min, max)\n\n roll_again = raw_input(\"If none of these are satisfactory, type 'again' \")\n" }, { "alpha_fraction": 0.5985221862792969, "alphanum_fraction": 0.6133005023002625, "avg_line_length": 28, "blob_id": "e46db5865041732b0808c9ecfec0e23f89833ed4", "content_id": "cab4474e2e6c03f2e2446cc37e0355c40baebaf0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "permissive", "max_line_length": 51, "num_lines": 14, "path": "/guessing_game.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "import random\nnumber = random.randint(1,17)\nwin = False\nprint (\"I'm thinking of a number between 1 and 17\")\nwhile not win:\n guess = int(input(\"What is your guess?\"))\n if guess == number:\n win = True\n print(\"You're right!\")\n elif guess < number:\n print (\"Guess higher\")\n elif guess > number:\n print (\"Guess lower\")\nprint(\"You win! The number was {}\". 
format(number))\n" }, { "alpha_fraction": 0.7662337422370911, "alphanum_fraction": 0.7740259766578674, "avg_line_length": 26.5, "blob_id": "9ccca66e3fd101b5eae5e0f6ec192927ad0680bc", "content_id": "6179897b4d6c405dae46f00a7cce280bce9a7655", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "permissive", "max_line_length": 71, "num_lines": 14, "path": "/evy3.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "#has output be a string of characters\nimport string\n\n#has output be a random assortment\nfrom random import *\n\n#has output be made of different letters, punctuation, and digits\ncharacters = string.ascii_letters + string.punctuation + string.digits\n\n#gives limits to output\npassword = \"\".join(choice(characters) for x in range(randint(8, 16)))\n\n#prints the actual password\nprint password\n" }, { "alpha_fraction": 0.7201257944107056, "alphanum_fraction": 0.7327044010162354, "avg_line_length": 38.75, "blob_id": "02b784fde1b3009b65aae944553f54483c1b25de", "content_id": "429fcf8a5e7d1db1d7404082e05cfaef0f347a7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "permissive", "max_line_length": 156, "num_lines": 8, "path": "/evy2.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "#has code choose random option/restaurant\nimport random\n\n#list all of the options\nrestaurants = ['Tango Mango', 'Juice Press', 'The Bagel Table', 'Shake Shack', 'Panera', 'Wegmans', 'Oath', 'Sweetgreen', 'Johnys','Tender Greens', '11/11']\n\n#has code print random choice of restaurant\nprint(random.choice(restaurants))\n" }, { "alpha_fraction": 0.7220077514648438, "alphanum_fraction": 0.7220077514648438, "avg_line_length": 30.375, "blob_id": "790debf443593358555eae59d2bf068545331951", "content_id": "8f61050c1cb5902aaeeb7693a83aa574671de5b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "permissive", "max_line_length": 54, "num_lines": 8, "path": "/factorial.py", "repo_name": "eversonatbeaver/EngineeringDesign", "src_encoding": "UTF-8", "text": "#this is your factorial - type in an integer below\r\nprint(\"Input the number that you want to factorial\")\r\ndesired_factorial = int(input())\r\n\r\nfor i in range(1, desired_factorial): \r\n desired_factorial = desired_factorial * i\r\n\r\nprint(desired_factorial)\r\n" } ]
12
slommi71/impfterminservice.de_scraper
https://github.com/slommi71/impfterminservice.de_scraper
29d3f12c36b974656bacc73e4ef1d6451839516f
28737556447a47301c00057f18d18c864b73cca1
03c7c754e4c5beaa4c3abbf7cbb35b0e33c61d52
refs/heads/main
2023-05-01T07:40:32.376237
2021-05-23T13:31:37
2021-05-23T13:31:37
363,143,394
0
0
null
2021-04-30T13:06:35
2021-05-04T10:12:12
2021-05-23T13:31:37
Python
[ { "alpha_fraction": 0.7674418687820435, "alphanum_fraction": 0.7674418687820435, "avg_line_length": 7, "blob_id": "03b29538822780c3137b91919fc759d0296336d6", "content_id": "04fd19d2e87e822a2726cdb59fbdd0b048a2b4f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 8, "num_lines": 5, "path": "/requirements.txt", "repo_name": "slommi71/impfterminservice.de_scraper", "src_encoding": "UTF-8", "text": "# dotenv\r\npyyaml\r\ncolorama\r\nselenium\r\nutils" }, { "alpha_fraction": 0.6403940916061401, "alphanum_fraction": 0.6403940916061401, "avg_line_length": 20.75, "blob_id": "22f58107b5424a8eb65a10e7d93b9994dbfe1a9d", "content_id": "ac73d43e1ad7a1941b886a0e182c8f19c0bb19ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 83, "num_lines": 28, "path": "/src/main.py", "repo_name": "slommi71/impfterminservice.de_scraper", "src_encoding": "UTF-8", "text": "\"\"\" reads from impfterminservice.de\n \"\"\"\nimport os\nimport yaml\n\n# from dotenv.main import load_dotenv\n\nfrom utils.scraper import TerminScraper\n\n# load_dotenv()\n\n\ndef read_config():\n with open('./etc/config.yaml', 'rt') as f:\n return yaml.safe_load(f.read())\n\nif __name__ == \"__main__\":\n\n config = read_config()\n\n # trial_zentrum = {\"bundesland\": \"Baden-Württemberg\", \"address\": \"Ludwigsburg\"}\n\n ts = TerminScraper(\n driver_path=config['chromiumdriverpath'],\n impfzentrum=config['trial_zentrum'],\n vermittlungscode=config['VERMITTLUNGSCODE'],\n )\n ts.check_in()\n" } ]
2
DNason1999/Algorithms
https://github.com/DNason1999/Algorithms
9c4068873404d8997e242e13815889fcf0b7b7c0
abdcb1966096c1953c617aea5fbceeeaf35c6a6f
2c92947c606b0f5aa55570fa3891c2d17c5b3d63
refs/heads/master
2022-04-17T18:49:27.030333
2020-04-16T18:02:08
2020-04-16T18:02:08
256,291,330
0
0
null
2020-04-16T18:01:33
2020-04-10T04:24:48
2020-04-16T17:32:53
null
[ { "alpha_fraction": 0.6299999952316284, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 30.863636016845703, "blob_id": "bea04eec1283accbb7c50a604388dd2b4c4dacad", "content_id": "1b6a5110b4e6133e0f60aa0217703b78b2b319a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 164, "num_lines": 22, "path": "/recipe_batches/recipe_batches.py", "repo_name": "DNason1999/Algorithms", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport math\n\ndef recipe_batches(recipe, ingredients):\n batch_qty = []\n try:\n for ing_r in recipe:\n batches = ingredients[ing_r]//recipe[ing_r]\n batch_qty.append(batches)\n except KeyError as ke:\n return 0\n\n return min(batch_qty)\n\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test \n # your implementation with different inputs\n recipe = { 'milk': 100, 'butter': 50, 'flour': 5 }\n ingredients = { 'milk': 132, 'butter': 48, 'flour': 51 }\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(batches=recipe_batches(recipe, ingredients), ingredients=ingredients))" } ]
1
yinyanghu/MiningPoolMonitor
https://github.com/yinyanghu/MiningPoolMonitor
467aec7b4207295ab298fdc479239a1b05bed49c
ca24178f30b7be97d6226a8341a9aaf10f918491
2171859292c133899febe1892a1cdf4cd05569ae
refs/heads/master
2021-05-02T00:11:42.641010
2019-01-27T04:12:51
2019-01-27T04:12:51
120,938,649
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5469313859939575, "alphanum_fraction": 0.5582367181777954, "avg_line_length": 33.34257888793945, "blob_id": "0d30455880537e9cb79236f3382b6630a79f0770", "content_id": "8190ae449bbf7a7ba0fecf654a8f69dac48600be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21052, "license_type": "no_license", "max_line_length": 145, "num_lines": 613, "path": "/mining_pool_monitor.py", "repo_name": "yinyanghu/MiningPoolMonitor", "src_encoding": "UTF-8", "text": "import requests\nimport datetime\nimport time\nimport os\n\netn_wallet_address = 'etnkH3JcwEG4i2eApbeJk6fYMGmYAWc9yCZmVWEWdsa9XETkzWEY6o9M76AGhWUnrBVzuCor7vGSQHgxYLmdUGeeAaPih64cmM'\npas_wallet_address = '86646.2f6e24867ad0c6fd'\neth_wallet_address = '45f410e92683dAE322d91F2C8b26193b0FC3464D'\n\ntext_normal = '\\033[0m'\n\ndef bold(text):\n return '\\033[1m' + text + text_normal\n\n\ndef white(text):\n return '\\033[97m' + text + text_normal\n\n\ndef red(text):\n return '\\033[91m' + text + text_normal\n\n\ndef yellow(text):\n return '\\033[93m' + text + text_normal\n\n\ndef cyan(text):\n return '\\033[96m' + text + text_normal\n\n\ndef purple(text):\n return '\\033[95m' + text + text_normal\n\n\ndef request_data(url):\n # print(url)\n # print(requests.get(url))\n r = requests.get(url).json()\n if r['status']:\n return r['data']\n else:\n print(r)\n print('Error: ' + url)\n return None\n\n\ndef format_number(number):\n unit_lst = [x for x in ['', 'K', 'M', 'G', 'T', 'P']]\n unit = 0\n while number >= 1000:\n unit += 1\n number /= 1000\n return '%.3f' % number + ' ' + unit_lst[unit]\n\n\ndef format_hashrate(hashrate, base_unit):\n return format_number(hashrate * base_unit) + 'H/s'\n\n\ndef format_wallet_address(address):\n if len(address) > 30:\n return address[:10] + '...' 
+ address[-10:]\n else:\n return address\n\n\ndef convert_to_eth(amount):\n if amount is None:\n return 0\n else:\n return amount / 1e18\n\n\ndef convert_to_hashrate(hashrate):\n if hashrate is None:\n return 0\n elif isinstance(hashrate, dict):\n hashrates = {}\n for key in hashrate:\n if key[0] == 'h' and key[1:].isdigit():\n hashrates[key] = float(hashrate[key])\n return hashrates\n else:\n return float(hashrate)\n\n\nclass Worker:\n def __init__(self, name, hashrate, base_unit, last_seen, rating=None,\n avg_hashrate=None, reported_hashrate=None,\n valid_share=None, invalid_share=None, stale_share=None):\n self.name = name\n self.rating = rating\n self.hashrate = hashrate\n self.base_unit = base_unit\n self.last_seen = last_seen\n self.avg_hashrate = avg_hashrate\n self.reported_hashrate = reported_hashrate\n self.valid_share = valid_share\n self.invalid_share =invalid_share\n self.stale_share = stale_share\n\n def __str__(self):\n s = bold(yellow('%15s: ' % (self.name)))\n s += 'Hashrate: %s (effective)' % red(format_hashrate(self.hashrate, self.base_unit))\n if self.reported_hashrate is not None:\n s += ', %s (reported)' % red(format_hashrate(self.reported_hashrate, self.base_unit))\n if self.avg_hashrate is not None:\n if 'h1' in self.avg_hashrate:\n s += ', %s (1 hour)' % format_hashrate(self.avg_hashrate['h1'], self.base_unit)\n if 'h24' in self.avg_hashrate:\n s += ', %s (1 day)' % format_hashrate(self.avg_hashrate['h24'], self.base_unit)\n\n s += ';\\n' + (' ' * 17) + 'Last Seen: ' + str(self.last_seen)\n if self.rating is not None:\n s += '; Rating: ' + str(self.rating)\n if (self.valid_share is not None) and (self.invalid_share is not None) and (self.stale_share is not None):\n s += '; Share: %d, %d, %d' % (self.valid_share, self.invalid_share, self.stale_share)\n\n return s\n\n\nclass Payment:\n def __init__(self, amount, confirmed, date):\n self.amount = amount\n self.confirmed = confirmed\n self.date = date\n self.duration = None\n\n def update_duration(self, duration):\n self.duration = duration\n\n def __str__(self):\n return 'Amount: %.10f, Date: %s, Duration: %.2f hours, Confirmed: %s' % (self.amount, str(self.date), self.duration, str(self.confirmed))\n\n\nclass Account:\n def __init__(self, wallet_address, base_unit):\n self.wallet_address = wallet_address\n\n self.balance = 0\n self.unconfirmed_balance = None\n self.current_hashrate = 0\n self.base_unit = base_unit\n self.current_reported_hashrate = None\n self.avg_hashrate = None\n\n self.last_seen = None\n self.valid_share = None\n self.invalid_share = None\n self.stale_share = None\n self.valid_percent = None\n self.invalid_percent = None\n self.stale_percent = None\n\n self.active_worker = None\n self.workers = None\n\n self.payments = None\n self.total_payment = None\n\n def get_all_balance(self):\n total = self.balance\n if self.unconfirmed_balance is not None:\n total += self.unconfirmed_balance\n return total\n\n def get_hashrate(self):\n return self.avg_hashrate['h1']\n\n def get_total_payment(self):\n return self.total_payment\n\n def update(self, balance, current_hashrate, unconfirmed_balance=None,\n current_reported_hashrate=None, avg_hashrate=None,\n last_seen=None,\n valid_share=None, invalid_share=None, stale_share=None,\n active_worker=None):\n self.balance = balance\n self.unconfirmed_balance = unconfirmed_balance\n self.current_hashrate = current_hashrate\n self.current_reported_hashrate = current_reported_hashrate\n self.avg_hashrate = avg_hashrate\n self.last_seen = last_seen\n self.valid_share = 
valid_share\n self.invalid_share = invalid_share\n self.stale_share = stale_share\n if (valid_share is not None) and (invalid_share is not None) and (stale_share is not None):\n total_share = valid_share + invalid_share + stale_share\n self.valid_percent = valid_share / total_share * 100.0\n self.invalid_percent = invalid_share / total_share * 100.0\n self.stale_percent = stale_share / total_share * 100.0\n self.active_worker = active_worker\n\n def update_workers(self, workers):\n self.workers = workers\n\n def update_payments(self, payments):\n for i in range(len(payments) - 1):\n diff = payments[i].date - payments[i + 1].date\n hours = diff.seconds / 3600 + diff.days * 24\n payments[i].update_duration(hours)\n self.payments = payments\n\n self.total_payment = 0\n for payment in payments:\n self.total_payment += payment.amount\n\n def __str__(self):\n s = bold(white('Account: ' + format_wallet_address(self.wallet_address))) + '\\n'\n s += '\\t' + bold(purple('Balance: %.10f' % (self.balance)))\n if self.unconfirmed_balance is not None:\n s += '\\t' + 'Unconfirmed Balance: %.10f' % (self.unconfirmed_balance)\n if self.last_seen is not None:\n s += '\\t' + 'Last Seen: ' + str(self.last_seen)\n s += '\\n' + '\\n'\n s += bold(white('Hashrate:')) + '\\n'\n s += '\\t' + 'Current: ' + red(format_hashrate(self.current_hashrate, self.base_unit))\n if self.current_reported_hashrate is not None:\n s += '\\t' + 'Current Reported: ' \\\n + red(format_hashrate(self.current_reported_hashrate, self.base_unit))\n if self.avg_hashrate is not None:\n if 'h1' in self.avg_hashrate:\n s += '\\t' + '1 Hour Average: ' \\\n + format_hashrate(self.avg_hashrate['h1'], self.base_unit)\n\n if 'h24' in self.avg_hashrate:\n s += '\\t' + '1 Day Average: ' \\\n + format_hashrate(self.avg_hashrate['h24'], self.base_unit)\n s += '\\n'\n if (self.valid_share is not None) and (self.invalid_share is not None) and (self.stale_share is not None):\n s += '\\t' + 'Valid Share: %d (%.2f%%)' % (self.valid_share, self.valid_percent)\n s += '\\t' + 'Invalid Share: %d (%.2f%%)' % (self.invalid_share, self.invalid_percent)\n s += '\\t' + 'Stale Share: %d (%.2f%%)' % (self.stale_share, self.stale_percent) + '\\n'\n s += '\\n'\n s += bold(white('Workers:'))\n if self.active_worker is not None:\n s += ' ' + cyan(bold(str(self.active_worker) + ' Active'))\n s += '\\n'\n s += '\\n'.join([str(worker) for worker in self.workers]) + '\\n'\n s += '\\n'\n s += bold(white('Payment:')) + '\\n'\n s += '\\t' + bold(yellow('Total Amount: ' + str(self.total_payment))) + '\\n'\n s += '\\t' + '\\n\\t'.join([str(payment) for payment in self.payments[:4]])\n return s\n\n\nclass Price:\n def __init__(self):\n self.usd = 0\n self.btc = 0\n\n def update(self, usd, btc):\n self.usd = usd\n self.btc = btc\n\n def get_usd_price(self):\n return self.usd\n\n def __str__(self):\n return bold(white('Price:')) + '\\n' \\\n + '\\t' + 'USD: $' + str(self.usd) + ',' \\\n + '\\t' + 'BTC: ' + str(self.btc)\n\n\nclass Estimation:\n def __init__(self, payment_limit):\n self.payment_limit = payment_limit\n\n self.estimated_profit = 0\n\n self.hour_coin = 0\n self.hour_usd = 0\n self.day_coin = 0\n self.day_usd = 0\n self.month_coin = 0\n self.month_usd = 0\n\n self.next_payment_time = 0\n\n def update(self, estimated_profit, balance,\n hour_coin, hour_usd, day_coin, day_usd, month_coin, month_usd):\n self.estimated_profit = estimated_profit\n\n self.hour_coin = hour_coin\n self.hour_usd = hour_usd\n self.day_coin = day_coin\n self.day_usd = day_usd\n self.month_coin = 
month_coin\n self.month_usd = month_usd\n\n if self.hour_coin == 0:\n self.next_payment_time = float('inf')\n else:\n self.next_payment_time = max(0, (self.payment_limit - balance) / self.hour_coin)\n\n def update_per_min(self, estimated_profit, balance,\n minute_coin, minute_usd):\n self.estimated_profit = estimated_profit\n\n self.hour_coin = minute_coin * 60.0\n self.hour_usd = minute_usd * 60.0\n self.day_coin = self.hour_coin * 24.0\n self.day_usd = self.hour_usd * 24.0\n self.month_coin = self.day_coin * 30.0\n self.month_usd = self.day_usd * 30.0\n\n self.next_payment_time = (self.payment_limit - balance) / self.hour_coin\n\n def __str__(self):\n s = bold(white('Estimation:')) + '\\n'\n s += '\\t' + bold(yellow('Total: $%.2f' % self.estimated_profit)) + '\\n'\n s += '\\t' + 'Hour: %.10f ($%.2f)' % (self.hour_coin, self.hour_usd)\n s += '\\t' + 'Day: %.10f ($%.2f)' % (self.day_coin, self.day_usd)\n s += '\\t' + 'Month: %.10f ($%.2f)' % (self.month_coin, self.month_usd) + '\\n'\n s += '\\t' + bold(red('Next Payment: %.2f hours' % self.next_payment_time))\n return s\n\n\nclass Network:\n def __init__(self, base_unit):\n self.hashrate = 0\n self.block_time = 0\n self.difficulty = 0\n self.base_unit = base_unit\n\n def update(self, hashrate, block_time, difficulty):\n self.hashrate = hashrate\n self.block_time = block_time\n self.difficulty = difficulty\n\n def __str__(self):\n s = bold(white('Network:')) + '\\n'\n s += '\\t' + 'Hashrate: ' + format_hashrate(self.hashrate, self.base_unit) + '\\n'\n s += '\\t' + 'Block Time: %.1fs' % self.block_time + '\\n'\n s += '\\t' + 'Difficulty: ' + format_number(self.difficulty)\n return s\n\n\nclass NanoPool:\n def __init__(self, name, coin, base_unit, wallet_address):\n self.api = 'https://api.nanopool.org/v1/'\n self.name = name\n self.coin = coin\n self.wallet_address = wallet_address\n self.payment_limit = self.__update_payment_limit()\n\n self.hashrate = 0\n self.base_unit = base_unit\n\n self.account = Account(wallet_address, base_unit)\n self.price = Price()\n\n self.estimation = Estimation(self.payment_limit)\n\n def update(self):\n self.__update_pool_hashrate()\n self.__update_account()\n self.__update_price()\n self.__update_estimation()\n\n def __update_payment_limit(self):\n url = self.api + self.coin + '/usersettings/' + self.wallet_address\n data = request_data(url)\n return float(data['payout'])\n\n def __update_account(self):\n url = self.api + self.coin + '/reportedhashrate/' + self.wallet_address\n data = request_data(url)\n current_reported_hashrate=convert_to_hashrate(data)\n\n url = self.api + self.coin + '/user/' + self.wallet_address\n data = request_data(url)\n\n self.account.update(\n balance=float(data['balance']),\n unconfirmed_balance=float(data['unconfirmed_balance']),\n current_reported_hashrate=current_reported_hashrate,\n current_hashrate=convert_to_hashrate(data['hashrate']),\n avg_hashrate=convert_to_hashrate(data['avgHashrate']))\n\n self.__update_account_workers(data['workers'])\n\n self.__update_account_payments()\n\n def __update_account_workers(self, data):\n workers = []\n for one in data:\n worker = Worker(\n name=one['id'],\n rating=int(one['rating']),\n hashrate=convert_to_hashrate(one['hashrate']),\n base_unit=self.base_unit,\n last_seen=datetime.datetime.fromtimestamp(one['lastshare']),\n avg_hashrate=convert_to_hashrate(one))\n workers.append(worker)\n workers.sort(key=lambda worker: worker.hashrate, reverse=True)\n self.account.update_workers(workers)\n\n def __update_account_payments(self):\n url = 
self.api + self.coin + '/payments/' + self.wallet_address\n data = request_data(url)\n payments = []\n for one in data:\n payment = Payment(\n float(one['amount']),\n bool(one['confirmed']),\n datetime.datetime.fromtimestamp(one['date']))\n payments.append(payment)\n\n self.account.update_payments(payments)\n\n def __update_pool_hashrate(self):\n url = self.api + self.coin + '/pool/hashrate'\n self.hashrate = convert_to_hashrate(request_data(url))\n\n def __update_price(self):\n url = self.api + self.coin + '/prices'\n data = request_data(url)\n usd = float(data['price_usd'])\n btc = float(data['price_btc'])\n self.price.update(usd, btc)\n\n def __update_estimation(self):\n current_hashrate = self.account.get_hashrate()\n if current_hashrate == 0:\n self.estimation.update(self.__get_profit(), self.account.get_all_balance(), 0, 0, 0, 0, 0, 0)\n else:\n url = self.api + self.coin + '/approximated_earnings/' + str(current_hashrate)\n data = request_data(url)\n\n self.estimation.update(\n self.__get_profit(),\n self.account.get_all_balance(),\n float(data['hour']['coins']),\n float(data['hour']['dollars']),\n float(data['day']['coins']),\n float(data['day']['dollars']),\n float(data['month']['coins']),\n float(data['month']['dollars']))\n\n def __get_profit(self):\n return self.account.get_total_payment() * self.price.get_usd_price()\n\n def __str__(self):\n s = bold(cyan(self.name)) + '\\n'\n s += bold(cyan('==================')) + '\\n'\n s += '\\n'\n s += bold(white('Pool:')) + '\\n'\n s += '\\t' + 'Hashrate: ' + format_hashrate(self.hashrate, self.base_unit) + '\\n'\n s += '\\t' + 'Payment Limit: ' + str(self.payment_limit) + '\\n'\n s += '\\n'\n s += str(self.account) + '\\n'\n s += '\\n'\n s += str(self.price) + '\\n'\n s += '\\n'\n s += str(self.estimation)\n return s\n\n\nclass Ethermine:\n def __init__(self, name, wallet_address):\n self.api = 'http://api.ethermine.org'\n self.name = name\n self.wallet_address = wallet_address\n self.payment_limit = self.__update_payment_limit()\n\n self.hashrate = 0\n self.account = Account(wallet_address, 1)\n self.price = Price()\n self.network = Network(1)\n self.estimation = Estimation(self.payment_limit)\n\n def update(self):\n self.__update_pool_and_price()\n self.__update_network()\n self.__update_account_and_estimation()\n\n def __update_payment_limit(self):\n url = self.api + '/miner/' + self.wallet_address + '/settings'\n data = request_data(url)\n return convert_to_eth(float(data['minPayout']))\n\n def __update_account_and_estimation(self):\n url = self.api + '/miner/' + self.wallet_address + '/currentStats'\n data = request_data(url)\n self.account.update(\n balance=convert_to_eth(data['unpaid']),\n unconfirmed_balance=convert_to_eth(data['unconfirmed']),\n current_hashrate=convert_to_hashrate(data['currentHashrate']),\n current_reported_hashrate=convert_to_hashrate(data['reportedHashrate']),\n avg_hashrate={'h24': convert_to_hashrate(data['averageHashrate'])},\n last_seen=datetime.datetime.fromtimestamp(data['lastSeen']),\n valid_share=int(data['validShares']),\n invalid_share=int(data['invalidShares']),\n stale_share=int(data['staleShares']),\n active_worker=int(data['activeWorkers']))\n\n self.__update_account_workers()\n self.__update_account_payments()\n\n self.estimation.update_per_min(\n self.__get_profit(),\n self.account.get_all_balance(),\n data['coinsPerMin'],\n data['usdPerMin'])\n\n\n def __update_account_workers(self):\n url = self.api + '/miner/' + self.wallet_address + '/workers'\n data = request_data(url)\n workers = []\n 
for one in data:\n # print(one['validShares'])\n worker = Worker(\n name=one['worker'],\n last_seen=datetime.datetime.fromtimestamp(one['lastSeen']) if one['lastSeen'] is not None else '-',\n hashrate=convert_to_hashrate(one['currentHashrate']),\n base_unit = 1,\n avg_hashrate={'h24': convert_to_hashrate(one['averageHashrate'])},\n reported_hashrate=convert_to_hashrate(one['reportedHashrate']),\n valid_share=int(one['validShares']) if one['validShares'] is not None else -1,\n invalid_share=int(one['invalidShares']) if one['invalidShares'] is not None else -1,\n stale_share=int(one['staleShares']) if one['staleShares'] is not None else -1)\n workers.append(worker)\n self.account.update_workers(workers)\n\n def __update_account_payments(self):\n url = self.api + '/miner/' + self.wallet_address + '/payouts'\n data = request_data(url)\n payments = []\n for one in data:\n payment = Payment(\n convert_to_eth(float(one['amount'])),\n True,\n datetime.datetime.fromtimestamp(one['paidOn']))\n payments.append(payment)\n\n self.account.update_payments(payments)\n\n def __get_profit(self):\n return self.account.get_total_payment() * self.price.get_usd_price()\n\n def __update_pool_and_price(self):\n url = self.api + '/poolStats'\n data = request_data(url)\n\n self.hashrate = data['poolStats']['hashRate']\n self.price.update(\n data['price']['usd'],\n data['price']['btc'])\n\n def __update_network(self):\n url = self.api + '/networkStats'\n data = request_data(url)\n\n self.hashrate_percent = self.hashrate / float(data['hashrate']) * 100.0\n\n self.network.update(\n float(data['hashrate']),\n float(data['blockTime']),\n int(data['difficulty']))\n\n def __str__(self):\n s = bold(cyan(self.name)) + '\\n'\n s += bold(cyan('==================')) + '\\n'\n s += '\\n'\n s += bold(white('Pool:')) + '\\n'\n s += '\\t' + 'Hashrate: %s (%.2f%%)' % (format_hashrate(self.hashrate, 1), self.hashrate_percent) + '\\n'\n s += '\\t' + 'Payment Limit: ' + str(self.payment_limit) + '\\n'\n s += '\\n'\n s += str(self.network) + '\\n'\n s += '\\n'\n s += str(self.account) + '\\n'\n s += '\\n'\n s += str(self.price) + '\\n'\n s += '\\n'\n s += str(self.estimation)\n return s\n\n\netn_nanopool = NanoPool('Electroneum (ETN)', 'etn', 1, etn_wallet_address)\npas_nanopool = NanoPool('PascalCoin (PAS)', 'pasc', 1000000, pas_wallet_address)\neth_ethermine = Ethermine('Ethereum (ETH)', eth_wallet_address)\n\n\ndef etn():\n etn_nanopool.update()\n print(str(etn_nanopool))\n\n\ndef pas():\n pas_nanopool.update()\n print(str(pas_nanopool))\n\n\ndef eth():\n eth_ethermine.update()\n print(str(eth_ethermine))\n\n\nif __name__ == '__main__':\n etn()\n while True:\n os.system('clear')\n eth()\n time.sleep(30)\n os.system('clear')\n etn()\n time.sleep(30)\n os.system('clear')\n pas()\n time.sleep(30)\n" } ]
1
TanLucVo/PRN-Logix-InfixToPosfix
https://github.com/TanLucVo/PRN-Logix-InfixToPosfix
71fa474be3f336b112d956d3e3e9481b02d6e590
9f38028306f0cdbae042f54d01b4de2d69af24be
fa9b480406f16423272c3a7c9c1198cb39dbe3ee
refs/heads/master
2020-09-06T00:49:17.728245
2019-11-07T15:22:01
2019-11-07T15:22:01
220,263,552
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43998950719833374, "alphanum_fraction": 0.44732704758644104, "avg_line_length": 28.137405395507812, "blob_id": "20d1d4c889cc8569a663eccd9c137757751cdf27", "content_id": "949d3d5e549a163e45e36648425666c0e588e90e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3816, "license_type": "no_license", "max_line_length": 159, "num_lines": 131, "path": "/Essay.py", "repo_name": "TanLucVo/PRN-Logix-InfixToPosfix", "src_encoding": "UTF-8", "text": "import itertools\ndef readInfix(filename):\n with open(filename) as f:\n Infix = f.readlines()\n return Infix[0]\n##########################################Student do these 2 function\ndef is_operator(c):\n if (c==\"~\" or c==\"&\" or c==\">\" or c==\"|\" or c==\"=\"):\n return True\n return False\ndef precedence(arg):\n if arg == \"=\":\n return 5\n if arg == \">\": \n return 4\n if arg == \"|\": \n return 3\n if arg == \"&\": \n return 2\n if arg == \"~\": \n return 1\n return 6\ndef isNot(c):\n if c ==\"~\":\n return True\n else:\n return False\n\ndef sort_var(Postfix):\n s=[]\n for i in Postfix:\n if (i.isalpha()):\n if(i not in s):\n s.append(i)\n s.sort()\n return s\n\ndef add_result(table,result):\n for i in range(len(table)):\n table[i]+=(result[i],)\n return table\n\ndef find(array,b):\n for i in range(len(array)):\n if b==array[i]:\n return i\ndef Infix2Postfix(Infix):\n stack= []\n Postfix= []\n for i in Infix:\n if is_operator(i):\n while(len(stack) != 0 and ((not isNot(i) and (precedence(i) >= precedence(stack[-1])) or (isNot(i) and (precedence(i) > precedence(stack[-1])))))):\n Postfix.append(stack.pop());\n stack.append(i)\n elif i == \"(\":\n stack.append(i)\n elif i == \")\":\n while stack[-1] != \"(\":\n Postfix.append(stack.pop())\n stack.pop()\n else:\n Postfix.append(i)\n\n while (len(stack) != 0 ) :\n Postfix.append(stack.pop())\n return(Postfix)\ndef Postfix2Truthtable(Postfix):\n b=sort_var(Postfix)\n table = list(itertools.product([False,True],repeat = len(b)))\n stack = []\n \n for i in Postfix:\n result=[a[0] for a in table]\n if not is_operator(i):\n stack.append(i)\n else: \n if i=='~':\n \n left=stack.pop()\n \n if left in b:\n pri2=find(b,left)\n left=[a[pri2] for a in table]\n for j in range(len(left)):\n result[j]= not left[j]\n add_result(table,result)\n stack.append(result)\n else:\n right=stack.pop()\n left=stack.pop()\n count=0\n if right in b:\n pri1=find(b,right)\n right=[a[pri1] for a in table]\n if left in b:\n pri2=find(b,left)\n left=[a[pri2] for a in table]\n if i=='&':\n \n for j in range(len(left)):\n result[j]=left[j] and right[j] \n elif i=='|':\n for j in range(len(left)):\n result[j]=left[j] or right[j]\n elif i=='>':\n for j in range(len(left)):\n result[j]=(not left[j]) or right[j]\n elif i=='=':\n for j in range(len(left)):\n result[j]=((not left[j]) or right[j]) and ((not right[j]) or left[j])\n stack.append(result)\n add_result(table,result)\n return table\n##########################################End student part\ndef writeTruthtable(table):\n import sys\n outfile=sys.argv[0]\n outfile=outfile[0:-2]\n outfile+=\"txt\" \n with open(outfile, 'w') as f:\n for lines in table:\n for item in lines:\n f.write(\"%s\\t\" % item)\n f.write(\"\\n\")\n f.close()\ndef main():\n Infix=readInfix(\"Logicexpression.txt\")\n Postfix=Infix2Postfix(Infix)\n Truthtable=Postfix2Truthtable(Postfix)\n writeTruthtable(Truthtable)\nmain()" } ]
1
zzzzz167/linnian-qwq-bot
https://github.com/zzzzz167/linnian-qwq-bot
da5c45ea7355ced191cb5ab33b5b52c975ddeb9e
b8b2fc110643da77291efcfac3160fb023d6ba94
275e4bc748118ced637936ba16b7425b4039474a
refs/heads/master
2023-07-20T17:41:29.106225
2021-08-21T05:40:51
2021-08-21T05:40:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46657249331474304, "alphanum_fraction": 0.47128060460090637, "avg_line_length": 36.26315689086914, "blob_id": "2912667922e67675e8fe0f3880b32f7b0db7ed70", "content_id": "c9866279aaf0f709dcf1f136b797f35003cf168b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4494, "license_type": "no_license", "max_line_length": 86, "num_lines": 114, "path": "/module/tickets.py", "repo_name": "zzzzz167/linnian-qwq-bot", "src_encoding": "UTF-8", "text": "ABLE = False\nfrom pathlib import Path\nfrom graia.saya import Saya, Channel\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nimport random\nfrom avilla.execution.message import MessageSend\nfrom avilla.message.chain import MessageChain\nfrom avilla.relationship import Relationship\nfrom avilla.builtins.profile import MemberProfile, GroupProfile\nfrom avilla.builtins.elements import PlainText, Notice\nfrom avilla.builtins.elements import Image as IMG\nfrom avilla.event.message import MessageEvent\nfrom lib.bank import Bank\nfrom lib import limiter\n\nsaya = Saya.current()\nchannel = Channel.current()\nbank = Bank(\"./data/bank.json\")\ntickets = {}\nticket_list = []\nlast_result = {}\n\n\[email protected](ListenerSchema(listening_events=[MessageEvent]))\nasync def sendmsg(event: MessageEvent, rs: Relationship[MemberProfile, GroupProfile]):\n if event.message.as_display().startswith(\"#许愿券 \"):\n await limiter.limit(\"wish\", rs, 12)\n ctx = event.message.get_first(PlainText).text[5:]\n if ctx.startswith(\"购买 \"):\n ticketnumber = str(int(ctx[3:]))\n if len(tickets) == 5:\n await rs.exec(\n MessageSend(\n MessageChain.create([PlainText(\"许愿券超过5张了UWU,请发送#许愿券 开奖\")])\n )\n )\n pass\n if int(ticketnumber) < 0:\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"不能为负数哦 UwU\")]))\n )\n pass\n elif not len(ticketnumber) == 6:\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"许愿券码为6位数 UwU\")]))\n )\n pass\n\n elif rs.ctx.id in tickets.keys():\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"你已经购买了哦 UwU\")]))\n )\n pass\n elif ticketnumber in tickets.values():\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"已经被人购买了哦 UwU\")]))\n )\n pass\n\n else:\n try:\n await bank.withdraw(rs.ctx.id, 6)\n tickets[rs.ctx.id] = ticketnumber\n ticket_list.append(rs.ctx.id)\n await rs.exec(\n MessageSend(\n MessageChain.create(\n [\n PlainText(\n f\"许愿券购买成功,现在共有{len(tickets)} 张许愿券,到第5张许愿券开奖\"\n )\n ]\n )\n )\n )\n except ValueError:\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"余额不足哦 UwU\")]))\n )\n if ctx.startswith(\"开奖\"):\n if len(tickets) != 5:\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"许愿券还没有开奖 UwU\")]))\n )\n else:\n\n ticket = random.choice(ticket_list)\n ticket_num = tickets[ticket]\n jid = len(last_result.keys())\n await rs.exec(\n MessageSend(\n MessageChain.create(\n [\n PlainText(\n \"中奖许愿券为{} UwU,本次抽奖id为{}\".format(ticket_num, jid)\n )\n ]\n )\n )\n )\n try:\n await bank.deposit(id, 60)\n except:\n await bank.create_account(id, 160)\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(\"已向许愿成功的人转入60霖念币\")]))\n )\n last_result[jid] = \"id为{}中奖许愿券为{} UwU\".format(jid, ticket_num)\n tickets.clear()\n if ctx.startswith(\"查询 \"):\n jid = int(ctx[3:])\n await rs.exec(\n MessageSend(MessageChain.create([PlainText(last_result[jid])]))\n )\n" }, { "alpha_fraction": 0.7827585935592651, "alphanum_fraction": 0.7873563170433044, "avg_line_length": 36.826087951660156, "blob_id": 
"bd682a14a47e01a92559e5551790f360b600b7d4", "content_id": "9795c91795709025ace43dcab86025d97e57aa31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "no_license", "max_line_length": 70, "num_lines": 23, "path": "/module/yingyu.py", "repo_name": "zzzzz167/linnian-qwq-bot", "src_encoding": "UTF-8", "text": "from avilla.builtins.elements import PlainText\nfrom avilla.builtins.profile import GroupProfile, MemberProfile\nfrom avilla.event.message import MessageEvent\nfrom avilla.execution.message import MessageSend\nfrom avilla.message.chain import MessageChain\nfrom avilla.relationship import Relationship\nfrom graia.saya import Channel, Saya\nfrom lib.yinglish import yinglish\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\n\nsaya = Saya.current()\nchannel = Channel.current()\n\n\[email protected](ListenerSchema(listening_events=[MessageEvent]))\nasync def handle_message(\n event: MessageEvent, rs: Relationship[MemberProfile, GroupProfile]\n):\n if not event.message.as_display().startswith(\"#变坏 \"):\n return\n msg = event.message.as_display()[4:]\n resp = yinglish.chs2yin(msg, 1.0)\n await rs.exec(MessageSend(MessageChain.create([PlainText(resp)])))\n" }, { "alpha_fraction": 0.7292202115058899, "alphanum_fraction": 0.7300771474838257, "avg_line_length": 37.900001525878906, "blob_id": "3f7cf354f6e858f15f08ad5268130743b2bd5cdd", "content_id": "a93300102a41410fae688c70f4fc33699f00468f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 98, "num_lines": 30, "path": "/module/prme.py", "repo_name": "zzzzz167/linnian-qwq-bot", "src_encoding": "UTF-8", "text": "from os import write\nfrom graia.saya import Saya, Channel\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom avilla.execution.message import MessageSend\nfrom avilla.message.chain import MessageChain\nfrom avilla.relationship import Relationship\nfrom avilla.builtins.profile import MemberProfile, GroupProfile\nfrom avilla.builtins.elements import PlainText, Notice\nfrom avilla.builtins.elements import Image\nfrom avilla.event.message import MessageEvent\nfrom lib.bank import Bank\nfrom lib.limiter import limit\nimport aiohttp\n\nsaya = Saya.current()\nchannel = Channel.current()\n\n\[email protected](ListenerSchema(listening_events=[MessageEvent]))\nasync def message_event_logger(event: MessageEvent, rs: Relationship[MemberProfile,GroupProfile]):\n if event.message.as_display() == \"舔我\":\n await limit(\"prme\", rs, 8)\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://chp.shadiao.app/api.php\") as resp:\n data = await resp.text()\n await rs.exec(\n MessageSend(\n MessageChain.create([Notice(target=rs.ctx.id), PlainText(\" \" + data)])\n )\n )\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7024844884872437, "avg_line_length": 31.85714340209961, "blob_id": "7028c67de11b3468fffd6b059027cdf922aa2fd7", "content_id": "9f0d07684518e1bfedd333e70a2519489b9c030a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 102, "num_lines": 49, "path": "/module/rss/__init__.py", "repo_name": "zzzzz167/linnian-qwq-bot", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport aiohttp\nimport re\nimport os\nfrom avilla.builtins.profile import GroupProfile, 
MemberProfile\nfrom avilla.provider import RawProvider\nimport loguru\nimport yaml\nimport asyncio\nfrom lib import rss_parsers\nfrom avilla.builtins.elements import Image, PlainText\nfrom avilla.event.message import MessageEvent\nfrom avilla.relationship import Relationship\nfrom graia.saya import Saya, Channel\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom avilla.message.chain import MessageChain\nfrom avilla.exceptions import AccountMuted\nfrom lib import img2text\nfrom avilla.execution.message import MessageSend\n\n\n\n\nsaya = Saya.current()\nchannel = Channel.current()\n\nrss_config = yaml.load(Path(\"./configs/rss.yml\").read_text(),yaml.SafeLoader)\[email protected](ListenerSchema(\n listening_events=[MessageEvent]\n )\n)\nasync def network_compiler(\n event: MessageEvent,\n rs: Relationship[MemberProfile,GroupProfile]\n):\n if event.message.as_display().startswith(\"#lofter \"):\n msg = event.message.as_display().replace(\"#lofter \", \"\")\n cmd = msg.split(\" \")\n async def parse(id):\n try:\n resp = await rss_parsers.lofter_parser(f\"https://rsshub.rainlong.cn/lofter/user/{id}\")\n await rs.exec(MessageSend(MessageChain.create([Image(RawProvider( resp))])))\n except:\n await rs.exec(MessageSend(MessageChain.create([PlainText(\"貌似在Lofter没有这个id\")])))\n\n if cmd[0] == \"解析\":\n loguru.logger.info(f\"{rs.ctx.id} 解析 {cmd[1]}\")\n id = cmd[1]\n await parse(id)\n" }, { "alpha_fraction": 0.7358317971229553, "alphanum_fraction": 0.7449725866317749, "avg_line_length": 36.72413635253906, "blob_id": "e01edb04af0bd4028f802d789c4b5be15832aa42", "content_id": "8807bdf0094ba818ab87606e2307e477ef6db748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 98, "num_lines": 29, "path": "/module/longtu.py", "repo_name": "zzzzz167/linnian-qwq-bot", "src_encoding": "UTF-8", "text": "from os import write\nfrom avilla.provider import HttpGetProvider\nfrom graia.saya import Saya, Channel\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom avilla.execution.message import MessageSend\nfrom avilla.message.chain import MessageChain\nfrom avilla.relationship import Relationship\nfrom avilla.builtins.profile import MemberProfile, GroupProfile\nfrom avilla.builtins.elements import PlainText, Notice\nfrom avilla.builtins.elements import Image\nfrom avilla.event.message import MessageEvent\nfrom lib.bank import Bank\nfrom lib.limiter import limit\nimport aiohttp\n\nsaya = Saya.current()\nchannel = Channel.current()\n\n\[email protected](ListenerSchema(listening_events=[MessageEvent]))\nasync def message_event_logger(event: MessageEvent, rs: Relationship[MemberProfile,GroupProfile]):\n if event.message.as_display() == \"来点龙图\" and rs.ctx.profile.group.id == \"1067987419\":\n await rs.exec(\n MessageSend(\n MessageChain.create(\n [Image(HttpGetProvider(\"https://lt.linnian.icu/api/get\"))]\n )\n )\n )\n" } ]
5
gmadhudatascientist/HAM_SPAM_Mail_Classification
https://github.com/gmadhudatascientist/HAM_SPAM_Mail_Classification
e914cb79477eeea937bd2e2878a4678551b9ca08
a2f5828137a22e364c836f2eaa6d9ccdb2025dc0
b2a4f9d87d1c60c9a9e788236e6b08ba1de64aee
refs/heads/main
2023-08-19T08:33:09.726793
2021-09-11T00:57:21
2021-09-11T00:57:21
405,247,802
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7735334038734436, "alphanum_fraction": 0.7912687659263611, "avg_line_length": 43.9375, "blob_id": "d2258714f9464d4b3c9220002964795191b67b53", "content_id": "9aea7a49bb23bb4dde21c4292ebe1b5f20b20c0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 733, "license_type": "no_license", "max_line_length": 94, "num_lines": 16, "path": "/Readme.txt", "repo_name": "gmadhudatascientist/HAM_SPAM_Mail_Classification", "src_encoding": "UTF-8", "text": "This is a PoC project.The goal of the project\r\nis to classify the mail is HAM or SPAM. We tried different classifiers\r\nbut finally we found that Nive bayes algorithm is giving good accuracy compared to all models.\r\nSteps involved to solve this problem:\r\n1.Import the libraries\r\n2.Load the dataset\r\n3.clean the data\r\n4.load the custom built Stopwords\r\n5.splitting data into train and test data sets \r\n6.Learning Term weighting and normalizing on entire emails\r\n7.Preparing a naive bayes model on training data set \r\n8.Predict the results of train and test datasets\r\n9.Training and Test Data accuracy\r\n\r\nFinally we got test data accuracy is 96% and train data accuracy is 96%\r\nSo finally proved that this model is giving best accuracy." }, { "alpha_fraction": 0.7025017142295837, "alphanum_fraction": 0.7089249491691589, "avg_line_length": 30.130434036254883, "blob_id": "e43d44ccf8ac879d447abd5119c525bb3168556b", "content_id": "2a427bddd14ecbca2167fe9c838b343712d526f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2958, "license_type": "no_license", "max_line_length": 86, "num_lines": 92, "path": "/NB_spam_ham.py", "repo_name": "gmadhudatascientist/HAM_SPAM_Mail_Classification", "src_encoding": "UTF-8", "text": "\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer\r\n\r\n# Loading the data set\r\nemail_data = pd.read_csv(\"sms_raw_NB.csv\",encoding = \"ISO-8859-1\")\r\n\r\n# cleaning data \r\nimport re\r\nstop_words = []\r\n# Load the custom built Stopwords\r\nwith open(\"stopwords.txt\",\"r\") as sw:\r\n stop_words = sw.read()\r\n\r\nstop_words = stop_words.split(\"\\n\")\r\n \r\ndef cleaning_text(i):\r\n i = re.sub(\"[^A-Za-z\" \"]+\",\" \",i).lower()\r\n i = re.sub(\"[0-9\" \"]+\",\" \",i)\r\n w = []\r\n for word in i.split(\" \"):\r\n if len(word)>3:\r\n w.append(word)\r\n return (\" \".join(w))\r\n\r\n# testing above function with sample text => removes punctuations, numbers\r\ncleaning_text(\"Hope you are having a good week. Just checking in\")\r\ncleaning_text(\"hope i can understand your feelings 123121. 123 hi how .. 
are you?\")\r\ncleaning_text(\"Hi how are you, I am good\")\r\n\r\nemail_data.text = email_data.text.apply(cleaning_text)\r\n\r\n# removing empty rows\r\nemail_data = email_data.loc[email_data.text != \" \",:]\r\n\r\n# CountVectorizer\r\n# Convert a collection of text documents to a matrix of token counts\r\n\r\n# splitting data into train and test data sets \r\nfrom sklearn.model_selection import train_test_split\r\n\r\nemail_train, email_test = train_test_split(email_data, test_size = 0.2)\r\n\r\n# creating a matrix of token counts for the entire text document \r\ndef split_into_words(i):\r\n return [word for word in i.split(\" \")]\r\n\r\n# Defining the preparation of email texts into word count matrix format - Bag of Words\r\nemails_bow = CountVectorizer(analyzer = split_into_words).fit(email_data.text)\r\n\r\n# Defining BOW for all messages\r\nall_emails_matrix = emails_bow.transform(email_data.text)\r\n\r\n# For training messages\r\ntrain_emails_matrix = emails_bow.transform(email_train.text)\r\n\r\n# For testing messages\r\ntest_emails_matrix = emails_bow.transform(email_test.text)\r\n\r\n# Learning Term weighting and normalizing on entire emails\r\ntfidf_transformer = TfidfTransformer().fit(all_emails_matrix)\r\n\r\n# Preparing TFIDF for train emails\r\ntrain_tfidf = tfidf_transformer.transform(train_emails_matrix)\r\ntrain_tfidf.shape # (row, column)\r\n\r\n# Preparing TFIDF for test emails\r\ntest_tfidf = tfidf_transformer.transform(test_emails_matrix)\r\ntest_tfidf.shape # (row, column)\r\n\r\n# Preparing a naive bayes model on training data set \r\n\r\nfrom sklearn.naive_bayes import MultinomialNB as MB\r\n\r\n# Multinomial Naive Bayes\r\nclassifier_mb = MB()\r\nclassifier_mb.fit(train_tfidf, email_train.type)\r\n\r\n# Evaluation on Test Data\r\ntest_pred_m = classifier_mb.predict(test_tfidf)\r\naccuracy_test_m = np.mean(test_pred_m == email_test.type)\r\naccuracy_test_m\r\n\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(test_pred_m, email_test.type) \r\n\r\npd.crosstab(test_pred_m, email_test.type)\r\n\r\n# Training Data accuracy\r\ntrain_pred_m = classifier_mb.predict(train_tfidf)\r\naccuracy_train_m = np.mean(train_pred_m == email_train.type)\r\naccuracy_train_m\r\n" }, { "alpha_fraction": 0.7698412537574768, "alphanum_fraction": 0.8253968358039856, "avg_line_length": 62, "blob_id": "752f229bd00c9a29477fc2de775b4ba05197b675", "content_id": "1c2647d326fdba66d9cb74f9a2f621b5e83f857b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 94, "num_lines": 2, "path": "/README.md", "repo_name": "gmadhudatascientist/HAM_SPAM_Mail_Classification", "src_encoding": "UTF-8", "text": "# HAM_SPAM_Mail_Classification\n(https://github.com/gmadhudatascientist/HAM_SPAM_Mail_Classification/files/7146971/Readme.txt)\n" } ]
3
jackbhuber/stress-analysis-using-python-with-machine-learning
https://github.com/jackbhuber/stress-analysis-using-python-with-machine-learning
972a6e49db8aeb2cea981b77c57e4c7b1b16212b
086ef14ca8cd60adad82c7f03f7f50a660b3895c
9ff65cbf75a3b3123c136cd32c1155d6c3e80b85
refs/heads/master
2022-03-25T01:36:54.690985
2019-12-02T11:26:50
2019-12-02T11:26:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.675805389881134, "alphanum_fraction": 0.7204574942588806, "avg_line_length": 90.93413543701172, "blob_id": "8a5d54c2f0c6857f2c9332eb817946a6a4f67a07", "content_id": "897db367bb75faafde8c80630afee7c0a3783db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31058, "license_type": "no_license", "max_line_length": 281, "num_lines": 334, "path": "/fr1.py", "repo_name": "jackbhuber/stress-analysis-using-python-with-machine-learning", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import r2_score\r\n\r\n\r\ndef random_forest(Headache1, Job_Awareness1, Academic_Pressure1, Vocal_Expression1,Unhealhty_Influence1, Workload1, Anxiety1, Physical_Health1,Relationship_at_Work1, Work_Stress1, Digital_Distraction1,Sleep_Hours1, Time_Pressure1, Financial_Pressure1,Tech_Obligations1):\r\n global list3\r\n \r\n data = pd.read_csv('Updated.csv')\r\n data.drop(['Unnamed: 0','Age','Gender','Class','Year','Weather','Seeking_Help','Faking','Self_Awareness','Screen_Time','Parental_Pressure'],axis=1,inplace=True)\r\n data.to_csv('Update1.csv')\r\n dp = pd.read_csv(\"Update1.csv\")\r\n dp.columns.values\r\n features = dp.columns[1:16]\r\n X = dp[features]\r\n X \r\n Y = dp['Result']\r\n Y\r\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20)\r\n X_train\r\n Y_train\r\n regressor = RandomForestRegressor(n_estimators=20, random_state=0)\r\n regressor.fit(X_train,Y_train)\r\n Y.pred= regressor.predict(X_test)\r\n r2=r2_score(Y_test,Y.pred)\r\n print(r2)\r\n global a \r\n a = regressor.predict([[Headache1, Job_Awareness1, Academic_Pressure1, Vocal_Expression1,Unhealhty_Influence1, Workload1, Anxiety1, Physical_Health1,Relationship_at_Work1, Work_Stress1, Digital_Distraction1,Sleep_Hours1, Time_Pressure1, Financial_Pressure1,Tech_Obligations1]])\r\n \r\n print(type(a))\r\n print(a)\r\n\r\ndef raise_frame(frame):\r\n frame.tkraise()\r\n\r\ndef fetch():\r\n Headache1 = Headache.get()\r\n Job_Awareness1 = Job_Awareness.get()\r\n Academic_Pressure1 = Academic_Pressure.get()\r\n Vocal_Expression1 = Vocal_Expression.get()\r\n Unhealhty_Influence1 = Unhealhty_Influence.get()\r\n Workload1 = Workload.get()\r\n Anxiety1 = Anxiety.get()\r\n Physical_Health1 = Physical_Health.get()\r\n Relationship_at_Work1 = Relationship_at_Work.get()\r\n Work_Stress1 = Work_Stress.get()\r\n Digital_Distraction1 = Digital_Distraction.get()\r\n Sleep_Hours1 = Sleep_Hours.get()\r\n Time_Pressure1 = Time_Pressure.get()\r\n Financial_Pressure1 = Financial_Pressure.get()\r\n Tech_Obligations1 = Tech_Obligations.get()\r\n pred = random_forest(Headache1, Job_Awareness1, Academic_Pressure1, Vocal_Expression1,Unhealhty_Influence1, Workload1, Anxiety1, Physical_Health1,Relationship_at_Work1, Work_Stress1, Digital_Distraction1,Sleep_Hours1, Time_Pressure1, Financial_Pressure1,Tech_Obligations1)\r\n\r\n show = Tk()\r\n show.geometry(\"850x500\")\r\n show.config(bg=\"alice blue\")\r\n show.resizable(False,True)\r\n show.title(\"Details\")\r\n vary=80\r\n headlb=Label(show,text=\"Your Stress Score\",font=\"Times 25 bold\",bg=\"alice blue\").grid(row=0,column=1,padx=200)\r\n \r\n #-------------------------------Label of the Table---------------------------------#\r\n \r\n lb1 = Label(show,text=\"Your stress level is : \",width=20,bd=5,bg=\"alice blue\").place(x=2,y=50)\r\n lb2 = 
Label(show,text=\"Recommendation :\",width=20,bd=5,bg=\"alice blue\").place(x=2,y=200)\r\n\r\n if(a>=1.00 and a<=2.33):\r\n lb1 = Label(show,text=\"Low Stress\",width=20,bd=5,bg=\"alice blue\").place(x=2,y=80)\r\n lb2 = Label(show,text=\"* Stay Positive: Keep up the optimistic approch towards different situations.\",width=60,bd=5,bg=\"alice blue\").place(x=5,y=230) \r\n lb3 = Label(show,text=\"* Eat a Balanced Diet: A poor diet can bring greater reactivity toward stress.\",width=60,bd=5,bg=\"alice blue\").place(x=5,y=260)\r\n lb4 = Label(show,text=\"* Make Time for Leisure Activities:Leisure activities can be a wonderful way to relieve stress.\",width=72,bd=5,bg=\"alice blue\").place(x=5,y=290)\r\n lb5 = Label(show,text=\"\"\"* Cut Out Things That Add to Your Stress: Get rid of the things that are adding to your\r\n stress so you can experience more peace. \"\"\",width=68,bd=5,bg=\"alice blue\").place(x=5,y=320)\r\n lb6 = Label(show,text=\"\"\"* Exercise: follow a systematic exercise routine.\"\"\",width=40,bd=5,bg=\"alice blue\").place(x=5,y=360)\r\n\r\n \r\n if(a>=2.34 and a<=3.67):\r\n lb1 = Label(show,text=\"Moderate Stress\",width=20,bd=5,bg=\"alice blue\").place(x=2,y=80)\r\n lb2 = Label(show,text=\"* Try yoga: Yoga brings together physical and mental disciplines which may help you achieve peacefulness of body and mind.\",width=95,bd=5,bg=\"alice blue\").place(x=5,y=230) \r\n lb3 = Label(show,text=\"\"\"* Enjoy Aromatherapy: whether you enjoy candles, diffusers, or body products, consider\r\n incorporating some aromatherapy into your day.\"\"\",width=66,bd=5,bg=\"alice blue\").place(x=5,y=260)\r\n lb4 = Label(show,text=\"\"\"* Laugh more: Laughter fires up and then cools down your stress response. So read some jokes, tell\r\n some jokes, watch a comedy or hang out with your funny friends.\"\"\",width=73,bd=5,bg=\"alice blue\").place(x=5,y=300)\r\n lb5 = Label(show,text=\"\"\"* Reduce Your Caffeine Intake: Caffeine is a stimulant found in coffee, tea, chocolate and energy\r\n drinks. 
High doses can increase anxiety.\"\"\",width=72,bd=5,bg=\"alice blue\").place(x=5,y=340)\r\n lb6 = Label(show,text=\"\"\"* Get musical and be creative: Listening to or playing music is a good stress reliever because it\r\n can provide a mental distraction.If music isn't one of your interests, turn your attention to\r\n another hobby you enjoy, such as gardening, sewing, sketching .\"\"\",width=70,bd=5,bg=\"alice blue\").place(x=5,y=380)\r\n\r\n\r\n if(a>=3.68 and a<=5.00):\r\n lb1 = Label(show,text=\"High Stress\",width=20,bd=5,bg=\"alice blue\").place(x=2,y=80)\r\n lb2 = Label(show,text=\"* Seek counseling: Professional counselors or therapists can help you identify sources of your stress and learn new coping tools.\",width=95,bd=5,bg=\"alice blue\").place(x=5,y=230) \r\n lb3 = Label(show,text=\"* Develop a Positive Self-Talk Habit: It's important to learn to talk to yourself in a more realistic manner and introspect.\",width=88,bd=5,bg=\"alice blue\").place(x=5,y=260)\r\n lb4 = Label(show,text=\"\"\"* Meditate: Meditation can instill a sense of calm, peace and balance that can benefit both your\r\n emotional well-being and your overall health.\"\"\",width=72,bd=5,bg=\"alice blue\").place(x=5,y=290)\r\n lb5 = Label(show,text=\"\"\"* Connect with others: Reach out to family and friends and make social connections.Social contact\r\n is a good stress reliever because it can offer distraction\"\"\",width=75,bd=5,bg=\"alice blue\").place(x=5,y=330)\r\n lb6 = Label(show,text=\"\"\"* Get active: Any form of physical activity can act as a stress reliever. Even if you're not an\r\n athlete or you're out of shape, exercise can still be a good stress reliever.\"\"\",width=68,bd=5,bg=\"alice blue\").place(x=5,y=370)\r\n\r\n \r\nwin=Tk()\r\nwin.title(\"Stress Analysis\")\r\nwin.geometry(\"1000x500\")\r\n\r\n\r\nf0=Frame(win)\r\nf0.config(bg=\"alice blue\")\r\nf1=Frame(win)\r\nf1.config(bg=\"alice blue\")\r\nf2=Frame(win)\r\nf2.config(bg=\"alice blue\")\r\nf3=Frame(win)\r\nf3.config(bg=\"alice blue\")\r\nf4=Frame(win)\r\nf4.config(bg=\"alice blue\")\r\nf5=Frame(win)\r\nf5.config(bg=\"alice blue\")\r\nf6=Frame(win)\r\nf6.config(bg=\"alice blue\")\r\nf7=Frame(win)\r\nf7.config(bg=\"alice blue\")\r\nf8=Frame(win)\r\nf8.config(bg=\"alice blue\")\r\nf9=Frame(win)\r\nf9.config(bg=\"alice blue\")\r\nf10=Frame(win)\r\nf10.config(bg=\"alice blue\")\r\nf11=Frame(win)\r\nf11.config(bg=\"alice blue\")\r\nf12=Frame(win)\r\nf12.config(bg=\"alice blue\")\r\nf13=Frame(win)\r\nf13.config(bg=\"alice blue\")\r\nf14=Frame(win)\r\nf14.config(bg=\"alice blue\")\r\nf15=Frame(win)\r\nf15.config(bg=\"alice blue\")\r\n\r\nfor frame in (f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15):\r\n frame.grid(row=0,column=0,sticky=N+S+E+W)\r\n \r\nlb=Label(f0,text=\"STRESS ANALYSIS\",font=(\"arial\",30,\"bold\",\"underline\",\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=350,pady=12)\r\nlb1=Label(f0,text='''Stress is called \"THE INVISIBLE\" as it is often overlooked but one of the major reason behind\r\ndeteriorating mental health is stress. 
It affects one's mental and physical well-being.''',font=(\"arial\",15,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=5,pady=12)\r\nlb2=Label(f0,text='• Answer carefully; you cannot jump back to the previous question.',font=(\"arial\",15,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=5,pady=12)\r\nlb3=Label(f0,text='• Select the best suited option as per real-life scenarios',font=(\"arial\",15,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=5,pady=12)\r\nlb4=Label(f0,text='• Answer the questions with utmost honesty',font=(\"arial\",15,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=5,pady=12)\r\nlb5=Label(f0,text= 'To test your stress level take up this test.',font=(\"arial\",15,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",padx=5,pady=12)\r\nbtn=Button(f0,text=\"Start Test\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f1)).pack(anchor=\"w\",padx=370,pady=12)\r\n\r\n\r\n\r\n\r\nlb=Label(f1,text=\"\"\"1.When a person is constantly shouting at you, your head starts aching very much.\r\nOn a general basis when you suffer from a headache, which symptoms most likely occur?\"\"\",font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nHeadache=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f1,text='''Just headache, nothing else''',padx=14,variable=Headache,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f1,text='''A pounding, throbbing pain and irritation due to light''',padx=14,variable=Headache,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f1,text='''Pain along with piercing pain behind the eye(s)''',padx=14,variable=Headache,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f1,text='''I usually have pain in the chest or bridge of the nose area as well''',padx=14,variable=Headache,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f1,text='''I usually go through more than one symptom from the above options''',padx=14,variable=Headache,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f1,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f2)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f2,text='''2.You are new to a job, you’ve been here for a few months. It’s a little different from your previous office,\r\nthe office culture and procedures are a little different. Your new boss has put you on a bigger assignment.\r\nIn this different situation, what will you do?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=12).pack(anchor=\"w\")\r\nJob_Awareness=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f2,text='''Will you be able to understand all the different situations and expectations and\r\nwork your way through this different environment.''',font=(\"arial\",14,\"italic\"),bg=\"alice blue\",padx=14,variable=Job_Awareness,value=1,pady=5).pack(anchor=\"w\")\r\nr1=Radiobutton(f2,text='''Will you understand most tasks and ask for help from others or\r\ntry to understand yourself.''',padx=14,variable=Job_Awareness,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=5).pack(anchor=\"w\")\r\nr1=Radiobutton(f2,text='''Will you have difficulty with most tasks but you’ll be able to\r\nask for help from your 
colleagues.''',padx=14,variable=Job_Awareness,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=5).pack(anchor=\"w\")\r\nr1=Radiobutton(f2,text='''Will you not ask anyone but stay at the same job hoping you’ll\r\nfigure it out.''',padx=14,variable=Job_Awareness,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=5).pack(anchor=\"w\")\r\nr1=Radiobutton(f2,text='''will you leave the job.''',padx=14,variable=Job_Awareness,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=5).pack(anchor=\"w\")\r\nbtn=Button(f2,text=\"Next \",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f3)).pack(anchor=\"w\",padx=400)\r\n\r\nlb=Label(f3,text='''3.Even after studying for long you failed a subject.Now you are putting in extra efforts for all the subjects.\r\nFear of failure to do well in studies in general makes you feel?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nAcademic_Pressure=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f3,text=\"Motivated to do better\",padx=14,variable=Academic_Pressure,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f3,text=\"Unpleasant but I recover from it in a healthy way.\",padx=14,variable=Academic_Pressure,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f3,text=\"Put in the same effort and time as before.\",padx=14,variable=Academic_Pressure,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f3,text=\"Confused as I am unable to judge myself.\",padx=14,variable=Academic_Pressure,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f3,text=\"Really anxious and unable to cope up.\",padx=14,variable=Academic_Pressure,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f3,text=\"Next \",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f4)).pack(anchor=\"w\",pady=14,padx=400)\r\nlb=Label(f4,text='''4.You have been having a really hard time lately, you feel very tired mentally and physically. 
You\r\ncome home from work/college/school one day and your mother just asks how your day has been.\r\nWhat will you do in this situation?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nVocal_Expression=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f4,text='Will you just tell her how you’ve been feeling,including everything and seek professional help.',padx=14,variable=Vocal_Expression,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f4,text='Will you open up a little about the underlying problems but not entirely at once.',padx=14,variable=Vocal_Expression,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\") \r\nr1=Radiobutton(f4,text=\"will you tell her that your day was shitty and talk to her a little.\",padx=14,variable=Vocal_Expression,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f4,text=\"will you just reply something like “it was fine”.\",padx=14,variable=Vocal_Expression,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f4,text=\"will you not say anything and go to the room.\",padx=14,variable=Vocal_Expression,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f4,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f5)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f5,text='''5.You open your instagram/facebook account and see your friends posting their vacation pictures and\r\nall of them look happy .Whereas you were at your home for the whole period of vacations.\r\nWhat will be your first reaction after seeing them?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nUnhealhty_Influence=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f5,text=\"Not at all get affected by them as it was your own choice to be at home.\",padx=14,variable=Unhealhty_Influence,value=1,font=(\"lucida\",15),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f5,text=\"Like all of their pictures but with envy.\",padx=14,variable=Unhealhty_Influence,value=2,font=(\"lucida\",15),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f5,text=\"Get upset and let the pictures affect your whole day.\",padx=14,variable=Unhealhty_Influence,value=3,font=(\"lucida\",15),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f5,text=\"Get upset and pick a fight with them as they didnt ask you to join them.\",padx=14,variable=Unhealhty_Influence,value=4,font=(\"lucida\",15),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f5,text=\"Cringe on their back and try to stay happy with them in their happiness.\",padx=14,variable=Unhealhty_Influence,value=5,font=(\"lucida\",15),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f5,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f6)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f6,text='''6.Wanting to do many a things you take up more than you can handle at the moment.Do you neglect some\r\nof the work taken by you when you feel you have a lot to do?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nWorkload=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f6,text=\"I complete all the work that I take.\",padx=14,variable=Workload,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice 
blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f6,text='I tend not to drop out something, but then I prioritize and may leave the work at the least priority.',padx=14,variable=Workload,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f6,text=\"I drop the work sometimes when I feel it is too much to handle.\",padx=14,variable=Workload,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f6,text=\"I drop off majority of the work I take.\",padx=14,variable=Workload,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f6,text=\"I dont take up work at all as I have much to do already.\",padx=14,variable=Workload,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f6,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f7)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f7,text='''7.You are very much worthy of a job but just before your interview a random person looks down at you\r\nand start laughing at your resume. What will you do?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nAnxiety=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f7,text='Get a boost in your confidence from it and try to perform better in the interview.',padx=14,variable=Anxiety,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f7,text='Be a little bit nervous before the interview and gain your confidence back in the interview.',padx=14,variable=Anxiety,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f7,text=\"Not let anyone affect your self confidence.\",padx=14,variable=Anxiety,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\")\r\nr1=Radiobutton(f7,text=\"Get a little bit affected by it and a little bit of nervousness creeps in.\",padx=14,variable=Anxiety,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f7,text=\"Get completely rattled by the person and lose all your confidence.\",padx=14,variable=Anxiety,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f7,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f8)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f8,text='8.If given an option to work out 4-5 days in a week or not working out at all,what will you do?',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nPhysical_Health=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f8,text=\"I will be working out 6 days a week no matter what.\",padx=14,variable=Physical_Health,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f8,text=\"If the time permits I will work out.\",padx=14,variable=Physical_Health,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f8,text=\"I will work out 2-3 days in a week.\",padx=14,variable=Physical_Health,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f8,text=\"I will rarely work out.\",padx=14,variable=Physical_Health,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f8,text=\"I will sleep 
in that time.\",padx=14,variable=Physical_Health,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f8,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f9)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f9,text='''9.Relationships amongst the friends/teachers/colleagues are most of the time under a lot of distress.\r\nHow often do you encounter such instances?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nRelationship_at_Work=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f9,text=\"Such things never happened with me.\",padx=14,variable=Relationship_at_Work,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f9,text=\"Such situations can be handled on a personal understanding.\",padx=14,variable=Relationship_at_Work,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f9,text='It may happen when there are different point of views that a little bit of distress in the relationships may come.',padx=14,variable=Relationship_at_Work,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f9,text=\"Many a times such situations arise which lead to quarrels.\",padx=14,variable=Relationship_at_Work,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f9,text=\"I am in a fight almost all the time.\",padx=14,variable=Relationship_at_Work,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f9,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f10)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f10,text='''10.Your company seems to be growing and doing better.Your company just got a lot of new assignments\r\nand projects. Everyone has a lot of workload. 
How will you deal with this situation?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=12).pack(anchor=\"w\")\r\nWork_Stress=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f10,text='''Will you deal with the stress at work in a healthy way and work your way through this difficult\r\ntimes at the office.''',padx=14,variable=Work_Stress,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=7).pack(anchor=\"w\")\r\nr1=Radiobutton(f10,text='Will you deal with your stress on a surface level and try to work through things for time being.',padx=14,variable=Work_Stress,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=7).pack(anchor=\"w\")\r\nr1=Radiobutton(f10,text=\"Will you just keep working without working on dealing with your stress.\",padx=14,variable=Work_Stress,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=7).pack(anchor=\"w\")\r\nr1=Radiobutton(f10,text='''Will you work on some assignments but not others and let this affect your efficiency and productivity\r\nwithout dealing with the stress.''',padx=14,variable=Work_Stress,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=7).pack(anchor=\"w\")\r\nr1=Radiobutton(f10,text=\"Will you just stop working.\",padx=14,variable=Work_Stress,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=7).pack(anchor=\"w\")\r\nbtn=Button(f10,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f11)).pack(anchor=\"w\",padx=400,pady=7)\r\n\r\nlb=Label(f11,text='''11.After taking up a work, just after 5 minutes into it you are wanting to check up on your phone.\r\nHow often do you give up on the need to check the device?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nDigital_Distraction=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f11,text='I will first complete all the chores then if time permitts then use my device.',padx=14,variable=Digital_Distraction,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f11,text=\"I will first get the job done on the hand then use the device.\",padx=14,variable=Digital_Distraction,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f11,text=\"I will try not to open it up unless the work is done.\",padx=14,variable=Digital_Distraction,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f11,text=\"After fighting the urge for sometime I'll open it up.\",padx=14,variable=Digital_Distraction,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f11,text=\"I will leave the work in between and dwell into the device at that instant itself\",padx=14,variable=Digital_Distraction,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f11,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f12)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f12,text='''12.You are a student and you want everything in your day to day agenda from studies to friends\r\nto playing games and working out. After involving in so many activities you are not able to keep\r\nup with your sleeping schedule. 
How often do you get a good nights sleep?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nSleep_Hours=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f12,text=\"Juggling between the activities and sleep on day to day basis.\",padx=14,variable=Sleep_Hours,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f12,text=\"Leave out a few of the things to get sleep.\",padx=14,variable=Sleep_Hours,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f12,text=\"Ration my sleep throughout the day.\",padx=14,variable=Sleep_Hours,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f12,text=\"Complete my sleep on weekends.\",padx=14,variable=Sleep_Hours,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f12,text=\"Not able to cope up with sleep.\",padx=14,variable=Sleep_Hours,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f12,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f13)).pack(anchor=\"w\",padx=400,pady=14)\r\nlb=Label(f13,text='''13.You are to submit your project in a week. Your group is not working as per the requirement\r\nand things are not going according to the deadline.How is this situation going to affect your completion\r\nbefore or on the deadline?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nTime_Pressure=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f13,text=\"If no one listens to me will try to complete the project myself in the deadline.\",padx=14,variable=Time_Pressure,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f13,text=\"Will try to convince everyone to start working as the deadline is coming near.\",padx=14,variable=Time_Pressure,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f13,text=\"I will be working with the same amount of rigour.\",padx=14,variable=Time_Pressure,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f13,text=\"Will delay the work a bit more but start working as the deadline comes near.\",padx=14,variable=Time_Pressure,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f13,text=\"I will go with flow and procastinate.\",padx=14,variable=Time_Pressure,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\").pack(anchor=\"w\",pady=14)\r\nbtn=Button(f13,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f14)).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nlb=Label(f14,text='''14.Your father recently lost his job. He's looking for a source of income. 
You're in your final year\r\nof your college and there is your farewell party after a week.Your father calls you explaining the situatuion.\r\nWhat will you do?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nFinancial_Pressure=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f14,text=\"\"\"You completely understand the situation and offer a helping hand by telling him you'll be taking up an\r\ninternship to help in sustaining the household.\"\"\",padx=14,variable=Financial_Pressure,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f14,text=\"You tell him about the party and ask him for more money so that you'll be able to enjoy the last year of college.\",padx=14,variable=Financial_Pressure,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f14,text=\"You act infront of him as you are still in the college and it is his duty to help you.\",padx=14,variable=Financial_Pressure,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f14,text=\"You fight with him that he is not fulfilling his responsibilities and start a ruckus.\",padx=14,variable=Financial_Pressure,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f14,text=\"You quietly take out some money from his wallet and add your savings and enjoy your party.\",padx=14,variable=Financial_Pressure,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f14,text=\"Next\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=lambda:raise_frame(f15)).pack(anchor=\"w\",padx=400,pady=12)\r\n\r\nlb=Label(f15,text='''15.Due to work/personal commitments you are not able to let go of the devices and are surrounded by\r\nthem all the time. 
How often does that happen that you're not able to let go of them?''',font=(\"arial\",14,\"bold\",\"italic\"),bg=\"alice blue\",pady=16).pack(anchor=\"w\")\r\nTech_Obligations=IntVar()\r\n#var.set(1)\r\nr1=Radiobutton(f15,text=\"There is no such obligation to use technology all the time.\",padx=14,variable=Tech_Obligations,value=1,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f15,text='There are times when I have to use the devices to complete some tasks.',padx=14,variable=Tech_Obligations,value=2,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f15,text=\"I am using it occasionally to help me complete the work as well as glance at social media.\",padx=14,variable=Tech_Obligations,value=3,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f15,text=\"I am more often surrounded by devices.\",padx=14,variable=Tech_Obligations,value=4,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nr1=Radiobutton(f15,text=\"If given an option I could use devices in my sleep.\",padx=14,variable=Tech_Obligations,value=5,font=(\"arial\",14,\"italic\"),bg=\"alice blue\",pady=14).pack(anchor=\"w\")\r\nbtn=Button(f15,text=\"Show Result\",bd=10,width=10,relief=\"raised\",bg=\"green yellow\",command=fetch).pack(anchor=\"w\",padx=400,pady=14)\r\n\r\nraise_frame(f0)\r\nwin.mainloop()\r\n" }, { "alpha_fraction": 0.6346219182014465, "alphanum_fraction": 0.661128044128418, "avg_line_length": 27.935701370239258, "blob_id": "9275a73df1e6fd9638bddff2f5d44867bf07114d", "content_id": "f692fcc3d30ae650e652ba68cac276da663a8f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17694, "license_type": "no_license", "max_line_length": 485, "num_lines": 591, "path": "/MPR.py", "repo_name": "jackbhuber/stress-analysis-using-python-with-machine-learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 24 16:43:56 2019\r\n\r\n@author: Anubhuti Singh\r\n\"\"\"\r\n\r\n#importing packages\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#reading the csv file\r\ndf = pd.read_csv(\"Assessment.csv\")\r\n\r\n#displaying first ten rows of the csv file\r\ndf.head(10)\r\n\r\n#looking into the details of our dataset\r\ndf.describe()\r\ndf.info()\r\ndf.index.values\r\n#checking columns and rows\r\ndf.shape\r\n\r\n#adding headers to each column/feature\r\ndf.columns = ['Time','Email','Name','Age','Gender','School/College','Class','Year','Headache','Job_Awareness','Academic_Pressure','Vocal_Expression','Unhealhty_Influence','Weather','Workload','Seeking_Help','Anxiety','Faking','Physical_Health','Relationship_at_Work','Decision_Making','Self_Awareness','Screen_Time','Alcohol_Tobacco','Work_Stress','Parental_Pressure','Empathy','Digital_Distraction','Sleep_Hours','Time_Pressure','Financial_Pressure','Rship_Skills','Tech_Obligations']\r\n\r\ndf.columns.values \r\n\r\n#dropping un-required features \r\ndf.drop(['Time','Name'], axis=1, inplace=True)\r\n\r\n#displaying first two rows\r\ndf.head(2)\r\n\r\ndf.columns.values \r\n\r\n# drop duplicate by a column name \r\ndf.drop_duplicates(['Email'],inplace=True)\r\n\r\n\r\n\r\n#changing strings to numeric values of the questions\r\n\r\ncleanup_nums = {\"Headache\": {\"Very often\": 5, \"Often\": 4, \"Sometimes\":3, \"Rarely\":2, \"Never\":1 },\r\n                \"Job_Awareness\": {\"Never\": 5, \"Seldom\": 4, 
\"Sometimes\":3, \"Often\":2, \"Always\":1 },\r\n \"Academic_Pressure\": {\"very great extent\": 5, \"great extent\": 4, \"some extent\":3, \"little extent\":2, \"no impact\":1 },\r\n \"Vocal_Expression\": {\"really bad\": 5, \"bad\": 4, \"fair\":3, \"good\":2, \"really good\":1 },\r\n \"Unhealhty_Influence\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \"Weather\": {\"Very often\": 5, \"Often\": 4, \"Sometimes\":3, \"Rarely\":2, \"Never\":1 },\r\n \"Workload\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \"Seeking_Help\": {\"Never\": 5, \"Very rarely\": 4, \"Rarely\":3, \"Often\":2, \"Very often\":1 },\r\n \"Anxiety\": {\"Always\": 5, \"Mostly\": 4, \"Sometimes\":3, \"Few times\":2, \"Never\":1 },\r\n \"Faking\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \"Physical_Health\": {\"Never\": 5, \"rarely\": 4, \"Sometimes\":3, \"Often\":2, \"Always\":1 },\r\n \"Relationship_at_Work\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \"Decision_Making\": {\"negligible\": 5, \"very little extent\": 4, \"little extent\":3, \"great extent\":2, \"very great extent\":1 },\r\n \"Self_Awareness\": {\"Strongly agreed\": 5, \"Agreed\": 4, \"Neutral\":3, \"Disagreed\":2, \"Strongly disagree\":1 },\r\n \"Screen_Time\": {\">2.5 hours\": 5, \"2-2.5 hours\": 4, \"2 hours\":3, \"1-1.5 hours\":2, \"<1 hour\":1 },\r\n \"Alcohol_Tobacco\": {\"Very often\": 5, \"Often\": 4, \"Sometimes\":3, \"Rarely\":2, \"Never\":1 },\r\n \"Work_Stress\": {\"Very often\": 5, \"Often\": 4, \"Sometimes\":3, \"Rarely\":2, \"Never\":1 },\r\n \"Parental_Pressure\": {\"Strongly agreed\": 5, \"Agreed\": 4, \"Neutral\":3, \"Disagreed\":2, \"Strongly Disagreed\":1 },\r\n \"Empathy\": {\"Never\": 5, \"Rarely\": 4, \"Sometimes\":3, \"Often\":2, \"Always\":1 },\r\n \"Digital_Distraction\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \"Sleep_Hours\": {\"Rarely\": 5, \"Only weekends\": 4, \"Three- Four nights a week\":3, \"Four-Five nights a week\":2, \"Mostly every night\":1 },\r\n \"Time_Pressure\": {\"Strongly agree\": 5, \"Agree\": 4, \"Neutral\":3, \"Disagree\":2, \"Strongly disagree\":1 },\r\n \"Financial_Pressure\": {\"Strongly agreed\": 5, \"Agreed\": 4, \"Neutral\":3, \"Disagreed\":2, \"Strongly disagreed\":1 },\r\n \"Rship_Skills\": {\"Never\": 5, \"Very rarely\": 4, \"Sometimes\":3, \"Often\":2, \"Always\":1 },\r\n \"Tech_Obligations\": {\"Always\": 5, \"Often\": 4, \"Sometimes\":3, \"Seldom\":2, \"Never\":1 },\r\n \r\n }\r\n\r\ndf.replace(cleanup_nums, inplace=True)\r\ndf.head()\r\n\r\n#converting our dataset into dataframe\r\ndata = pd.DataFrame(df)\r\ndata.head()\r\n\r\n#Adding the main feature- Result\r\ndata['Result'] = data[['Headache','Job_Awareness','Academic_Pressure','Vocal_Expression','Unhealhty_Influence','Weather','Workload','Seeking_Help','Anxiety','Faking','Physical_Health','Relationship_at_Work','Decision_Making','Self_Awareness','Screen_Time','Alcohol_Tobacco','Work_Stress','Parental_Pressure','Empathy','Digital_Distraction','Sleep_Hours','Time_Pressure','Financial_Pressure','Rship_Skills','Tech_Obligations']].mean(axis=1)\r\n\r\nprint(data)\r\ndata.drop(['Email','School/College'], axis=1, inplace=True)\r\ndata.columns.values \r\ndata.shape\r\n\r\n\r\ndef f(row):\r\n if row['Result']>=1.00 and row['Result']<=2.33:\r\n val = \"Low Stress\"\r\n elif row['Result']>=2.34 and row['Result']<=3.67:\r\n val = \"Medium Stress\"\r\n else:\r\n val = 
\"High Stress\"\r\n return val\r\n\r\ndata['Level'] = data.apply(f, axis=1)\r\n\r\ndata.head(10)\r\ndata.drop(data[data['Gender'] == 'Other'].index, inplace = True)\r\n\r\n\r\n#new plots\r\nax = data.groupby(['Level','Gender']).size().unstack().plot(kind='bar',stacked=False,figsize=(13,8))\r\ntotals = []\r\n\r\n# find the values and append to list\r\nfor i in ax.patches:\r\n totals.append(i.get_height())\r\n\r\n# set individual bar lables using above list\r\ntotal = sum(totals)\r\n\r\n# set individual bar lables using above list\r\nfor i in ax.patches:\r\n # get_x pulls left or right; get_height pushes up or down\r\n ax.text(i.get_x(), i.get_height()+3, \\\r\n str(round((i.get_height()/total)*100, 2))+'%', fontsize=14,\r\n color='black')\r\nplt.show()\r\n\r\ndata=data.replace('BTEC', 'BTech.')\r\n#data.drop(data[(data.Class != 'BTech.') & (data.Class != 'BCA')].index, inplace=True)\r\n#data.groupby(['Level','Class']).size().unstack().plot(kind='bar',stacked=False,figsize=(15,10))\r\n#plt.show()\r\n\r\n\r\n#analysis of dataset for null values using heatmap\r\n#sns.heatmap(df.isnull())\r\n# Clearly; Heatmap of isnull indicates that this dataset does not have any null values\r\ndata.isnull()\r\n\r\n#representation of stress score of different entries\r\ndata['Result'].plot(kind='hist', figsize=(8, 5))\r\nplt.title('Lesser Score Means Less Stress') # add a title to the histogram\r\nplt.ylabel('Number of Entries') # add y-label\r\nplt.xlabel('Stress Score') # add x-label\r\nplt.show()\r\n\r\n#participation of girls vs boys\r\ndata['Gender'].value_counts().sort_index().plot.bar(figsize=(12, 6), color='purple',\r\n fontsize=16,title='Participation of Girls and Boys')\r\nplt.xlabel('Gender')\r\nplt.ylabel(\"Frequency\")\r\n#participation of various aged students\r\ndata['Age'].value_counts().sort_index().plot.bar(figsize=(12, 6), color='green',\r\n fontsize=16,title='Participation of various aged students')\r\nplt.xlabel('Age')\r\nplt.ylabel(\"Frequency\")\r\n\r\n\r\nbox_plot_data=[data['Age'],data['Result']]\r\nplt.boxplot(box_plot_data)\r\nplt.ylabel(\"Age\")\r\n\r\ndata.drop(data[data['Age'] > 80].index, inplace = True)\r\n#participation of various students of different courses\r\ndata['Class'].value_counts().sort_index().plot.bar(figsize=(12, 6), color='red',\r\n fontsize=16,title='Participation of various students of different courses')\r\nplt.xlabel('Courses')\r\nplt.ylabel(\"Frequency\")\r\n#participation of college students of various years\r\ndata['Year'].value_counts().sort_index().plot.bar(figsize=(12, 6), color='mediumvioletred',\r\n fontsize=16,title='Participation of college students of various years')\r\nplt.xlabel('Year')\r\nplt.ylabel(\"Frequency\")\r\n\r\n\r\n\r\n\r\n\r\n#Visualization of different features vs the Stress Score\r\n\r\na=data['Gender']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.show()\r\n\r\n\r\na=data['Age']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.show()\r\n\r\n\r\na=data['Headache']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Headache')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Purples\")\r\nplt.ylabel('Headache')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n#lot of people associated with medium or high stress score suffer from headaches frequently\r\n\r\n#Similarly analysis of other features wrt score is done 
below\r\na=data['Job_Awareness']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Job Awareness')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Blues\")\r\nplt.ylabel('Job Awareness')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Academic_Pressure']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Academic Pressure ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Greens\")\r\nplt.ylabel('Academic Pressure ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Vocal_Expression']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Vocal Expression ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Oranges\")\r\nplt.ylabel('Vocal Expression ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Unhealhty_Influence']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Unhealthy Influence ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.ylabel('Unhealthy Influence')\r\nplt.xlabel(\"Stress Score\")\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Reds\")\r\nplt.show()\r\n\r\na=data['Weather']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Weather')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"YlOrBr\")\r\nplt.ylabel('Weather')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Workload']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Workload')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"YlOrRd\")\r\nplt.ylabel('Workload')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Seeking_Help']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('seeking help')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"OrRd\")\r\nplt.ylabel('seeking help')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Anxiety']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('anxiety')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"PuRd\")\r\nplt.ylabel('anxiety')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Faking']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('faking ')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"RdPu\")\r\nplt.ylabel('faking ')\r\nplt.xlabel(\"Stress Score\")\r\n\r\nplt.show()\r\n\r\na=data['Physical_Health']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('physical health')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"BuPu\")\r\nplt.ylabel('physical health')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Relationship_at_Work']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('relationship at work')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"GnBu\")\r\nplt.ylabel('relationship at work')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Decision_Making']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('decison making')\r\nplt.xlabel(\"Stress 
Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"PuBu\")\r\nplt.ylabel('decison making')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Self_Awareness']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('self awareness')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"YlGnBu\")\r\nplt.ylabel('self awareness')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Screen_Time']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('screen time')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"PuBuGn\")\r\nplt.ylabel('screen time')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Alcohol_Tobacco']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Alcohol_Tobacco')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"BuGn\")\r\nplt.ylabel('Alcohol_Tobacco')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Work_Stress']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('Work stress')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Reds\")\r\nplt.ylabel('Work stress')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Parental_Pressure']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('parental pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Oranges\")\r\nplt.ylabel('parental pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Empathy']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('empathy')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Blues\")\r\nplt.ylabel('empathy')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Digital_Distraction']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('digital distraction')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Greens\")\r\nplt.ylabel('digital distraction')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Sleep_Hours']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('sleep cycle')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"YlGn\")\r\nplt.ylabel('sleep cycle')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Time_Pressure']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('time pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"PuRd\")\r\nplt.ylabel('time pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Financial_Pressure']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('financial pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"PuBu\")\r\nplt.ylabel('financial pressure')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\na=data['Rship_Skills']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('relationship skills')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Reds\")\r\nplt.ylabel('relationship skills')\r\nplt.xlabel(\"Stress 
Score\")\r\nplt.show()\r\n\r\na=data['Tech_Obligations']\r\nb=data['Result']\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(b,a)\r\nplt.ylabel('tech obligations')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\nplt.hexbin(b,a, gridsize=(10,10),cmap=\"Greens\")\r\nplt.ylabel('tech obligations')\r\nplt.xlabel(\"Stress Score\")\r\nplt.show()\r\n\r\n#on the basis of the above visualisations, few features show low impact to high/medium level of stress score\r\n#these features are being dropped below\r\ndata.drop(['Decision_Making','Alcohol_Tobacco','Empathy','Rship_Skills'], axis=1, inplace=True)\r\n\r\ndata.shape\r\n\r\ndata.to_csv('Updated.csv')\r\n\r\ndp = pd.read_csv(\"Updated.csv\")\r\ndp.shape\r\n#IMPLEMENTING RANDOM FOREST ALGORITHM \r\n\r\n\r\n\r\n#independent variables\r\nX = pd.DataFrame(dp.iloc[:,5:-2])\r\nX\r\n#dependent variable\r\n\r\nY =pd.DataFrame(dp.iloc[:,-2])\r\nY\r\n\r\n#split the dataset into train and test sets\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20)\r\n\r\n\r\n#Build the random forest regression model with random forest regressor function\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nregressor = RandomForestRegressor(n_estimators=20, random_state=0)\r\nregressor.fit(X_train,Y_train)\r\nY.pred= regressor.predict(X_test)\r\nY.pred\r\n\r\n\r\nfrom sklearn import metrics\r\nprint('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, Y.pred))\r\nprint('Mean Squared Error:', metrics.mean_squared_error(Y_test, Y.pred))\r\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, Y.pred)))\r\n\r\n\r\n\r\nfrom sklearn.metrics import r2_score\r\nr2=r2_score(Y_test,Y.pred)\r\nr2\r\n\r\n\r\nfrom sklearn.feature_selection import RFE\r\nrfe=RFE(regressor,15) #we are selecting the 15 most important features\r\nrfe=rfe.fit(X,Y)\r\nprint(rfe.support_) #it will display TRUE for features that are important\r\nprint(rfe.ranking_)\r\nX.shape\r\nX\r\n\r\n\r\n#on the basis of the result of RFE, we are now dropping the features that showed result FALSE in RFE\r\ndp.drop(['Weather','Seeking_Help','Faking','Self_Awareness','Screen_Time','Parental_Pressure'],axis=1,inplace=True)\r\n\r\n\r\n#checking accuracy again\r\nX = pd.DataFrame(dp.iloc[:,5:-2])\r\nX\r\n\r\n\r\nY =pd.DataFrame(dp.iloc[:,-2])\r\nY\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20)\r\nX_train\r\nY_train\r\nregressor = RandomForestRegressor(n_estimators=20, random_state=0)\r\nregressor.fit(X_train,Y_train)\r\nY.pred= regressor.predict(X_test)\r\nY.pred\r\nY_test.shape\r\n\r\nr2=r2_score(Y_test,Y.pred)\r\nr2\r\n#r2=0.8042412442406475 (might differ everytime)\r\n\r\nplt.figure(figsize=(20,10))\r\n\r\nplt.scatter(Y_test,Y.pred,color=\"red\")\r\nplt.xlabel('Y_test')\r\nplt.ylabel('Y.pred')\r\n#plt.plot(Y_test, Y.pred, color='red', linewidth=2)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n#'Headache','Job_Awareness', 'Academic_Pressure', 'Vocal_Expression','Unhealhty_Influence', 'Workload', 'Anxiety', 'Physical_Health',\r\n#'Relationship_at_Work', 'Work_Stress', 'Digital_Distraction',\r\n#'Sleep_Hours', 'Time_Pressure', 'Financial_Pressure',\r\n#'Tech_Obligations'\r\n\r\n" }, { "alpha_fraction": 0.8187134265899658, "alphanum_fraction": 0.8187134265899658, "avg_line_length": 170, "blob_id": "a72d71406b95ff6841b93b1f0065e2f424731ec9", "content_id": "2bbf0e8110a67a1727978f79769ef88e59fb58d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 342, "license_type": "no_license", "max_line_length": 288, "num_lines": 2, "path": "/README.md", "repo_name": "jackbhuber/stress-analysis-using-python-with-machine-learning", "src_encoding": "UTF-8", "text": "# stress-analysis-using-python-with-machine-learning\nThe project is research and survey based that collects information on factors affecting stress and determining the factors contributing the most to stress in Generation Z. Further, with the help of the ML model built, a GUI quiz is created based on the results obtained from the ML model.\n" } ]
3
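
The two Python files in this record (fr1.py and MPR.py) build the same scoring pipeline twice: Likert-style answers are mapped to integers 1-5, their row-wise mean becomes the Result target, and a RandomForestRegressor is fit on the 15 features that survive RFE. The sketch below reproduces that flow end to end; the 15 column names are taken from the files above, but the survey rows here are randomly generated stand-ins (the real scripts read Assessment.csv / Update1.csv), so treat it as an illustration under those assumptions rather than the project's exact code.

# Minimal sketch of the stress-scoring pipeline in MPR.py / fr1.py.
# Feature names come from the source; the sample answers are invented.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

FEATURES = ['Headache', 'Job_Awareness', 'Academic_Pressure', 'Vocal_Expression',
            'Unhealhty_Influence', 'Workload', 'Anxiety', 'Physical_Health',
            'Relationship_at_Work', 'Work_Stress', 'Digital_Distraction',
            'Sleep_Hours', 'Time_Pressure', 'Financial_Pressure', 'Tech_Obligations']

rng = np.random.default_rng(0)
answers = pd.DataFrame(rng.integers(1, 6, size=(200, len(FEATURES))), columns=FEATURES)
answers['Result'] = answers[FEATURES].mean(axis=1)  # same target construction as MPR.py

X_train, X_test, y_train, y_test = train_test_split(
    answers[FEATURES], answers['Result'], test_size=0.20, random_state=0)
regressor = RandomForestRegressor(n_estimators=20, random_state=0)
regressor.fit(X_train, y_train)
print('r2:', r2_score(y_test, regressor.predict(X_test)))

fr1.py then calls regressor.predict on a single 15-value row assembled from the quiz's radio buttons and maps the returned score onto the 1.00-2.33 / 2.34-3.67 / 3.68-5.00 bands shown in the result window.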
Monteven/CSGO-Machine-Learning
https://github.com/Monteven/CSGO-Machine-Learning
c2c76438412a649401bb2d864a850ce280ce2f75
c69558b2b7d7ae11bac65f224413f097118a49a9
52687106bc6e739222b5f4a3dfdb6fc6db3a37d5
refs/heads/master
2021-05-26T05:17:44.771598
2018-04-10T12:18:22
2018-04-10T12:18:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.673285186290741, "alphanum_fraction": 0.6895306706428528, "avg_line_length": 28.972972869873047, "blob_id": "d87283df116488197efb9baad8a45031019b70b7", "content_id": "59054a2443ddfa516ca5af2a3e6ad2a17120c53a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 199, "num_lines": 37, "path": "/README.md", "repo_name": "Monteven/CSGO-Machine-Learning", "src_encoding": "UTF-8", "text": "## Machine Learning Naive-Bayes algorithm that learns between the KD of any two CS:GO players.\n\n#### Prerequisites:\n- numpy\n- sklearn\n- matplotlib\n- pylab\n##### For the web crawler:\n- beautifulsoup4\n- requests\n\nAn easy way to get all the prerequisites is to use Anaconda as your python interpreter, found [here](https://www.anaconda.com/download/).\n\n---------------------------------\n\n#### Style for text.txt:\n\nFirst line = your first player (#0)\nSecond line = your second player (#1)\n\nplayer # as an example:\n0 = simple\n1 = zeus\n\nBelow this the numbers are ordered like this:\n\nkills deaths player\n\ne.g. **23 12 0** means 23 kills, 12 deaths, and is the first player (s1mple).\n\n---------------------------------\n\n#### How to use hltv_crawl.py\n\nEnsure that data.txt only has two lines of the two player names. If this doesn't happen the file will be written to incorrectly and so main.py will not work (e.g. there can be no spaces in the file).\n\nDo not use too many data points as it will clutter the graph and make it unreadable. Personally I have found about 100 points for each player to be a good maximum." }, { "alpha_fraction": 0.6068450808525085, "alphanum_fraction": 0.6248354315757751, "avg_line_length": 38.2931022644043, "blob_id": "b97b4558e60be3e04404f5baae1c416a736fa957", "content_id": "f6ad798f491d568ceabb915499a34b0373e1d86f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2279, "license_type": "no_license", "max_line_length": 116, "num_lines": 58, "path": "/draw_graph.py", "repo_name": "Monteven/CSGO-Machine-Learning", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pylab as pl\nimport numpy as np\n\n# Written by Udacity for their ML course. I have added the variable axes and simplified some processes.\n\nlegend = 0\n\n\ndef picture(clf, x_test, y_test, int_progressive, xaxis_min, xaxis_max, yaxis_min, yaxis_max, should_add_prediction,\n predict_kills, predict_deaths, player0, player1):\n global legend\n\n image_name = \"images/img\" + str(int_progressive) + \".png\"\n\n x_min = xaxis_min-5\n x_max = int(xaxis_max+(xaxis_max/7.5))\n y_min = yaxis_min-5\n y_max = int(yaxis_max+(yaxis_max/7.5))\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n h = 1 # step size in the mesh\n if int_progressive != 1:\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n z = clf.predict(np.c_[yy.ravel(), xx.ravel()])\n\n # Put the result into a color plot\n z = z.reshape(xx.shape)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n\n plt.pcolormesh(xx, yy, z, cmap=pl.cm.seismic)\n\n # Plot points\n kills_player0 = [x_test[ii][0] for ii in range(0, len(x_test)) if y_test[ii] == 0]\n deaths_player0 = [x_test[ii][1] for ii in range(0, len(x_test)) if y_test[ii] == 0]\n kills_player1 = [x_test[ii][0] for ii in range(0, len(x_test)) if y_test[ii] == 1]\n deaths_player1 = [x_test[ii][1] for ii in range(0, len(x_test)) if y_test[ii] == 1]\n\n plt.scatter(deaths_player0, kills_player0, color=\"cyan\", label=player0)\n plt.scatter(deaths_player1, kills_player1, color=\"orange\", label=player1)\n plt.scatter(xaxis_min - 5, yaxis_min - 5, color=\"blue\", label=player0 + \" surface\")\n plt.scatter(xaxis_min - 5, yaxis_min - 5, color=\"red\", label=player1 + \" surface\")\n\n # Should we add a prediction to the graph?\n if should_add_prediction == \"y\":\n plt.scatter(predict_deaths, predict_kills, color=\"green\", label=\"prediction\")\n image_name = \"images/img\" + str(int_progressive) + \"_prediction.png\"\n\n # Ensuring only one legend is added\n if legend == 0:\n plt.legend()\n plt.xlabel(\"deaths\")\n plt.ylabel(\"kills\")\n legend = 1\n\n plt.savefig(image_name)\n print(image_name)\n" }, { "alpha_fraction": 0.6164296865463257, "alphanum_fraction": 0.6353870630264282, "avg_line_length": 33.032257080078125, "blob_id": "8e9cac41742eb1716e15d12d50e6957ff0eed0d2", "content_id": "5bc4ac5083e48b8e403af6b6058ab9c0293410d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3165, "license_type": "no_license", "max_line_length": 120, "num_lines": 93, "path": "/hltv_crawl.py", "repo_name": "Monteven/CSGO-Machine-Learning", "src_encoding": "UTF-8", "text": "from requests import get\nfrom bs4 import BeautifulSoup\n\nprint(\"This program is designed to pull the stats of players from HLTV. 
In order for this program to work please ensure\"\n \" all lines except from the first 2 are clear as these lines are for the player names.\")\nto_continue = input(\"\\nIs this done y/n: \").lower()\nif to_continue != \"y\":\n quit()\nnumber_of_points = int(input(\"\\nHow many data points for each person: \"))\n# Url for HLTV player list\nurl = \"https://www.hltv.org/stats/players\"\n\n# Getting the raw html\nresponse = get(url)\nhtml_soup = BeautifulSoup(response.text, 'html.parser')\n# print(html_soup)\n# Finding the specific cell row\ncontainers = html_soup.find_all('td', class_='playerCol')\n\n# Adding all the player names in HLTV to a big array\nplayer_list = []\ncontainer_list = []\nfor i in range(len(containers)):\n player_list.append(containers[i].text.lower())\n container_list.append(containers[i])\n\nf = open(\"data.txt\", 'r')\nfile = f.readlines()\nplayer0_name = file[0].lower().strip()\nplayer1_name = file[1].lower().strip()\n\n# Player0\nq = open(\"data.txt\", 'a')\nif player0_name in player_list:\n # Finding index of player in the array\n index = player_list.index(player0_name)\n\n # Creating the URLs for the web scraper\n player0_url = \"https://hltv.org\" + container_list[index].a['href']\n player0_url_matches = player0_url[:31] + \"matches\" + player0_url[30:]\n\n player0_response = get(player0_url_matches)\n html_soup_0 = BeautifulSoup(player0_response.text, 'html.parser')\n\n containers_0 = html_soup_0.find_all('td', class_='statsCenterText')\n if len(containers_0) > number_of_points:\n limit = number_of_points\n else:\n limit = len(containers_0)\n\n for i in range(limit):\n # Splitting the KD array into kills and deaths\n kd_array = containers_0[i].text.split(\" - \")\n if i == 0: # This is needed as the first line needs to be on a new line\n kd = \"\\n\" + kd_array[0] + \" \" + kd_array[1] + \" 0\\n\"\n else:\n kd = kd_array[0] + \" \" + kd_array[1] + \" 0\\n\"\n print(kd)\n q.write(kd)\nelse:\n print(\"The first player name cannot be found. Please check the name and retry.\")\n quit()\n# Player1\nif player1_name in player_list:\n index = player_list.index(player1_name)\n\n player1_url = \"https://hltv.org\" + container_list[index].a['href']\n player1_url_matches = player1_url[:31] + \"matches\" + player1_url[30:]\n\n player1_response = get(player1_url_matches)\n html_soup_1 = BeautifulSoup(player1_response.text, 'html.parser')\n\n containers_1 = html_soup_1.find_all('td', class_='statsCenterText')\n if len(containers_1) > number_of_points:\n limit = number_of_points\n else:\n limit = len(containers_1)\n\n for i in range(limit):\n kd_array = containers_1[i].text.split(\" - \")\n if i == limit-1:\n kd = kd_array[0] + \" \" + kd_array[1] + \" 1\"\n else:\n kd = kd_array[0] + \" \" + kd_array[1] + \" 1\\n\"\n print(kd)\n q.write(kd)\nelse:\n print(\"The first player name cannot be found. 
Please check the name and retry.\")\n    quit()\n\nprint(\"\\nDone.\")\nq.close()\nf.close()\n" }, { "alpha_fraction": 0.53461092710495, "alphanum_fraction": 0.5479927062988281, "avg_line_length": 39.62416076660156, "blob_id": "d25d9f5c1c28ce914b87b273f90f09c4b26ebd2", "content_id": "89bad492e6b9bc89539f2a724cd5bde3d9200ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6053, "license_type": "no_license", "max_line_length": 122, "num_lines": 149, "path": "/main.py", "repo_name": "Monteven/CSGO-Machine-Learning", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom draw_graph import picture\n\nyaxis_max = 0\nxaxis_max = 0\nyaxis_min = 20\nxaxis_min = 20\n\n\ndef predict_func():\n    global yaxis_max\n    global xaxis_max\n    global yaxis_min\n    global xaxis_min\n    x_list = []\n    y_list = []\n    current_line = 0\n    f = open(\"data.txt\", 'r')\n    for read_line in f.readlines():\n        if current_line < 2:\n            if current_line == 0:\n                player0 = read_line.strip()  # Player 0 is the first line\n            elif current_line == 1:\n                player1 = read_line.strip()  # Player 1 is the second line\n        else:\n            i_array = read_line.split(\" \")\n            x_var1 = int(i_array[0].strip())  # this is kills for player\n            x_var2 = int(i_array[1])  # this is deaths for player\n            y_var = int(i_array[2].strip())  # this is player#\n            x_var = [x_var1, x_var2]\n            x_list.append(x_var)\n            y_list.append(y_var)\n            if xaxis_max < x_var2:  # Just dabbling with max and mins for the graph\n                xaxis_max = x_var2\n            if xaxis_min > x_var2:  # separate ifs: an elif here would skip the min check whenever the max was updated\n                xaxis_min = x_var2\n            if yaxis_max < x_var1:\n                yaxis_max = x_var1\n            if yaxis_min > x_var1:\n                yaxis_min = x_var1\n        current_line += 1\n    # Using sklearn to train (fit) the algorithm\n    f.close()\n    x_train = np.array(x_list)\n    y_train = np.array(y_list)\n    classifier = GaussianNB()  # Creating a classifier of a gaussian naive bayes type\n    classifier.fit(x_train, y_train)  # Try fit features, X, to labels, Y using training points to train classifier\n    prediction_input = input(\"Prediction in form kills, deaths: \").split(\", \")\n    to_be_graphed = input(\"Would you like it graphed y/n: \").lower()\n    kills = int(prediction_input[0])\n    deaths = int(prediction_input[1])\n\n    if kills > yaxis_max:\n        yaxis_max = kills\n    if deaths > xaxis_max:\n        xaxis_max = deaths\n    prediction = classifier.predict([[kills, deaths]])  # Now attempting to predict a label for a new set of features\n    if prediction == 0:\n        text = \"\\nPrediction: {} for {} kills and {} deaths.\\n\".format(player0, str(kills), str(deaths))\n        print(text)\n    elif prediction == 1:\n        text = \"\\nPrediction: {} for {} kills and {} deaths.\\n\".format(player1, str(kills), str(deaths))\n        print(text)\n    else:\n        print(\"Error\")\n    correct_q = input(\"Was it right y/n: \").lower()\n    if correct_q == \"y\":\n        correct = prediction[0]  # If the prediction is right set it to correct\n    elif correct_q == \"n\":\n        if prediction[0] == 0:  # If not correct check which prediction was made and then set the right one\n            correct = 1  # store ints consistently (this was previously the string \"1\")\n        else:\n            correct = 0\n    else:\n        correct = \"Error\"\n    to_save = input(\"Thanks! 
Do you want this saved for next time y/n: \").lower()\n if to_save == \"y\":\n q = open(\"data.txt\", \"a\")\n towrite = \"\\n\" + str(kills) + \" \" + str(deaths) + \" \" + str(correct)\n q.write(towrite)\n q.close()\n print(\"I'll use this for next time.\")\n num_lines = sum(1 for line in open(\"data.txt\"))\n if to_be_graphed == \"y\":\n picture(classifier, x_list, y_list, num_lines-2, xaxis_min, xaxis_max, yaxis_min, yaxis_max, to_be_graphed, kills,\n deaths, player0, player1)\n\n\ndef graph_func():\n only_at_end = input(\"Do you want the end result only (y/n): \").lower()\n global yaxis_max\n global xaxis_max\n global yaxis_min\n global xaxis_min\n x_list = []\n y_list = []\n current_line = 0\n f = open(\"data.txt\", 'r')\n for read_line in f.readlines():\n if current_line < 2:\n if current_line == 0:\n player0 = read_line.strip() # Player 0 is the first line\n elif current_line == 1:\n player1 = read_line.strip() # Player 1 is the second line\n else:\n i_array = read_line.split(\" \") # Splitting each line into kills | deaths | player#\n x_var1 = int(i_array[0].strip()) # taking the kills number\n x_var2 = int(i_array[1]) # taking the deaths number\n y_var = int(i_array[2].strip()) # taking the player#\n x_var = [x_var1, x_var2] # making our feature array\n x_list.append(x_var)\n y_list.append(y_var)\n if xaxis_max < x_var2: # Just dabbling with max and mins for the graph\n xaxis_max = x_var2\n if xaxis_min > x_var2:\n xaxis_min = x_var2\n if yaxis_max < x_var1:\n yaxis_max = x_var1\n if yaxis_min > x_var1:\n yaxis_min = x_var1\n\n # Using sklearn to train (fit) the algorithm\n x_train = np.array(x_list)\n y_train = np.array(y_list)\n classifier = GaussianNB() # Creating a classifier of a gaussian naive bayes type\n classifier.fit(x_train, y_train) # Try fit features, X, to labels, Y using training points to train\n num_lines = sum(1 for line in open(\"data.txt\"))\n if (current_line - 1) > 0: # needs to be > 0 as otherwise there aren't any samples\n if only_at_end == \"n\": # generating an image for each point\n picture(classifier, x_list, y_list, current_line-1, xaxis_min, xaxis_max, yaxis_min, yaxis_max, \"n\",\n None, None, player0, player1)\n if only_at_end == \"y\": # generating only the last image\n if current_line+2 == num_lines-1:\n picture(classifier, x_list, y_list, num_lines-2, xaxis_min, xaxis_max, yaxis_min, yaxis_max,\n \"n\", None, None, player0, player1)\n break\n current_line += 1\n f.close()\n\n\nbegin_check = input(\"Predict (p) or graph (g)? \").lower()\n\nif begin_check == \"p\":\n predict_func()\nelif begin_check == \"g\":\n graph_func()\nelse:\n quit()\n" } ]
4
CreatCodeBuild/Universal-Machine
https://github.com/CreatCodeBuild/Universal-Machine
f9a4f6a0f68a46c66cec87c40d0ab8eafb7e64d4
dfb2782b5bbff617e15937616efff433162875c4
a282b2082d749a909fd7a43f60a4866b999c1a7c
refs/heads/master
2020-12-03T07:59:10.598969
2017-06-28T08:13:15
2017-06-28T08:13:15
95,645,184
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.582129955291748, "alphanum_fraction": 0.639590859413147, "avg_line_length": 23.61481475830078, "blob_id": "c5d24536da1b20960e5126c1690c70f93855488c", "content_id": "a682f58ec4d66abca5ace4e31f9d628e718549e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3324, "license_type": "no_license", "max_line_length": 97, "num_lines": 135, "path": "/um.py", "repo_name": "CreatCodeBuild/Universal-Machine", "src_encoding": "UTF-8", "text": "registers = [0, 0, 0, 0, 0, 0, 0, 0]\narrays = []\ninstruction_pointer = 0\nprint(registers)\n\n\ndef get_register(uint):\n\tA = (uint & 0b0000000111000000) >> 6\n\tB = (uint & 0b0000000000111000) >> 3\n\tC = (uint & 0b0000000000000111)\n\n\treturn A, B, C\n\ndef program_info():\n\tprint(registers)\n\tprint('instruction pointer:', instruction_pointer)\n\tfor a in arrays:\n\t\tif a == None or len(a) < 20:\n\t\t\tprint(a)\n\t\telse:\n\t\t\tprint(len(a))\n\n\nif __name__ == '__main__':\n\t# load the initial program\n\twith open('codex.umz', mode='rb') as f:\n\t\tarrays.append(f.read())\n\n\twhile instruction_pointer < len(arrays[0]):\n\t\t# print(instruction_pointer, arrays[0][4])\n\n\t\tuint = arrays[0][instruction_pointer*4:instruction_pointer*4+4]\n\t\t# print(uint, uint[0], uint[0] & 0b1111_0000)\n\t\tuint = int.from_bytes(uint, byteorder='big', signed=False)\n\t\t# print(bin(uint))\n\t\toperator = uint >> 28\n\n\t\tA, B, C = get_register(uint)\n\t\t# print(A, B, C, operator, instruction_pointer)\n\t\t# if operator != 0:\n\t\t# \tprint(operator, '123')\n\t\t# \texit()\n\t\tif operator == 0:\n\t\t\t# Conditional Move\n\t\t\tif registers[C] != 0:\n\t\t\t\t# The spec did say if register B should be written 0 afterwards\n\t\t\t\tregisters[A] = registers[B]\n\n\t\telif operator == 1:\n\t\t\t# Array Index\n\t\t\ttry:\n\t\t\t\tregisters[A] = arrays[registers[B]][registers[C]]\n\t\t\texcept IndexError:\n\t\t\t\texit()\n\n\t\telif operator == 2:\n\t\t\t# Array Amendment\n\t\t\ttry:\n\t\t\t\tarrays[registers[A]][registers[B]] = registers[C]\n\t\t\texcept IndexError:\n\t\t\t\texit()\n\n\t\telif operator == 3:\n\t\t\tregisters[A] = registers[B] + registers[C]\n\n\t\telif operator == 4:\n\t\t\tregisters[A] = registers[B] * registers[C]\n\n\t\telif operator == 5:\n\t\t\ttry:\n\t\t\t\tregisters[A] = registers[B] / registers[C]\n\t\t\texcept ZeroDivisionError:\n\t\t\t\traise Exception('Divided By Zero')\n\n\t\telif operator == 6:\n\t\t\tregisters[A] = ~(registers[B] & registers[C])\n\n\t\telif operator == 7:\n\t\t\tprogram_info()\n\t\t\traise Exception('Halt')\n\n\t\telif operator == 8:\n\t\t\tnew_array = [0] * registers[C]\n\t\t\ttry:\n\t\t\t\tpointer = arrays.index(None)\n\t\t\t\tarrays[pointer] = new_array\n\t\t\texcept ValueError:\n\t\t\t\tpointer = len(arrays)\n\t\t\t\tarrays.append(new_array)\n\t\t\tregisters[B] = pointer\n\n\t\telif operator == 9:\n\t\t\t# Abandonment\n\t\t\tif registers[C] == 0 or arrays[registers[C]] == None:\n\t\t\t\traise Exception('Try to abandon 0 array or an inactive array')\n\t\t\ttry:\n\t\t\t\tarrays[registers[C]] = None\n\t\t\texcept IndexError:\n\t\t\t\traise Exception('Try to abandon unallocated array')\n\n\t\telif operator == 10:\n\t\t\tvalue = registers[C]\n\t\t\tif 0 <= value <= 255:\n\t\t\t\tprint(chr(value))\n\t\t\telse:\n\t\t\t\traise Exception('Output value of out range')\n\n\t\telif operator == 11:\n\t\t\ttry:\n\t\t\t\ti = ord(input())\n\t\t\t\tif 0 <= i <= 255:\n\t\t\t\t\tregisters[C] = i\n\t\t\t\telse:\n\t\t\t\t\traise Exception('Input value of out 
range')\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tregisters[C] = 0xFFFF\n\n\t\telif operator == 12:\n\t\t\ttry:\n\t\t\t\tnew_array = arrays[registers[B]][:] # single all elements are integers, shallow copy is fine\n\t\t\texcept IndexError and AttributeError:\n\t\t\t\traise Exception('Try to copy inactive array or unallocated array')\n\t\t\tarrays[0] = new_array\n\t\t\tinstruction_pointer = registers[C]\n\n\t\telif operator == 13:\n\t\t\tA = (uint & 0b0000_1110_0000_0000_0000_0000_0000_0000) >> 25\n\t\t\tregisters[A] = uint & 0b0000_0001_1111_1111_1111_1111_1111_1111\n\n\t\telse:\n\t\t\traise Exception(\"Invalid Operator\")\n\n\t\tinstruction_pointer += 1\n\t\t# if instruction_pointer > 3:\n\t\t# \texit()\n\n" } ]
1
raj-shr-git/heroku-apps
https://github.com/raj-shr-git/heroku-apps
a68fd749dd73ad9d2f3e20c95b4b0ddeb77e0a00
fc9abcb943ef627dfca0a238bda09ad09b4b47ea
9ec948159e9be2709afe73eb8901a5340d47c4f7
refs/heads/master
2022-12-17T16:28:37.864297
2020-09-12T12:53:00
2020-09-12T12:53:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4492753744125366, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15.75, "blob_id": "eed40517a81b0738bc03043914bbe44bb6b0b6bd", "content_id": "f147ef3d88ba52edd7f45993f55453ecaad98995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 69, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/requirements.txt", "repo_name": "raj-shr-git/heroku-apps", "src_encoding": "UTF-8", "text": "streamlit==0.66.0\r\npandas==1.1.1\r\nnumpy==1.18.5\r\nscikit-learn==0.23.2" }, { "alpha_fraction": 0.7245600819587708, "alphanum_fraction": 0.732211172580719, "avg_line_length": 25.693878173828125, "blob_id": "d1b12cfcddce72f81818e40d026e5c544f74d121", "content_id": "302d577bd2b25a2e3fcc84b348ba81cee87e67e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 67, "num_lines": 49, "path": "/my-app2.py", "repo_name": "raj-shr-git/heroku-apps", "src_encoding": "UTF-8", "text": "import os\nimport streamlit as st\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\nst.sidebar.image(os.path.join('Datasciencepro.png'),None,100)\nst.write(\"\"\"\n# HR Analytics - Predict the **Salary** based on **Experience** \nSimple Linear Regression!\n\"\"\")\n\nst.sidebar.header('User Input Parameters')\n\ndef user_input_features():\n Experience = st.sidebar.slider('Experience in Years', 0, 20, 0)\n data = {'Experience': Experience}\n features = pd.DataFrame(data, index=[0])\n return features\n\ndf = user_input_features()\n\nst.subheader('User :: Input parameters')\nst.write(df)\n\n# os.chdir(r'C:\\Users\\Rahul\\Desktop\\Streamlit')\ndataset = pd.read_csv(os.path.join('Salary_Data.csv'))\nX = dataset.iloc[:,:-1].values\nY = dataset.iloc[:,1].values\n\nregressor = LinearRegression()\nregressor.fit(X, Y)\n\nprediction = regressor.predict(df)\n#prediction_proba = clf.predict_proba(df)\n\n#st.subheader('Class labels and their corresponding index number')\n#st.write(iris.target_names)\n\nst.subheader('Prediction')\n#st.write(iris.target_names[prediction])\nst.write(prediction)\n\nst.subheader('The regression coefficients are:')\nst.write('Slope of a line:', regressor.coef_)\n\nst.write('Y - Intercept of a line:', regressor.intercept_)\n\n#st.subheader('Prediction Probability')\n#st.write(prediction_proba)" } ]
2
TheMLGuy/Pokemon-Type-Classification-Challenge
https://github.com/TheMLGuy/Pokemon-Type-Classification-Challenge
4dd1739e8895ba7ccaf52e624eba94eab884c56b
d059f52cd9f9c7ee9e55bf576afc324cdc2dd1f7
bc72d36ddb7dbfb455f28195efd84909850f700a
refs/heads/master
2021-01-13T03:32:12.880348
2018-02-02T06:33:46
2018-02-02T06:33:46
77,532,639
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5771812200546265, "alphanum_fraction": 0.6398210525512695, "avg_line_length": 20.897436141967773, "blob_id": "557192ca54d7b872a0787f1bc72e06ff9387f854", "content_id": "f0a703ecbbd1e97501a320cf452e8460c76cea02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 62, "num_lines": 39, "path": "/pokemon.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 28 16:50:28 2016\r\n\r\n@author: DELL\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tfnet\r\n#import tensorflow as tf\r\ndf=pd.read_csv('C:/Users/DELL/Anaconda3/Datasets/Pokemon.csv')\r\n#print(df.keys())\r\n#print(df.head(1))\r\n#df=df[:,4:9]\r\narr=np.array(df)\r\n#print(df['Type 1'].unique()) \r\n#print(len(arr[:,4:9]))\r\nlabels=arr[:,2:3]\r\nvar=arr[:,4:11]\r\nprint(\"------------\")\r\n#print(len(arr[:,2]))\r\nuniqueClass=df['Type 1'].unique() \r\ndataset=np.hstack([var,labels])\r\nprint(uniqueClass)\r\n\r\ndata = np.asarray(var,dtype=np.float32)\r\nmean = data.mean(axis=0)\r\nstd = data.std(axis=0)\r\ndata = (data - mean) / std\r\n\r\n#create 70% train data\r\nnrow=df.shape[0]\r\nprint(data[0:1,:])\r\ntrain_data=data[0:561,:]\r\ntrain_labels=labels[0:561]\r\ntest_data=data[562:801,:]\r\ntest_labels=labels[562:801]\r\ntfnet.main(train_data, train_labels, test_data, test_labels)\r\n\t" }, { "alpha_fraction": 0.6593198776245117, "alphanum_fraction": 0.6895465850830078, "avg_line_length": 26.491228103637695, "blob_id": "686b3f9614224fc3b60418487a699b2b2a6fc425", "content_id": "ad75ea821932c7adc464c37db6139651e82ee11f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1588, "license_type": "no_license", "max_line_length": 72, "num_lines": 57, "path": "/simpleNN.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\n#read the data\npokemonDF=pd.read_csv('Pokemon.csv')\n\n#print data\nprint(pokemonDF.head(2))\n\n#define the unique pokemons in the dataset and assign to nclasses\n# We expect any pokemon to be in one of the classes determined as below\nnclasses=pokemonDF['Type 1'].unique().shape[0]\n\n#define hyperparamters\nlearning_rate=0.001\ntraining_epochs=200\n\n#network paramters\nhidden_1=13\nhidden_2=12\ninput_layer=7\nn_classes=18\n\n#define placeholders\nX=tf.placeholder(tf.float32,shape=[None,input_layer])\ny=tf.placeholder(tf.float32,[None,n_classes])\n\n\n\nW = {\n 'h1':tf.Variable(tf.random_normal([input_layer,hidden_1])),\n 'h2':tf.Variable(tf.random_normal([hidden_1,hidden_2])),\n 'out':tf.Variable(tf.random_normal([hidden_2,n_classes])) \n }\nb={\n 'b1':tf.Variable(tf.random_normal([hidden_1])),\n 'b2':tf.Variable(tf.random_normal([hidden_2])),\n 'out':tf.Variable(tf.random_normal([n_classes])) \n }\n\nhlayer_1=tf.add(tf.matmul(X,W['h1']),b['b1'])\nhlayer_1=tf.nn.relu(hlayer_1)\nhlayer_2=tf.add(tf.matmul(hlayer_1,W['h2']),b['b2'])\nhlayer_2=tf.nn.relu(hlayer_2)\noutLayer=tf.add(tf.matmul(hlayer_2,W['out']),b['out'])\ncost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(outLayer,y))\noptimizer=tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain=optimizer.minimize(cost)\ninit=tf.global_variables_initializer()\n\nwith tf.Session() as ses:\n ses.run(init)\n for ep in range(training_epochs):\n 
ses.run(train)\n if ep %50==0:\n print(ep, ses.run(W), ses.run(b))\n \n " }, { "alpha_fraction": 0.6496815085411072, "alphanum_fraction": 0.6724295020103455, "avg_line_length": 21.4489803314209, "blob_id": "b139c651b19211fc09d06c21548f72076f295ab7", "content_id": "d5f8a482a13a0b1c335990848a6c5bf6d95a232b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 70, "num_lines": 49, "path": "/preprocess.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 vishalapr <vishalapr@vishal-Lenovo-G50-70>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\n\nimport numpy as np\nimport math\nimport random\n\n# Give a class ID to each output type\nclasses = {}\n\nfeatures = 7\ncurrent_class_id = 1\n\ndef get_data(data_file):\n\tglobal current_class_id\n\tdata = []\n\tlabels = []\n\tinp_file = open(data_file,'r')\n\tlines = inp_file.readlines()\n\t# Ignore the first line since it is just the names of each attribute\n\tfor line in lines[1:]:\n\t\tdata_point = []\n\t\tdata_cur = line.split(',')\n\t\t# Labels 4 to 10 give us the attributes which we can use to classify\n\t\tfor i in range(4,len(data_cur)-2):\n\t\t\tdata_point.append(data_cur[i])\n\t\t# Add the type of the pokemon as a label\n\t\tlabels.append(data_cur[2])\n\t\tdata.append(data_point)\n\t\tif data_cur[2] not in classes:\n\t\t\tclasses[data_cur[2]] = current_class_id\n\t\t\tcurrent_class_id += 1\n\treturn data, labels\n\ndef normalize_data(data):\n\tdata = np.asarray(data,dtype=np.float32)\n\tmean = data.mean(axis=0)\n\tstd = data.std(axis=0)\n\tdata = (data - mean) / std\n\treturn data" }, { "alpha_fraction": 0.6522634029388428, "alphanum_fraction": 0.6838134527206421, "avg_line_length": 59.75, "blob_id": "649f644648b863b8c00803e2d180f9f9805e0403", "content_id": "45555474680eb7799c1a023aafe1cafa9e690f5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1458, "license_type": "no_license", "max_line_length": 349, "num_lines": 24, "path": "/README.md", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "# Pokemon Type Prediction\n\n* Given the stats of a Pokemon as the input, predict which type they belong to (their primary type). An example is, Bulbasaur -> Grass. The task, although it seems rather simple, is not that easy since a lot of the stats are very similar in a lot of different types. Pokemon that have two types make it even harder since their stats would be shared.\n* There are 19 types in total, so the network performs a 19 class classification\n* Implemented a simple 4 layered neural network (2 hidden layers) with a softmax layer at the end\n* Uses adam optimization to perform updation\n* Computes a top-5 match in the accuracy (ie. If any one of the top 5 classes match the correct output, we consider it as correct). 
Also performed a few experiments with top-3 and top-1 matches as well\n\n## Pokemon Type Challenge\nPokemon Type Prediction challenge by @Sirajology on [Youtube](https://www.youtube.com/watch?v=0xVqLJe9_CY)\n\n## Dependencies\n* Tensorflow\n* Numpy\n* Pandas\n\n## Usage\nRun `python MLP.py` and it would train the network and then run it on a randomly subsampled test dataset (not included in the training) and print the accuracy ofr two evaluation methods.\n\n## Results\n|ID |Top-K |Network-Shape |Iterations |Accuracy.avg\t| \n|--------|-------|---------------|---------------|--------------|\n|1 |5 |(7,512,256,18) |100 |58.41 |\n|2 |5 |(7,128,256,18) |100 |58\t |\n" }, { "alpha_fraction": 0.6146166324615479, "alphanum_fraction": 0.6405750513076782, "avg_line_length": 33.239437103271484, "blob_id": "e69fdbedce988ef531ccf9efda9fb18fc0ca86c0", "content_id": "7795fcc1cd8f2ccdba65a0737854f9111098c9cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2504, "license_type": "no_license", "max_line_length": 101, "num_lines": 71, "path": "/tfnet.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "\r\nimport tensorflow as tf\r\nimport random\r\n\r\n# Hyperparameters\r\nlearning_rate = 0.001\r\ntraining_epochs = 100\r\n\r\n# Network\r\nn_hidden_1 = 512 # 1st layer number of features\r\nn_hidden_2 = 256 # 2nd layer number of features\r\nn_input = 7 # After performing PCA the input size becomes 5\r\nn_classes = 19 # The different types of a Pokemon\r\n\r\ndef main(train_data, train_labels, test_data, test_labels):\r\n\t# Create a one hot representation of the labels\r\n\ttrain_labels_new = []\r\n\ttest_labels_new = []\r\n\tfor lab in train_labels:\r\n\t\tlabel_new = []\r\n\t\tfor lab_test in range(1,20):\r\n\t\t\tif lab == lab_test:\r\n\t\t\t\tlabel_new.append(1)\r\n\t\t\telse:\r\n\t\t\t\tlabel_new.append(0)\r\n\t\ttrain_labels_new.append(label_new)\r\n\tfor lab in test_labels:\r\n\t\tlabel_new = []\r\n\t\tfor lab_test in range(1,20):\r\n\t\t\tif lab == lab_test:\r\n\t\t\t\tlabel_new.append(1)\r\n\t\t\telse:\r\n\t\t\t\tlabel_new.append(0)\r\n\t\ttest_labels_new.append(label_new)\r\n\r\n\tX = tf.placeholder(\"float\", [None, n_input])\r\n\tY = tf.placeholder(\"float\", [None, n_classes])\r\n\tweights = {\r\n\t 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\r\n\t 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\r\n\t 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\r\n\t}\r\n\tbiases = {\r\n\t 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\r\n\t 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\r\n\t 'out': tf.Variable(tf.random_normal([n_classes]))\r\n\t}\r\n\r\n\th_layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])\r\n\th_layer_1 = tf.nn.relu(h_layer_1)\r\n\th_layer_2 = tf.add(tf.matmul(h_layer_1, weights['h2']), biases['b2'])\r\n\th_layer_2 = tf.nn.relu(h_layer_2)\r\n\tout_layer = tf.matmul(h_layer_2, weights['out']) + biases['out']\r\n\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(out_layer, Y))\r\n\toptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n\t# Initialize tensorflow\r\n\tinit = tf.global_variables_initializer()\r\n\r\n\twith tf.Session() as ses:\r\n\t\tses.run(init)\r\n\t\tfor ep in range(training_epochs):\r\n\t\t\tloss_avg = 0\r\n\t\t\tfor point in range(len(train_data)):\r\n\t\t\t\t_, c = ses.run([optimizer, cost], feed_dict={X:[train_data[point]], 
Y:[train_labels_new[point]]})\r\n\t\t\t\tloss_avg += c\r\n\t\t\tprint (\"Epoch \" + str(ep) + \", \" + \"Loss \" + str(loss_avg/len(train_data)))\r\n\r\n\t\t# Test the trained model\r\n\t\tcorrect_pred2 = tf.nn.in_top_k(out_layer, tf.cast(tf.argmax(Y,1), \"int32\"), 5)\r\n\t\taccuracy2 = tf.reduce_mean(tf.cast(correct_pred2, \"float\"))\r\n\t\tprint (\"Accuracy \" + str(accuracy2.eval({X:test_data, Y:test_labels_new})*100))\r\n" }, { "alpha_fraction": 0.6890838146209717, "alphanum_fraction": 0.7046783566474915, "avg_line_length": 20.851064682006836, "blob_id": "9e4d8b3f5d8c8cb6fac86786ed86a2920772ccba", "content_id": "27f3fcde5cd3eb5a142a531a2a99ecbd3d60fce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 63, "num_lines": 47, "path": "/main.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 vishalapr <vishalapr@vishal-Lenovo-G50-70>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\n\nfrom preprocess import *\nimport tfnet\nimport warnings\nimport numpy as np\nfrom random import sample\n# Ignore deprecation warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\ndata, labels_temp = get_data('Pokemon.csv')\nlabels = []\nfor label in labels_temp:\n\tlabels.append(classes[label])\ndata = normalize_data(data)\n\n# Split data into 1/10 test and 9/10 train\nstart_idx = 0\nindices = sorted(sample(range(len(data)), len(data)/10))\ntest_data = []\ntest_labels = []\ntrain_data = []\ntrain_labels = []\n\nprint indices\n\nfor i in range(len(data)):\n\tif start_idx < len(indices) and indices[start_idx] == i:\n\t\ttest_data.append(data[i])\n\t\ttest_labels.append(labels[i])\n\t\tstart_idx += 1\n\telse:\n\t\ttrain_data.append(data[i])\n\t\ttrain_labels.append(labels[i])\n\nprint len(train_data), len(test_data)\ntfnet.main(train_data, train_labels, test_data, test_labels)" }, { "alpha_fraction": 0.627185583114624, "alphanum_fraction": 0.6573604345321655, "avg_line_length": 31.787036895751953, "blob_id": "81f7412dd09055c6f9e53046e1580aa573553347", "content_id": "5d595a9b93e57d3dc21538dc25821040f2d53972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3546, "license_type": "no_license", "max_line_length": 121, "num_lines": 108, "path": "/MLP.py", "repo_name": "TheMLGuy/Pokemon-Type-Classification-Challenge", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 7 21:09:09 2017\n\n@author: Ashwin\n\"\"\"\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npokemonDF=pd.read_csv('Pokemon.csv')\n#display first 6 rows of data\npokemonDF.head(6)\npokemonDF.keys()\n#define hyperparameters\nlearning_rate=0.001\nepoch=100\nbatch_size=1\nn_classes=18\nn_hidden1=128\nn_hidden2=256\n#define input data and output labels\ninput_data=pokemonDF[['Total','HP','Attack','Defense','Sp. Atk','Sp. 
Def','Speed']]\nlabel=pokemonDF['Type 1']\nX=tf.placeholder(tf.float32,[None,7])\ny=tf.placeholder(tf.float32,[None,n_classes])\n\n\ninput_data_arr=np.array(input_data,dtype=float)\noriginalY=label.unique()\noneHotLabel=[]\nfor j in label:\n labelSet=[]\n for i in originalY:\n if j==i:\n labelSet.append(1)\n else:\n labelSet.append(0)\n oneHotLabel.append(labelSet) \n#normalize data\n\ninput_data_arr=(input_data_arr-input_data_arr.mean())/input_data_arr.std()\n#define training set and test set\n\n\ntestData=input_data_arr[0:101]\ntestlabel=np.array(oneHotLabel[0:101],dtype=float)\n\ntrainLabel=np.array(oneHotLabel[101:801],dtype=float)\ntrainData=input_data_arr[101:801]\n \n \n#define weights and biases for a model that consists of\n#2 hidden layers\nweights= {\n 'w1':tf.Variable(tf.random_normal([trainData.shape[1],n_hidden1])),\n 'w2':tf.Variable(tf.random_normal([n_hidden1,n_hidden2])),\n 'out':tf.Variable(tf.random_normal([n_hidden2,n_classes])) \n }\n \nbiases = {\n 'b1':tf.Variable(tf.random_normal([n_hidden1])),\n 'b2':tf.Variable(tf.random_normal([n_hidden2])),\n 'out':tf.Variable(tf.random_normal([n_classes])) \n } \n\n#define model\n\nlayer1=tf.matmul(X,weights['w1'])+biases['b1'] \nlayer1=tf.nn.relu(layer1)\nlayer2=tf.matmul(layer1,weights['w2'])+biases['b2']\nlayer2=tf.nn.relu(layer2) \nout_layer=tf.matmul(layer2,weights['out'])+biases['out'] \n \n#define cost and optimizer \n\ncostFunction=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(out_layer,y))\n#costFunction=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(out_layer,y))\noptimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(costFunction)\n \n#initialize tensorflow\ninit=tf.global_variables_initializer()\n\nwith tf.Session() as ses:\n ses.run(init)\n for ep in range(epoch):\n avg_loss=0\n for batch_element in range(len(trainData)):\n _,c= ses.run([optimizer,costFunction],feed_dict={X:[trainData[batch_element]],y:[trainLabel[batch_element]]})\n avg_loss+=c\n print(\"Epoch \"+str(ep)+\" loss-> \" +str(avg_loss/len(trainData)))\n \n#evaluate prediction result\n correct_pred2 = tf.nn.in_top_k(out_layer, tf.cast(tf.argmax(y,1), \"int32\"), 5)\n accuracy2 = tf.reduce_mean(tf.cast(correct_pred2, tf.float32))\n print (\"Accuracy of 'in top k' evaluation method \" + str(accuracy2.eval({X:testData, y:testlabel})*100))\n print(\"gonna predict classes for test data\\n\")\n# pred=tf.argmax(y,1)\n# print(\" prediction \"+pred.eval(feed_dict={X:testData}, session=ses))\n \n \n pred=tf.equal(tf.argmax(out_layer,1),tf.argmax(y,1))\n accuracy=tf.reduce_mean(tf.cast(pred,tf.float32))\n print(\"Accuracy of argmax method \"+str(accuracy.eval({X:testData,y:testlabel})*100))\n accuracy=tf.argmax(out_layer,1)\n print(\"predictions \"+str(accuracy.eval(feed_dict={X:testData})))\n \n" } ]
7
cvengros/unresyst
https://github.com/cvengros/unresyst
a3b8189b2bf6682dcddb4d86eba180cb427285d2
936842c9eb1b5f4948acb6e7d185b6614464bcea
6dcebc032a22f82aac6db52404c2f7e5b05b0124
refs/heads/master
2021-01-10T13:19:24.970467
2018-01-31T21:20:35
2018-01-31T21:20:35
55,167,872
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8373494148254395, "alphanum_fraction": 0.8373494148254395, "avg_line_length": 40.5, "blob_id": "d65849e870bb0af3006bd0087f2c786bde12eab5", "content_id": "5624145863bed58df98504244c5cc053f0e7fe7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 64, "num_lines": 4, "path": "/UnresystCD/code/adapter/unresyst/aggregator/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The package contains all classes for the aggregator layer.\"\"\"\n\nfrom linear_aggregator import LinearAggregator\nfrom combining_aggregator import CombiningAggregator\n" }, { "alpha_fraction": 0.5338044762611389, "alphanum_fraction": 0.5361601710319519, "avg_line_length": 38.09345626831055, "blob_id": "b360191817d609d4eb2a543c9173515247c4e54b", "content_id": "4154f74a3e83c119f5c22aa26eee045400afac5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4245, "license_type": "no_license", "max_line_length": 118, "num_lines": 107, "path": "/code/adapter/unresyst/algorithm/simple_algorithm.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"A trivial algorithm returning what it has\"\"\"\n\nfrom base import BaseAlgorithm\nfrom unresyst.models.abstractor import RelationshipInstance, \\\n PredictedRelationshipDefinition\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\n\nclass SimpleAlgorithm(BaseAlgorithm):\n \n # Build phase:\n #\n \n def build(cls, recommender_model): \n \"\"\"Do nothing\"\"\"\n return\n\n # Recommend phase:\n #\n \n def get_relationship_prediction(self, recommender_model, dn_subject, dn_object, remove_predicted):\n \"\"\"See the base class for the documentation.\n \n Here - Handle remove_predicted, return the prediction if available, \n if not, return uncertain. \n \"\"\" \n\n # if predicted should be removed and the pair is in the predicted_rel, \n # return the special expectancy value\n if remove_predicted: \n # all predicted relationships\n qs_predicted = RelationshipInstance.filter_predicted(recommender_model)\n\n # the relationship between dn_subject and dn_object\n qs_predicted_rel = RelationshipInstance.filter_relationships(dn_subject, dn_object, queryset=qs_predicted)\n\n # if the prediction for the pair exists\n if qs_predicted_rel:\n \n # return the special expectancy value \n assert len(qs_predicted_rel) == 1\n predicted = qs_predicted_rel[0]\n\n return self._get_already_in_relatinship_prediction(\n recommender_model=recommender_model,\n predicted_relationship=predicted)\n \n # if prediction available, return it\n # \n \n # filter the predictions for recommender\n qs_pred = RelationshipPredictionInstance.objects.filter(\n recommender=recommender_model,\n subject_object1=dn_subject,\n subject_object2=dn_object)\n \n # if available return it\n if qs_pred: \n assert len(qs_pred) == 1\n return qs_pred[0] \n \n # otherwise return the uncertain\n return self._get_uncertain_prediction(\n recommender_model=recommender_model, \n dn_subject=dn_subject, \n dn_object=dn_object\n ) \n \n def get_recommendations(self, recommender_model, dn_subject, count, expectancy_limit, remove_predicted):\n \"\"\"See the base class for the documentation.\n \n Here - Handle remove_predicted and expectancy_limit, return the predictions \n we have. 
\n \n \"\"\" \n if remove_predicted:\n \n # create kwargs to exclude already liked objects\n #\n \n # get objects that are already liked\n #\n qs_predicted = RelationshipInstance.filter_predicted(recommender_model) \n\n # get ids of subjectobjects where dn_subject appears\n predicted_obj_ids = qs_predicted.filter( \n subject_object1=dn_subject).values_list('subject_object2__pk', flat=True)\n \n # construct the arguments to the exclude method.\n exclude_args = {\n # the ones that have the predicted def\n 'subject_object2__pk__in': predicted_obj_ids,\n }\n \n else:\n exclude_args = {} \n \n # get the recommendations ordered by the expectancy from the largest\n recommendations = RelationshipPredictionInstance.objects\\\n .filter(\n subject_object1=dn_subject,\n recommender=recommender_model, \n expectancy__gt=expectancy_limit)\\\n .exclude(**exclude_args)\\\n .distinct()\\\n .order_by('-expectancy')\n \n return list(recommendations[:count]) \n \n\n" }, { "alpha_fraction": 0.6227883696556091, "alphanum_fraction": 0.6234961152076721, "avg_line_length": 19.171428680419922, "blob_id": "7b151d19420122a4838ff7654ddc2b093c930e8a", "content_id": "537dc6792f7ef7acf9687c2c64c514516378f46d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1413, "license_type": "no_license", "max_line_length": 113, "num_lines": 70, "path": "/code/adapter/setup.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# parametry: \n# co chces vlozit do databaze: lastfm, demo\n# dontdrop: pokud se preda, nedropuje se databaze\n\n\nLASTFM=false\nDEMO=false\nFLIXSTER=false\nDONTDROP=false\nTRAVEL=false\n\nfor param in $*;\ndo \n case $param in\n 'lastfm')\n LASTFM=true\n ;;\n 'demo')\n DEMO=true\n ;;\n 'flixster')\n FLIXSTER=true\n ;;\n 'travel')\n TRAVEL=true\n ;;\n 'dontdrop')\n DONTDROP=true\n ;;\n esac\ndone\n\nif [ $DONTDROP = false ]\nthen\n # drop and create database\n echo \"Dropping and creating database.\"\n echo \"DROP DATABASE IF EXISTS adapter; CREATE DATABASE adapter CHARACTER SET utf8;\" | mysql --user=root mysql\nfi\n\n# syncdb\npython ./manage.py syncdb --noinput > /dev/null\n\n\nif [ $DEMO = true ]\nthen\n echo \"Adding demo data.\"\n echo \"from demo.save_data import save_data; save_data(); quit();\" | python ./manage.py shell\nfi\n\nif [ $LASTFM = true ]\nthen\n echo \"Adding last.fm data.\"\n echo \"from lastfm.save_data import save_data; save_data(); quit();\" | python ./manage.py shell\nfi\n\nif [ $FLIXSTER = true ]\nthen\n echo \"Adding flixster data.\"\n echo \"from flixster.save_data import save_data; save_data(); quit();\" | python ./manage.py shell\nfi\n\nif [ $TRAVEL = true ]\nthen\n echo \"Adding travel data.\"\n echo \"from travel.save_data import save_data; save_data(); quit();\" | python ./manage.py shell\nfi\n\necho \"\" \n" }, { "alpha_fraction": 0.6679438352584839, "alphanum_fraction": 0.6679438352584839, "avg_line_length": 40.21052551269531, "blob_id": "a3c7981df115919cbbc332ce1d569cc02eabfccd", "content_id": "15c9154a172b54dab069ff3133db48e3b6696594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 783, "license_type": "no_license", "max_line_length": 102, "num_lines": 19, "path": "/UnresystCD/code/adapter/unresyst/combinator/average_combinator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Combinator using a weighted average.\"\"\"\n\nfrom base import BaseCombinator\n\nclass AverageCombinator(BaseCombinator):\n 
\"\"\"A combinator using weighted average\n \"\"\"\n \n def _combine(self, combination_elements, ResultClass):\n \"\"\"See the base class for documentation\"\"\"\n \n # sort it in order to provide the right order of explanations\n combination_elements.sort(key=lambda el: el.get_expectancy(), reverse=True) \n\n # count the average and concatenation of the descriptions\n avgexp = sum([ce.get_expectancy() for ce in combination_elements]) / len(combination_elements)\n desc = self._concat_descriptions(combination_elements)\n \n return ResultClass(expectancy=avgexp, description=desc)\n" }, { "alpha_fraction": 0.543181300163269, "alphanum_fraction": 0.5789534449577332, "avg_line_length": 29.939067840576172, "blob_id": "3657a4cf62ca68f99f3d0e4c08af7f73cb470560", "content_id": "c6f1c1c9ec8d9c2e872e62ca38d245a783ff6b0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8638, "license_type": "no_license", "max_line_length": 168, "num_lines": 279, "path": "/code/adapter/travel/save_data.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Saving data from the last.fm datasets\"\"\"\n\nimport csv\nimport os\nimport re\nfrom datetime import datetime\nfrom dateutil.parser import parse\n\nfrom models import *\n\n\n\nregexp_url = r'/zajezdy/zobrazit/(?P<tour_type>[\\w-]+)/(?P<country_name>[\\w-]+)/(?P<tour_name>[\\w-]+)/*'\n\n# relative paths to the dataset files\nfilename_all = '../../datasets/travel/travel.csv'\n\nseparator = ','\n\ndecay_secs = 1200\n\nACTIONS = {\n 'HREF_CLICK': Click,\n 'ON_PAGE': MouseMove,\n 'OBJECT_INTERACTION': {\n 'ORDER': Order,\n 'QUESTION': Question,\n },\n 'PAGE_OPEN': ViewProfile,\n 'PAGE_CLOSE': None,\n}\n\"\"\"Dictionary action: class to save\"\"\"\n\ndef save_data():\n \"\"\"Save the data\"\"\"\n \n _parse_all(filename_all)\n print \"All saved.\"\n \n\n\"\"\"\n1,3,1,2010-04-23 11:37:34,PAGE_OPEN,REF: http://slantour.cz/zajezdy/katalog/za-sportem,/\n2,3,1,2010-04-23 11:37:35,PAGE_CLOSE,,/\n4,3,1,2010-04-23 11:37:36,PAGE_OPEN,REF: http://slantour.cz/,/zajezdy/katalog/pobytove-zajezdy\n5,3,1,2010-04-23 11:37:37,PAGE_CLOSE,,/zajezdy/katalog/pobytove-zajezdy\n7,3,1,2010-04-23 11:37:38,PAGE_OPEN,REF: http://slantour.cz/zajezdy/katalog/pobytove-zajezdy,/zajezdy/katalog/pobytove-zajezdy/ceska-republika\n8,3,1,2010-04-23 11:37:39,PAGE_CLOSE,,/zajezdy/katalog/pobytove-zajezdy/ceska-republika\n\nBefore removal:\n>>> from travel.models import *\n>>> from django.db.models import *\n>>> User.objects.count()\n12991\n>>> Click.objects.count()\n17426\n>>> Question.objects.count()\n21\n>>> Order.objects.count()\n227\n>>> MouseMove.objects.count()\n68759\n>>> Session.objects.count()\n14409\n>>> Tour.objects.count()\n702\n>>> TourType.objects.count()\n6\n>>> TourType.objects.all()\n[<TourType: pobytove-zajezdy>, <TourType: poznavaci-zajezdy>, <TourType: za-sportem>, <TourType: lazenske-pobyty>, <TourType: jednodenni-zajezdy>, <TourType: lyzovani>]\n>>> Country.objects.count()\n50\n>>> User.objects.annotate(scount=Count('session')).filter(scount__gt=1).count()\n834\n>>> ViewProfile.objects.count()\n40766\n\n>>> User.objects.annotate(ocount=Count('session__order')).filter(ocount__gte=1).count()\n193\n>>> User.objects.annotate(ocount=Count('session__order')).filter(ocount__gte=1).annotate(vcount=Count('session__viewprofile')).aggregate(Avg('vcount'))\n{'vcount__avg': 9.3161000000000005}\n>>> 
User.objects.annotate(ocount=Count('session__order')).filter(ocount__gte=1).annotate(vcount=Count('session__mousemove')).aggregate(Avg('vcount'))\n{'vcount__avg': 28.590699999999998}\n>>> User.objects.annotate(ocount=Count('session__order')).filter(ocount__gte=1).annotate(vcount=Count('session__question')).aggregate(Sum('vcount'))\n{'vcount__sum': 5}\n>>> User.objects.annotate(ocount=Count('session__order')).filter(ocount__gte=1).annotate(vcount=Count('session__click')).aggregate(Avg('vcount'))\n{'vcount__avg': 3.9430000000000001}\n\nAfter removal:\n>>> User.objects.count()\n193\n>>> Click.objects.count()\n672\n>>> Click.objects.values_list('session__user', 'tour').distinct().count()\n226\n>>> Question.objects.count()\n5\n>>> Question.objects.values_list('session__user', 'tour').distinct().count()\n4\n>>> Order.objects.count()\n227\n>>> Order.objects.values_list('session__user', 'tour').distinct().count()\n194\n>>> MouseMove.objects.count()\n4570\n>>> MouseMove.objects.values_list('session__user', 'tour').distinct().count()\n551\n>>> Session.objects.count()\n333\n>>> Tour.objects.count()\n753\n>>> TourType.objects.count()\n6\n>>> Country.objects.count()\n50\n>>> ViewProfile.objects.count()\n1551\n>>> ViewProfile.objects.values_list('session__user', 'tour').distinct().count()\n619\n>>> ViewProfile.objects.aggregate(Max('duration'))\n{'duration__max': 1195}\n>>> 1195/60\n19\n>>> ViewProfile.objects.aggregate(Avg('duration'))\n{'duration__avg': 88.494500000000002}\n>>> User.objects.annotate(hh=Count('session__click')).filter(hh=0).count()\n67\n>>> User.objects.annotate(hh=Count('session__mousemove')).filter(hh=0).count()\n0\n>>> User.objects.annotate(hh=Count('session__viewprofile')).filter(hh=0).count() \n8\n>>> Tour.objects.filter(viewprofile__isnull=False).distinct().count()\n288\n\nAfter 0.2 division\n45 test pairs selected from total 227 pairs.\n>>> ViewProfile.objects.count()\n1226\n>>> MouseMove.objects.count()\n3645\n>>> Click.objects.count()\n522\n>>> Question.objects.count()\n4\n\n\n\"\"\"\n\ndef _parse_all(filename):\n \"\"\"Parse the mega csv file\"\"\"\n \n filename = _get_abs_path(filename) \n\n # open the csv reader\n reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n\n # compile the pattern\n pattern = re.compile(regexp_url)\n \n # open pages \n open_pages = {}\n \n try:\n for x, user_id, user_session_id, timestamp, action, action_parameter, object_ in reader:\n \n if reader.line_num % 5000 == 0:\n print '%s lines processed' % reader.line_num \n \n cur_timestamp = parse(timestamp)\n\n # do the cleanup\n open_pages = dict((k, view_page) for (k, view_page) in open_pages.items() \n if (cur_timestamp - view_page.timestamp).seconds > decay_secs)\n \n # parse the ids\n user_id = int(user_id)\n \n # take only users that bought something\n if not (user_id in USERS_IDS_ORDER):\n continue\n \n # find out whether it's done on an interesting object - tour.\n # try matching the object, if it's not, go on\n m = pattern.match(object_)\n \n if not m:\n continue\n\n # try getting the action class, if not available, continue\n try:\n act_class = ACTIONS[action]\n except KeyError:\n continue\n \n # for object interaction it's a bit more difficult\n if action == 'OBJECT_INTERACTION': \n try:\n act_class = act_class[action_parameter]\n except KeyError:\n continue\n \n \n # prepare the user\n #\n\n # parse the ids\n user_session_id = int(user_session_id)\n \n # get or create the user, session\n user, x = User.objects.get_or_create(id=user_id)\n session, x = 
Session.objects.get_or_create(user=user, session_no=user_session_id)\n \n \n # prepare the tour\n #\n \n # get the matched stuff\n country_name = m.group('country_name')\n tour_type_name = m.group('tour_type')\n tour_name = m.group('tour_name')\n \n # get or create the country, tour type.\n country, x = Country.objects.get_or_create(name=country_name)\n tour_type, x = TourType.objects.get_or_create(name=tour_type_name)\n tour, x = Tour.objects.get_or_create(\n name=tour_name, \n defaults={\n 'country': country,\n 'tour_type': tour_type,\n 'url': object_,\n }\n )\n \n # parse the date\n timestamp = parse(timestamp)\n \n # open/close page need special handling\n #\n \n if action == 'PAGE_OPEN':\n \n view_page = act_class(\n session=session,\n tour=tour,\n timestamp=timestamp)\n \n # save the action to the dictionary waiting for the close event\n open_pages[(tour, user)] = view_page\n \n continue\n \n if action == 'PAGE_CLOSE':\n\n # try finding the open, if not present, go ahead\n try:\n view_page = open_pages[(tour, user)]\n except KeyError:\n continue\n \n # count the duration and save the event if reasonable\n view_page.duration = (timestamp - view_page.timestamp).seconds\n\n if view_page.duration < decay_secs:\n view_page.save()\n\n continue\n \n # create the action\n act_class.objects.create(\n session=session,\n tour=tour,\n timestamp=timestamp)\n except:\n print \"error on line %d\" % reader.line_num\n raise\n\n \ndef _get_abs_path(filename):\n \"\"\"Get the absolute path from the relative\"\"\"\n return os.path.join(os.path.dirname(__file__), filename)\n \n\n" }, { "alpha_fraction": 0.5779221057891846, "alphanum_fraction": 0.5811688303947449, "avg_line_length": 41.573768615722656, "blob_id": "ae8bdf6dbdf6d68324e4adf9ea944a09bec444db", "content_id": "3827e6e651659b6d0d80cecd983cb3378fa667d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5236, "license_type": "no_license", "max_line_length": 119, "num_lines": 122, "path": "/UnresystCD/code/adapter/unresyst/aggregator/combining_aggregator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The aggregator that uses a combinator to aggregate\"\"\"\n\nfrom base import BaseAggregator\nfrom unresyst.constants import *\nfrom unresyst.models.abstractor import RelationshipInstance, \\\n PredictedRelationshipDefinition, Cluster, BiasInstance\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance, \\\n AggregatedBiasInstance\nfrom unresyst.combinator.combination_element import RelSimilarityCombinationElement, \\\n ClusterSimilarityCombinationElement, BiasCombinationElement \n\nclass CombiningAggregator(BaseAggregator):\n \"\"\"A class using unresyst.combinator for creating aggregates\"\"\"\n \n def aggregate_rules_relationships(self, recommender_model):\n \"\"\"See the base class for documentation.\n \n Does only similarity aggregation + clusters, the preference aggregation is left out\n for the algorithm.\n \"\"\"\n # get pairs that have some similarities\n # \n \n # take all rule/relationship instances, that don't belong \n # to the predicted_relationship - their id unique pairs\n #\n \n predicted_def = PredictedRelationshipDefinition.objects.get(\n recommender=recommender_model)\n\n qs_similarities = RelationshipInstance.objects\\\n .exclude(definition=predicted_def)\\\n .filter(definition__recommender=recommender_model)\\\n .filter(definition__rulerelationshipdefinition__relationship_type__in=[\n RELATIONSHIP_TYPE_OBJECT_OBJECT,\n 
RELATIONSHIP_TYPE_SUBJECT_SUBJECT,\n RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT]) \n \n # the unique pairs of entities between which there's some similarity\n qs_id_pairs = qs_similarities\\\n .values_list('subject_object1__id', 'subject_object2__id')\\\n .distinct()\n \n # for all pairs that have some similarity\n for id1, id2 in qs_id_pairs.iterator():\n \n combination_elements = []\n\n # similarities:\n #\n \n # filter similarities that are for the pair\n # only one direction is needed, because we're taking it from the same source\n qs_pair_similarity = qs_similarities.filter(\n subject_object1__id=id1, \n subject_object2__id=id2)\n \n # save the entity type - there must be always at least\n # one similarity relationship for the pair, so this is\n # always filled.\n relationship_type = qs_pair_similarity[0].definition.as_leaf_class().relationship_type\n \n # create combining elements from them\n for pair_sim in qs_pair_similarity.iterator():\n\n el = RelSimilarityCombinationElement(rel_instance=pair_sim)\n combination_elements.append(el)\n \n # clusters:\n #\n \n # get clusters the two have in common\n qs_common_clusters = Cluster.objects.filter(clustermember__member__id=id1)\\\n .filter(clustermember__member__id=id2)\n \n # create the compiling elements from them\n for c in qs_common_clusters.iterator():\n \n el = ClusterSimilarityCombinationElement(\n cluster_members=[\n c.clustermember_set.get(member__id=id1),\n c.clustermember_set.get(member__id=id2)]\n )\n combination_elements.append(el) \n \n # aggregation and save:\n #\n \n # aggregate the similarity through the combinator\n aggr = self.combinator.combine_pair_similarities(\n combination_elements=combination_elements)\n \n # create and save the aggregated similarity instance\n aggr.subject_object1_id = id1\n aggr.subject_object2_id = id2\n aggr.recommender = recommender_model\n aggr.relationship_type = relationship_type\n aggr.save()\n\n \n def aggregate_biases(self, recommender_model):\n \"\"\"See the base class for documentation.\n \"\"\"\n \n # all available biases\n qs_biases = BiasInstance.objects.filter(definition__recommender=recommender_model)\n \n # get entities that have some biases: subject/object anything\n qs_ids = qs_biases.values_list('subject_object__id', flat=True).distinct()\n \n for ent_id in qs_ids:\n \n # get the biases for the entity\n ent_biases = [BiasCombinationElement(bias_instance=b) for b in qs_biases.filter(subject_object__id=ent_id)]\n \n # throw them to the combinator\n aggr = self.combinator.combine_entity_biases(entity_biases=ent_biases)\n \n # fill the missing fields and save\n aggr.subject_object_id = ent_id\n aggr.recommender = recommender_model\n aggr.save()\n \n \n" }, { "alpha_fraction": 0.5399905443191528, "alphanum_fraction": 0.5416469573974609, "avg_line_length": 39.08571243286133, "blob_id": "f38ea2de0a428fe42518e9412d418ab5fea47683", "content_id": "de94d585369e1dd52723d67f7d3d2341ba290dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4226, "license_type": "no_license", "max_line_length": 125, "num_lines": 105, "path": "/UnresystCD/code/adapter/unresyst/compilator/combining_compilator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The combining compilator class\"\"\"\n\nfrom base import BaseCompilator\nfrom unresyst.constants import *\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.exceptions import RecommenderBuildError\n\nclass CombiningCompilator(BaseCompilator):\n 
\"\"\"The compilator using the given combinator to combine the predictions\n that it reveals\n \"\"\" \n \n def __init__(\n self, \n combinator, \n depth=DEFAULT_COMPILATOR_DEPTH, \n breadth=DEFAULT_COMPILATOR_BREADTH,\n pair_depth=DEFAULT_COMPILATOR_PAIR_DEPTH):\n \"\"\"The initializer, combinator is not optional.\"\"\" \n \n super(CombiningCompilator, self).__init__(\n combinator=combinator, \n depth=depth, \n breadth=breadth,\n pair_depth=pair_depth)\n \n def compile_prediction(self, recommender_model, dn_subject, dn_object):\n \"\"\"Create a prediction using all available information and the instance combinator.\n \n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender model\n \n @type dn_subject: SubjectObject\n @param dn_subject: the domain neutral subject\n \n @type dn_object: SubjectObject\n @param dn_object: the domain neutral object\n \n @rtype: RelationshipPredictionInstance\n @return: the prediction from what we know, if we don't know anything return None.\n \"\"\"\n # find all we know about the subject - object pair - biases, similarities, clusters,... \n els = self.get_pair_combination_elements(dn_subject=dn_subject, dn_object=dn_object)\n \n if not els:\n return None\n \n # pass it all to the combinator to get the prediction\n pred = self.combinator.combine_pair_prediction_elements(combination_elements=els)\n\n # fill the missing fields in the prediction and save\n pred.recommender = recommender_model\n pred.subject_object1 = dn_subject\n pred.subject_object2 = dn_object\n return pred\n\n \n def compile_all(self, recommender_model):\n \"\"\"Compile preferences, known relationships + similarities.\n \"\"\"\n \n if not self.breadth:\n return\n \n # go through the sujbects (or subjectobjects)\n #\n \n subject_ent_type = ENTITY_TYPE_SUBJECTOBJECT if recommender_model.are_subjects_objects \\\n else ENTITY_TYPE_SUBJECT \n \n qs_subjects = SubjectObject.objects.filter(\n recommender=recommender_model, \n entity_type=subject_ent_type)\n\n print \" Compiling predictions for %d subjects.\" % qs_subjects.count()\n i = 0\n \n for subj in qs_subjects.iterator():\n \n # get the most promising objects for the subject\n promising_objects = self.combinator.choose_promising_objects(\n dn_subject=subj, \n min_count=self.breadth) \n \n if i % 20 == 0:\n print \" %d subjects processed. 
Current promising object count: %d\" % (i, len(promising_objects))\n            i += 1\n            \n            # go through the promising objects\n            for obj in promising_objects:\n                \n                # compile the prediction and save it\n                pred = self.compile_prediction(\n                    recommender_model=recommender_model,\n                    dn_subject=subj,\n                    dn_object=obj)\n\n                # if the compilate is empty, it's an error \n                if not pred:\n                    raise RecommenderBuildError(\n                        message=\"Nothing found for the pair %s, %s, although the object was in promising.\" \\\n                            % (subj, obj),\n                        recommender=recommender_model)\n                \n                pred.save()\n            \n" }, { "alpha_fraction": 0.6433120965957642, "alphanum_fraction": 0.7579618096351624, "avg_line_length": 50, "blob_id": "913794b8d0e2e76a7cd46eeed143ad41269ec4e2", "content_id": "5cb1ba6533861bde80ac13036b5c8d536c48e490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 157, "license_type": "no_license", "max_line_length": 100, "num_lines": 3, "path": "/UnresystCD/code/dataset_scripts/process_lastfm.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\nsed -n '1~750p' /home/pcv/Desktop/userid-timestamp-artid-artname-traid-traname.tsv > tracks30000.tsv\n\nhead -200000 artist_tags.tsv > artist_tags200K.tsv \n\n\n" }, { "alpha_fraction": 0.7053459286689758, "alphanum_fraction": 0.7182297110557556, "avg_line_length": 43.11070251464844, "blob_id": "a9d168831822b1242bfbc717004f4fb39d178621", "content_id": "f6e44ad26e136b9bcb9d8cb905bf8ad1b561740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11953, "license_type": "no_license", "max_line_length": 496, "num_lines": 271, "path": "/docs/CreateMahoutRecommender.md", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "# Mahout for Newbies: How to Create a Recommender #\nA short tutorial on the actions you have to take to create your first Mahout recommender.\n\n## Introduction ##\n\n[Mahout](http://mahout.apache.org/) is a library containing scalable implementations of many machine learning algorithms, notably collaborative filtering and other algorithms used in recommender systems.\n\nThere are many tutorials on creating simple or more complex recommenders, but for Mahout newbies it's quite difficult to cope with all the technical details needed to make Mahout work your way.\n\nI'm not an Eclipse fan, so all instructions use only the shell console. I was running the examples on Ubuntu 10.04, but it should work on most Linux distros and even on Windows with Cygwin.\n\nThis page aims to be a step-by-step guide for Mahout/Maven beginners. I'm also a Mahout/Maven beginner, so any corrections and comments are more than welcome.\n\n## Part 1: Installation ##\nThere's a good [installation guide in the Mahout wiki](https://cwiki.apache.org/confluence/display/MAHOUT/BuildingMahout).\n\nI'm running the 0.4 Mahout release, so everything in this article primarily targets that release, but it should also work on trunk and other releases.\n\nAs described in the guide, after installing the JDK and Maven you should do something like:\n```\ncd core\nmvn compile\nmvn install\n```\n\nA few things to note here:\n * If your installation fails, try the fix proposed in the tutorial:\n```\nrm -rf ~/.m2/\nmvn clean install\n```\n * If this doesn't help, try again\n```\nmvn install\n```\nand look into `core/target/surefire-reports/`. If you can find the cause of the problem there, you're a lucky person. 
If not, you can seek help on the `[email protected]` mailing list as [I successfully did.](http://comments.gmane.org/gmane.comp.apache.mahout.user/5881)\n * Alternatively, you can try running the install without tests\n```\nmvn -Dmaven.test.skip=true install\n```\n\n## Part 2: Creating a simple recommender ##\n\nNow that you have Mahout successfully installed, you can try some built-in examples, as [described in the Mahout wiki](https://cwiki.apache.org/confluence/display/MAHOUT/RecommendationExamples). First you have to download a dataset and then pass it as the `-i` parameter. It's a kind of detective work, but you can find some hints in the source code of the examples. I've successfully run the recommender on [the Book Crossing dataset](http://www.informatik.uni-freiburg.de/~cziegler/BX/)\n\nAnd now to creating your own recommender on your own data. Editing the examples isn't a good idea, because the code gets quite messy and all the paths are terribly long. Instead, follow the steps below.\n\n### Create a Maven project ###\nFirst, you have to create a Maven project. Go to the Mahout directory (the parent of the core directory) and run something like:\n```\nmvn archetype:create -DarchetypeGroupId=org.apache.maven.archetypes -DgroupId=com.unresyst -DartifactId=mahoutrec\n```\nThis creates an empty project called `mahoutrec` with the package namespace `com.unresyst`. Now change to the `mahoutrec` directory. You can try out the new project by running:\n```\nmvn compile\nmvn exec:java -Dexec.mainClass=\"com.unresyst.App\"\n```\nThis should print the Hello World message. If not, try passing the `-e` option to the second command to see what's going wrong.\n\nMore info on creating a Maven project can be found in [the official guide](http://maven.apache.org/guides/getting-started/)\n
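\nFor orientation, the skeleton generated by the default quickstart archetype should look roughly like this (an illustrative listing, not verbatim tool output):\n```\nmahoutrec/\n    pom.xml\n    src/main/java/com/unresyst/App.java\n    src/test/java/com/unresyst/AppTest.java\n```\nThe `src/main/java/com/unresyst/` directory is where the recommender class created later in this tutorial will live.\n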
The following `<dependency>` elements must be under the `<dependencies>` parent element.\n```\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-core</artifactId>\n <version>0.4</version>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-math</artifactId>\n <version>0.4</version>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-math</artifactId>\n <version>0.4</version>\n <type>test-jar</type>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-utils</artifactId>\n <version>0.4</version>\n </dependency>\n```\n\n> If you are using a version other than 0.4, replace 0.4 with your version number.\n * Set the relative path to the parent project `pom.xml` by adding the `<relativePath>` element to the `<parent>` element:\n```\n <relativePath>../pom.xml</relativePath> \n```\nYour whole `pom.xml` should look like this:\n```\n<?xml version=\"1.0\"?>\n<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <modelVersion>4.0.0</modelVersion>\n <parent>\n <artifactId>mahout</artifactId>\n <groupId>org.apache.mahout</groupId>\n <version>0.4</version>\n <relativePath>../pom.xml</relativePath> \n </parent>\n <groupId>com.unresyst</groupId>\n <artifactId>mahoutrec</artifactId>\n <version>1.0-SNAPSHOT</version>\n <name>mahoutrec</name>\n <url>http://maven.apache.org</url>\n <dependencies>\n <dependency>\n <groupId>junit</groupId>\n <artifactId>junit</artifactId>\n <version>3.8.1</version>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-core</artifactId>\n <version>0.4</version>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-math</artifactId>\n <version>0.4</version>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-math</artifactId>\n <version>0.4</version>\n <type>test-jar</type>\n <scope>test</scope>\n </dependency>\n <dependency>\n <groupId>org.apache.mahout</groupId>\n <artifactId>mahout-utils</artifactId>\n <version>0.4</version>\n </dependency>\n </dependencies>\n <properties>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n </properties>\n</project>\n```\n\nNow try compiling your project by running\n```\nmvn compile\n```\nYou should get a BUILD SUCCESSFUL message.\n\n### Create a simple recommender ###\nAnd now to the promised creation of your own recommender running on your own data. First we'll create some dummy data to feed the recommender. Create a `datasets` directory in your project directory and create the `dummy-bool.csv` file with the following contents:\n```\n#userId,itemId\n1,3\n1,4\n2,44\n2,46\n3,3\n3,5\n3,6\n4,3\n4,5\n4,11\n4,44\n5,1\n5,2\n5,4\n```\nThe first line is a comment; all the others are boolean preferences for items expressed by the users. Boolean preferences are useful when you don't have any explicit user-item ratings in your data. A boolean preference can mean e.g. that the user has viewed a profile page of the item. The example file states that user `1` has viewed items `3` and `4`, user `2` items `44` and `46`, and so on.\n\nNow, let's create the recommender. 
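Before that, if you want to sanity-check the CSV, a few lines of Python will do (entirely optional and just a sketch of mine: it assumes you have Python 2 around, which Mahout itself doesn't need):\n```\n# print the parsed boolean preferences from datasets/dummy-bool.csv\nimport csv\n\nwith open('datasets/dummy-bool.csv') as f:\n for row in csv.reader(f):\n # skip the comment line\n if row[0].startswith('#'):\n continue\n print 'user %s has seen item %s' % (row[0], row[1])\n```\nIf the script prints a line for every data row, the file is well-formed. Back to the recommender. 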
Create a file UnresystBoolRecommend.java in the well-hidden subdirectory `src/main/java/com/unresyst/` of your project directory. Place the following content into the file:\n```\npackage com.unresyst;\n\nimport java.io.File;\nimport java.io.FileNotFoundException;\nimport java.util.List;\nimport java.io.IOException;\n\nimport org.apache.commons.cli2.OptionException; \nimport org.apache.mahout.cf.taste.common.TasteException;\nimport org.apache.mahout.cf.taste.impl.model.file.FileDataModel;\nimport org.apache.mahout.cf.taste.impl.recommender.CachingRecommender;\nimport org.apache.mahout.cf.taste.impl.recommender.slopeone.SlopeOneRecommender;\nimport org.apache.mahout.cf.taste.model.DataModel;\nimport org.apache.mahout.cf.taste.recommender.RecommendedItem;\nimport org.apache.mahout.cf.taste.impl.common.LongPrimitiveIterator;\n\npublic class UnresystBoolRecommend {\n \n public static void main(String... args) throws FileNotFoundException, TasteException, IOException, OptionException {\n \n // create data source (model) - from the csv file \n File ratingsFile = new File(\"datasets/dummy-bool.csv\"); \n DataModel model = new FileDataModel(ratingsFile);\n \n // create a simple recommender on our data\n CachingRecommender cachingRecommender = new CachingRecommender(new SlopeOneRecommender(model));\n\n // for all users\n for (LongPrimitiveIterator it = model.getUserIDs(); it.hasNext();){\n long userId = it.nextLong();\n \n // get the recommendations for the user\n List<RecommendedItem> recommendations = cachingRecommender.recommend(userId, 10);\n \n // if empty, print a message\n if (recommendations.size() == 0){\n System.out.print(\"User \");\n System.out.print(userId);\n System.out.println(\": no recommendations\");\n }\n \n // print each recommendation in the list\n for (RecommendedItem recommendedItem : recommendations) {\n System.out.print(\"User \");\n System.out.print(userId);\n System.out.print(\": \");\n System.out.println(recommendedItem);\n }\n } \n }\n}\n```\nSome notes on that:\n * If Java isn't your native language, be aware of the package declaration on the first line. If you omit it, the Maven runner will never find your class.\n * In the main function we first create a data model from our CSV file, then we instantiate a simple Slope One recommender filled with the data. Then we iterate over all users in the dataset and print recommendations for each of them.\n * Sorry for my poor Java.\n\nTo run the recommender on our dummy data, do:\n```\nmvn compile\n```\nThis should end with BUILD SUCCESSFUL. Then run the main function:\n```\nmvn exec:java -Dexec.mainClass=\"com.unresyst.UnresystBoolRecommend\" \n```\n\nIn the middle of the Maven spam you should be able to see:\n```\nUser 1: RecommendedItem[item:5, value:1.0]\nUser 2: RecommendedItem[item:5, value:1.0]\nUser 2: RecommendedItem[item:3, value:1.0]\nUser 3: no recommendations\nUser 4: no recommendations\nUser 5: RecommendedItem[item:5, value:1.0]\nUser 5: RecommendedItem[item:3, value:1.0]\n```\nThese are recommendations for our users. Our recommender recommends item `5` to user `1` with certainty 1.0 (this is a constant value for boolean recommenders), items `5` and `3` to user `2`, and so on.\n\nTo reduce the Maven spam you can pass the `-q` option; to debug, pass the `-e` option.\n\n## Conclusion ##\nThat's it, now you've got your first Mahout recommender. You can now experiment with various data and various recommenders as described in other Mahout recommender tutorials. 
Some of the good ones:\n * A good starting guide, from which the example is partly adapted: http://philippeadjiman.com/blog/2009/11/11/flexible-collaborative-filtering-in-java-with-mahout-taste/\n * The definitive IBM guide: http://www.ibm.com/developerworks/java/library/j-mahout/\n\nThanks for reading; any suggestions, corrections and comments are appreciated.\n\nSources and further reading\n * https://cwiki.apache.org/confluence/display/MAHOUT/Mahout+Wiki\n * https://cwiki.apache.org/confluence/display/MAHOUT/BuildingMahout\n * http://maven.apache.org/plugins/maven-surefire-plugin/howto.html\n * http://www.jarvana.com/jarvana/view/org/apache/mahout/mahout-core/0.4/mahout-core-0.4-javadoc.jar!/index-all.html\n * http://shuyo.wordpress.com/2011/02/14/mahout-development-environment-with-maven-and-eclipse-2/" }, { "alpha_fraction": 0.5778035521507263, "alphanum_fraction": 0.5778035521507263, "avg_line_length": 38.21590805053711, "blob_id": "31bc717d67fe630404e9b123a8fffacc887f435a", "content_id": "b3eb7cf40522a9aca1ea39fc12659a7f44225cba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3451, "license_type": "no_license", "max_line_length": 108, "num_lines": 88, "path": "/UnresystCD/code/adapter/unresyst/algorithm/compiling_algorithm.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The CompilingAlgorithm class\"\"\"\n\nfrom base import BaseAlgorithm\n\nclass CompilingAlgorithm(BaseAlgorithm):\n \"\"\"The algorithm that compiles aggregated similarities and biases into\n predictions.\n \"\"\"\n def __init__(self, inner_algorithm, compilator):\n \"\"\"The initializer\"\"\"\n \n super(CompilingAlgorithm, self).__init__(inner_algorithm=inner_algorithm)\n \n self.compilator=compilator\n \"\"\"The compilator that will be used during the build\"\"\"\n\n def build(self, recommender_model):\n \"\"\"See the base class for documentation.\n \n Compiles and calls the inner algorithm build\n \"\"\" \n print \" Compiling aggregates and predictions.\"\n \n self.compilator.compile_all(recommender_model) \n \n\n print \"Predictions compiled. 
Building the inner algorithm...\"\n \n super(CompilingAlgorithm, self).build(recommender_model=recommender_model) \n\n \n def get_relationship_prediction(self, recommender_model, dn_subject, dn_object, remove_predicted):\n \"\"\"See the base class for the documentation.\n \n Get all available info for the pair.\n \"\"\"\n \n # get what we have from the inner algo\n inner_prediction = super(CompilingAlgorithm, self).get_relationship_prediction(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n dn_object=dn_object,\n remove_predicted=remove_predicted)\n \n # if it's trivial, then return it\n if remove_predicted and inner_prediction.is_trivial:\n return inner_prediction\n \n # if it's not uncertain, it was already available for the inner algorithm,\n # return it\n if not inner_prediction.is_uncertain:\n return inner_prediction\n \n # otherwise compile the prediction from all available info \n prediction = self.compilator.compile_prediction(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n dn_object=dn_object)\n\n # if it found something, return it \n if prediction: \n return prediction\n \n # otherwise return the uncertain\n return self._get_uncertain_prediction(\n recommender_model=recommender_model, \n dn_subject=dn_subject, \n dn_object=dn_object\n ) \n \n def get_recommendations(self, recommender_model, dn_subject, count, expectancy_limit, remove_predicted):\n \"\"\"See the base class for the documentation. \n\n Here: fill the recommendations up to the count\n with random predictions, fill in all info we know, or create uncertain \n predictions.\n \"\"\"\n # get what we have from the inner algo\n inner_recs = super(CompilingAlgorithm, self).get_recommendations(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n count=count,\n expectancy_limit=expectancy_limit,\n remove_predicted=remove_predicted)\n\n # add some random recommendations here and find info for them \n # (the same as above)\n return inner_recs\n" }, { "alpha_fraction": 0.5001169443130493, "alphanum_fraction": 0.5038596391677856, "avg_line_length": 35.85344696044922, "blob_id": "7c921da5cce0e18772c9d5d422e47874ee9e14b0", "content_id": "db3e6d7cc76017b8f98f4769ac32aca2d7aac2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4275, "license_type": "no_license", "max_line_length": 150, "num_lines": 116, "path": "/UnresystCD/code/adapter/unresyst/recommender/rank_evaluation.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Evaluator defining a method for the average prediction rank\"\"\"\n\nfrom unresyst.exceptions import EmptyTestSetError\nfrom unresyst.constants import *\nfrom unresyst.recommender.evaluation import BaseEvaluator\n\ndef index(seq, f):\n \"\"\"Return the index of the first item in seq where f(item) == True.\"\"\"\n return next((i for i in xrange(len(seq)) if f(seq[i])), None)\n\n\nclass RankEvaluator(BaseEvaluator):\n \"\"\"The evaluator for counting the average prediction rank\"\"\"\n \n SUBJ_IDS = []\n \"\"\"A list of subjects (domain specific ids) that are to be tested\"\"\"\n \n \n @classmethod\n def evaluate_predictions(cls, recommender, save_predictions=False):\n \"\"\"See the base class for documentation.\n \n Here it counts the rank, without calling any metric.\n \"\"\"\n \n # get the test pairs\n qs_pairs = cls._get_cleared_pairs()\n \n # evaluate the rank for each subject\n # \n\n obj_count = recommender.objects.count()\n rank_sum = 0\n rank_count = 0\n\n print \"Evaluating %d subjects\" % 
len(cls.SUBJ_IDS) \n \n \n # go through the requested subjects\n for subj, i in zip(recommender.subjects.filter(pk__in=cls.SUBJ_IDS), range(1, len(cls.SUBJ_IDS) + 1)):\n \n # if the test set for the subject is empty, skip it\n test_set_subj = qs_pairs.filter(subj=subj) \n if not test_set_subj:\n continue\n \n # create an array of object predictions for the subject\n #\n neg_array = []\n pos_array = []\n uncertain_count = 0 \n \n for obj in recommender.objects.all():\n \n # get the prediction\n exp_prediction = recommender.predict_relationship(subj, obj)\n \n if exp_prediction is None or exp_prediction.is_uncertain: \n uncertain_count += 1\n \n # otherwise add it to the right array\n else:\n arr = pos_array if exp_prediction.expectancy > UNCERTAIN_PREDICTION_VALUE \\\n else neg_array\n \n arr.append((obj, exp_prediction.expectancy))\n\n print \"all predicted\"\n\n # sort the arrays by expectancy\n pos_array.sort(key=lambda el: el[1], reverse=True) \n neg_array.sort(key=lambda el: el[1], reverse=True) \n\n print \"all sorted\" \n pos_count = len(pos_array)\n neg_count = len(neg_array)\n \n # count the rank that will be given to uncertains.\n if uncertain_count:\n \n # the rank of the first uncertain\n rank_first = float(pos_count) / (obj_count - 1)\n \n # the rank of the last uncertain\n rank_last = float(pos_count + uncertain_count) / (obj_count - 1)\n \n uncertain_rank = (rank_first + rank_last) / 2\n \n rank_count += test_set_subj.count()\n \n # count the rank for all objects for the subjects in the test set\n for obj in test_set_subj:\n \n # try finding it in positives\n ipos = index(pos_array, lambda el: el[0] == obj)\n if not (ipos is None):\n rank_sum += float(ipos) / (obj_count - 1)\n continue\n \n # try finding it in negatives\n ineg = index(neg_array, lambda el: el[0] == obj)\n if not (ineg is None):\n rank_sum += float(pos_count + uncertain_count + ineg) / (obj_count - 1)\n continue\n \n # otherwise it's uncertain\n rank_sum += uncertain_rank\n \n print \"%d subjects evaluated. 
current avg rank: %f, current rank count: %d\" % (i, rank_sum/rank_count if rank_count else -1.0, rank_count)\n \n\n res = rank_sum / rank_count\n \n print \"Average rank: %f\" % res\n \n return res\n" }, { "alpha_fraction": 0.8055555820465088, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 70, "blob_id": "ff968e097fa8518be7d988877b40629eb655decc", "content_id": "435580c197e421c8796039673b75a2a29be25919", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 72, "license_type": "no_license", "max_line_length": 70, "num_lines": 1, "path": "/UnresystCD/code/mahout/mahoutrec/unresystboolrecommend.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "mvn -q exec:java -Dexec.mainClass=\"com.unresyst.UnresystBoolRecommend\" \n" }, { "alpha_fraction": 0.653235673904419, "alphanum_fraction": 0.6556776762008667, "avg_line_length": 34.0428581237793, "blob_id": "c55413980319a261b4eb939e2134eb85490aabc3", "content_id": "9496068ad98a18b21d0150eb245db04ed942f8c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2457, "license_type": "no_license", "max_line_length": 82, "num_lines": 70, "path": "/UnresystCD/code/adapter/unresyst/models/aggregator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models whose instances are created by the aggregator package.\n\nThe aggregated relationship.\n\"\"\"\n\nfrom django.db import models\n\nfrom base import BaseRelationshipInstance\nfrom unresyst.constants import *\n\nclass AggregatedRelationshipInstance(BaseRelationshipInstance):\n \"\"\"A representation of an aggregated relationship between two subject/objects\n \n There can be only one aggregated relationship for each subject/object pair\n for a recommender.\n \"\"\"\n\n expectancy = models.FloatField()\n \"\"\"The probability of the relationship between subject/object.\n A number from [0, 1].\n \"\"\"\n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender it belongs to\"\"\"\n \n relationship_type = models.CharField(max_length=MAX_LENGTH_RELATIONSHIP_TYPE, \n choices=RELATIONSHIP_TYPE_CHOICES)\n \"\"\"A string indicating whether it's an S-O, O-O, S-S, or SO-SO relationship.\"\"\"\n \n additional_unique = ('recommender', )\n \"\"\"There can be multiple pairs for one recommender\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\" \n ret = super(AggregatedRelationshipInstance, self).__unicode__()\n\n return ret + \", %f\" % (self.expectancy) \n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('subject_object1', 'subject_object2', 'recommender')\n \"\"\"For each recommender there can be only one subject-object pair.\"\"\"\n \nclass AggregatedBiasInstance(models.Model):\n \"\"\"An aggregated bias of a subjectobject\"\"\" \n \n expectancy = models.FloatField()\n \"\"\"The probability of the subject/object being in the predicted_relationship\n A number from [0, 1].\n \"\"\"\n \n subject_object = models.ForeignKey('unresyst.SubjectObject')\n \"\"\"The biased subject/object.\"\"\" \n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender it belongs to\"\"\"\n \n description = models.TextField(default='', blank=True)\n \"\"\"The filled description of the aggregated bias.\"\"\" \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\" \n return u\"%s: %f\" % (self.subject_object, self.expectancy)\n \n
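 # A possible convenience helper, just a sketch: it isn't used anywhere else in\n # unresyst and only relies on the standard Django ORM already used in this module.\n # It returns the biases the recommender is most confident about.\n @classmethod\n def strongest_biases(cls, recommender, limit=10):\n \"\"\"Return at most limit bias instances with the highest expectancy.\"\"\"\n return cls.objects.filter(recommender=recommender).order_by('-expectancy')[:limit]\n\n 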
class Meta:\n app_label = 'unresyst' \n \n unique_together = ('subject_object', 'recommender')\n \"\"\"For each recommender there can be only one subject-object pair.\"\"\" \n" }, { "alpha_fraction": 0.6512749791145325, "alphanum_fraction": 0.6529979109764099, "avg_line_length": 41.67647171020508, "blob_id": "bfc9e424f3f5bdfbb30e9355241bfcb7ef2d306e", "content_id": "ed6202f65567939db68bc509050d78c74a0f14f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2902, "license_type": "no_license", "max_line_length": 129, "num_lines": 68, "path": "/code/mahout/mahoutrec/src/main/java/com/unresyst/UnresystRecommend.java", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "package com.unresyst;\n\nimport java.io.File;\nimport java.io.FileNotFoundException;\nimport java.util.List;\nimport java.io.IOException;\nimport java.io.PrintWriter;\nimport java.io.OutputStreamWriter;\nimport java.io.FileOutputStream;\nimport java.nio.charset.Charset;\n\nimport org.apache.commons.cli2.OptionException; \nimport org.apache.mahout.cf.taste.common.TasteException;\nimport org.apache.mahout.cf.taste.impl.model.file.FileDataModel;\nimport org.apache.mahout.cf.taste.impl.recommender.CachingRecommender;\nimport org.apache.mahout.cf.taste.impl.recommender.slopeone.SlopeOneRecommender;\nimport org.apache.mahout.cf.taste.model.DataModel;\nimport org.apache.mahout.cf.taste.recommender.RecommendedItem;\nimport org.apache.mahout.cf.taste.impl.common.LongPrimitiveIterator;\nimport org.apache.mahout.common.FileLineIterable;\nimport org.apache.mahout.common.IOUtils;\n\npublic class UnresystRecommend {\n \n public static void main(String[] args) throws FileNotFoundException, TasteException, IOException, OptionException {\n\n // parameters: train data filename, number of recommendations per user , output filename \n String trainFilename = args[0]; \n int recCount = Integer.parseInt(args[1]);\n String outputFilename = args[2];\n \n // create data source (model) - from the train csv file \n File ratingsFile = new File(trainFilename); \n DataModel model = new FileDataModel(ratingsFile); \n \n // create a simple recommender on our data\n CachingRecommender cachingRecommender = new CachingRecommender(new SlopeOneRecommender(model));\n \n // create recommendations for all users and write them to the output file\n //\n \n // open the file to write to\n File resultFile = new File(outputFilename);\n PrintWriter writer = new PrintWriter(new OutputStreamWriter(new FileOutputStream(resultFile), Charset.forName(\"UTF-8\")));\n \n // for all users\n for (LongPrimitiveIterator it = model.getUserIDs(); it.hasNext();){\n long userId = it.nextLong();\n \n // get the recommendations for the user\n List<RecommendedItem> recommendations = cachingRecommender.recommend(userId, recCount); \n \n \n // print the list of recommendations for each recommendation\n for (RecommendedItem recommendedItem : recommendations) {\n\n long itemId = recommendedItem.getItemID();\n float prediction = recommendedItem.getValue();\n \n writer.println(String.format(\"%d,%d,%f\", userId, itemId, prediction));\n }\n }\n \n // writer spam\n writer.flush();\n IOUtils.quietClose(writer); \n }\n}\n" }, { "alpha_fraction": 0.6050138473510742, "alphanum_fraction": 0.6061981916427612, "avg_line_length": 28.97633171081543, "blob_id": "621e8ed2a71bc636df4a679453b9be65b4736211", "content_id": "68f32ee383b09220c15fbba11b7af8ff59160436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 5066, "license_type": "no_license", "max_line_length": 95, "num_lines": 169, "path": "/code/adapter/travel/models.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models for the travel agency dataset\"\"\"\n\nfrom django.db import models\n\n\nfrom constants import *\nfrom unresyst.models import BaseEvaluationPair\n\nclass User(models.Model):\n \"\"\"The user\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return 'user_%d' % self.id \n\nclass Session(models.Model):\n \"\"\"User's session with the system\"\"\"\n \n user = models.ForeignKey('travel.User')\n \"\"\"The user\"\"\"\n \n session_no = models.PositiveIntegerField()\n \"\"\"The number of session for the user\"\"\"\n \n class Meta:\n unique_together = ('user', 'session_no')\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u'user %s: session: %d' % (self.user, self.session_no)\n \nclass Country(models.Model):\n \"\"\"The country for a tour.\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The name of the country\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass TourType(models.Model):\n \"\"\"A model for a type of the tour.\"\"\" \n \n name = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The name of the type.\"\"\" \n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass Tour(models.Model):\n \"\"\"The tour\"\"\" \n \n name = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The name\"\"\"\n \n url = models.URLField(max_length=MAX_LENGTH_URL, verify_exists=False)\n \"\"\"The url without the final id and without GET parameters\"\"\"\n \n country = models.ForeignKey('travel.Country')\n \"\"\"The country the tour is to\"\"\"\n \n tour_type = models.ForeignKey('travel.TourType')\n \"\"\"The type of the tour\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass Action(models.Model):\n \"\"\"An abstract class for a user action\"\"\"\n\n session = models.ForeignKey('travel.Session')\n \"\"\"The session in which the action was taken (identifies the user)\"\"\"\n \n tour = models.ForeignKey('travel.Tour')\n \"\"\"The tour on which the action was taken\"\"\"\n \n timestamp = models.DateTimeField()\n \"\"\"The date and time the track was played\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"%s: %s\" % (self.session.user, self.tour)\n \n class Meta:\n abstract = True \n \nclass Order(Action):\n \"\"\"User ordered a tour\"\"\"\n pass\n\nclass Question(Action):\n \"\"\"User asked a question about a tour\"\"\"\n pass\n \nclass Click(Action):\n \"\"\"User clicked on something on the tour profile\"\"\"\n pass\n \nclass MouseMove(Action):\n \"\"\"User moved the mouse over something on the tour profile\"\"\"\n pass\n \nclass ViewProfile(Action):\n \"\"\"User viewed the profile of the tour\"\"\"\n \n duration = models.PositiveIntegerField(null=True)\n \"\"\"How long the user has been viewing the profile, \n In seconds.\"\"\" \n \n\nclass TourOrderEvalPair(BaseEvaluationPair):\n \"\"\"An abstract class for the artist evaluators\"\"\"\n\n subj = models.ForeignKey('travel.User')\n \"\"\"The subject\"\"\"\n \n obj = models.ForeignKey('travel.Tour')\n 
\"\"\"The object\"\"\"\n \n test_ratio = 0.2\n \"\"\"The ratio of pairs to select to test pairs\"\"\"\n\n class Meta:\n app_label = 'travel' \n \n @classmethod \n def select(cls, i=0):\n \"\"\"See the base class for the documentation.\"\"\" \n \n all_count = Order.objects.count() \n test_count = int(cls.test_ratio * all_count ) \n train_count = all_count - test_count\n\n min_stamp_test = Order.objects.order_by('-timestamp')[test_count-1].timestamp\n\n test_pairs=Order.objects.filter(timestamp__gte=min_stamp_test)\n train_pairs = Order.objects.filter(timestamp__lt=min_stamp_test)\n \n # remove all other feedback newer than the test timestamp\n ViewProfile.objects.filter(timestamp__gt=min_stamp_test).delete()\n MouseMove.objects.filter(timestamp__gt=min_stamp_test).delete()\n Click.objects.filter(timestamp__gt=min_stamp_test).delete()\n Question.objects.filter(timestamp__gt=min_stamp_test).delete()\n \n # take 1/5 of the orders, remove them and put them to test data \n # save to test, remove from build\n for order in test_pairs.iterator():\n\n # create the test pair for the order\n TourOrderEvalPair.objects.create(\n subj=order.session.user,\n obj=order.tour,\n expected_expectancy=EXPECTED_EXPECTANCY_ORDERED,\n )\n \n \n # delete the order\n order.delete()\n \n print \"%d test pairs selected from total %d pairs.\" % (test_count, all_count) \n\n def get_success(self):\n return True\n" }, { "alpha_fraction": 0.6163253784179688, "alphanum_fraction": 0.6197383403778076, "avg_line_length": 32.46666717529297, "blob_id": "5191888627dd960a304bff6a9351cab6d5565e4d", "content_id": "d455b849ec7714785a1e0ffbf66fbc292018548f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3516, "license_type": "no_license", "max_line_length": 87, "num_lines": 105, "path": "/UnresystCD/code/adapter/unresyst/models/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Base classes for the unresyst application.\"\"\"\n\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom symmetric import SymmetricalRelationship\nfrom unresyst.constants import *\n\nclass ContentTypeModel(models.Model):\n \"\"\"An abstract base class for all models having some subclasses whose\n instances shouldn't be always returned\n \n Be careful when using multiple inheritance: this class shouldbe used\n as the last and save() shouldn't be overriden in none of the other\n base classes\n \"\"\"\n\n content_type = models.ForeignKey(ContentType,editable=False, null=True)\n \"\"\"The actual type of the object.\"\"\"\n \n @classmethod\n def remove_subclass_objects(cls):\n \"\"\"Get instances only of this class\"\"\"\n # get the content type\n cont_type = ContentType.objects.get_for_model(cls)\n \n # return only this cont type\n return cls.objects.filter(content_type=cont_type)\n \n def as_leaf_class(self):\n \"\"\"Get the object as the whole inherited class\"\"\"\n\n # get the leaf class\n content_type = self.content_type\n Model = content_type.model_class()\n \n # get the appropriate object\n if (Model == self.__class__):\n return self\n return Model.objects.get(pk=self.pk) \n\n def save(self,*args, **kwargs):\n \"\"\"Save with the right content type\"\"\"\n \n if not self.content_type:\n self.content_type = ContentType.objects.get_for_model(self.__class__)\n \n super(ContentTypeModel, self).save(*args, **kwargs)\n\n\n class Meta:\n abstract = True \n app_label = 'unresyst' \n\n\nclass 
BaseRelationshipInstance(SymmetricalRelationship):\n \"\"\"An abstract class, the base class of all s-o, s-s, o-o relationships.\"\"\"\n\n subject_object1 = models.ForeignKey('unresyst.SubjectObject', \\\n related_name='%(class)s_relationships1')\n \"\"\"The first subject/object that is in the relationship.\"\"\" \n \n subject_object2 = models.ForeignKey('unresyst.SubjectObject', \\\n related_name='%(class)s_relationships2')\n \"\"\"The second subject/object that is in the relationship\"\"\" \n \n description = models.TextField(default='', blank=True)\n \"\"\"The description of the relationship/rule instance.\"\"\" \n \n \n attr_name1 = 'subject_object1'\n \"\"\"Overridden attribute name 1\"\"\"\n \n attr_name2 = 'subject_object2'\n \"\"\"Overridden attribute name 2\"\"\"\n\n class Meta:\n abstract = True \n app_label = 'unresyst' \n\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"(%s, %s)\" % (self.subject_object1, self.subject_object2) \n\n\nclass BaseRelationshipDefinition(ContentTypeModel):\n \"\"\"A definition of the relationship that should be predicted. There's only\n one for a recommender.\n \"\"\" \n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the relationship\"\"\" \n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender to which the definition belongs. Each recommender has\n exactly one predicted relationship.\n \"\"\" \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n \n class Meta:\n app_label = 'unresyst'\n\n\n" }, { "alpha_fraction": 0.6709677577018738, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 29.799999237060547, "blob_id": "1ea1e45a1f2dcdcfa4e4e78454aaa69a127b4577", "content_id": "25141e17388802b89ba36cbcaf6eeb24da4e6e83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 155, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/UnresystCD/code/dataset_scripts/process_flixster.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "sort -n links.txt -o slinks.txt\nsort -n ratings.txt -o sratings.txt\n\nhead -15000 sratings.txt > mini/ratings.tsv\nhead -105000 slinks.txt > mini/links.tsv\n\n" }, { "alpha_fraction": 0.6352763175964355, "alphanum_fraction": 0.6361846923828125, "avg_line_length": 31.46666717529297, "blob_id": "9d281e87a9264e93f7f2b601d8131a225df6259b", "content_id": "cf3bc001473d3162fc7420079e877bb8cd03be11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6605, "license_type": "no_license", "max_line_length": 87, "num_lines": 202, "path": "/UnresystCD/code/adapter/unresyst/recommender/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Module containing the definition of BaseRecommender: the base class \nfor user-defined recommenders.\n\"\"\"\n\nfrom unresyst.constants import *\n\nclass BaseRecommender(object):\n \"\"\"The base class for all recommenders.\n \n Defines the interface of the recommender. All methods and data fields\n are empty.\n \"\"\"\n\n # Methods:\n #\n #\n\n # Build phase:\n # \n @classmethod\n def build(cls):\n \"\"\"Build the recommender. 
Process all the rules and relationships \n in order to be able to provide recommendations.\n \"\"\"\n pass\n \n # Recommend phase:\n #\n \n @classmethod \n def predict_relationship(cls, subject, object_, save_to_db=False): \n \"\"\"Get the prediction of the given relationship type between the subject\n and the object. \n \n @type subject: domain specific subject\n @param subject: the subject\n \n @type object_: domain specific object\n @param object_: the object\n \n @type save_to_db: bool\n @param save_to_db: should the obtained predictions be saved, so that they\n don't have to be computed again when asked for the pair next time\n \n @rtype: RelationshipPrediction\n @return: An instance representing the prediction of the relationship \n between the subject and the object, containing the estimated \n probability and explanation. If the prediction isn't known, returns\n a prediction with expectancy 0.5 and an empty explanation\n \n @raise InvalidParameterError: if the given subject or object doesn't \n have a domain neutral representation in the unresyst database. \n Either the recommender hasn't been built or the subject/object was\n added later without updating the recommender.\n \"\"\"\n pass\n \n @classmethod\n def get_recommendations(cls, subject, count=None):\n \"\"\"Get recommendations for the given subject.\n\n @type subject: domain specific subject\n @param subject: the subject\n \n @type count: int\n @param count: a maximum number of objects to be recommended, if not given,\n the default_recommendation_count from the recommender class is used.\n \n @rtype: list of RelationshipPrediction\n @return: A list of instances representing the predictions of the relationship \n between the subject and the objects, containing the estimated \n probability and explanation. If a prediction isn't known, it is returned as\n a prediction with expectancy 0.5 and an empty explanation\n\n @raise InvalidParameterError: if the given subject doesn't \n have a domain neutral representation in the unresyst database. \n Either the recommender hasn't been built or the subject was\n added later without updating the recommender. \n \"\"\"\n pass\n \n \n # Update phase:\n # \n @classmethod \n def add_subject(cls, subject):\n \"\"\"Add subject to the recommender.\"\"\"\n pass\n\n @classmethod\n def add_object(cls, object_):\n \"\"\"Add object to the recommender.\"\"\"\n pass\n\n @classmethod\n def update_subject(cls, subject):\n \"\"\"Update the subject in the recommender, including its relationships\n and applied rules\"\"\"\n pass\n\n\n @classmethod \n def update_object(cls, object_):\n \"\"\"Update the object in the recommender, including its relationships\n and applied rules\"\"\"\n pass\n\n\n @classmethod\n def remove_subject(cls, subject):\n \"\"\"Remove the subject from the recommender, including its relationships\n and applied rules\"\"\"\n pass\n\n\n @classmethod \n def remove_object(cls, object_):\n \"\"\"Remove the object from the recommender, including its relationships\n and applied rules\"\"\"\n pass \n \n \n \n # Domain specific data. Empty, will be overridden in the domain specific recommender\n #\n\n name = \"\"\n \"\"\"The name of the recommender\"\"\"\n \n subjects = None\n \"\"\"The subjects to whom the recommender will recommend.\n Requires the following interface:\n on subjects manager:\n - iterator(): get an iterator on the collection\n - all(): get all subjects\n - get(id=..) 
get the object with the given id\n queryset:\n - exists(): is there something in the queryset?\n on each subject instance:\n - id: an integer id of the subject \n - __unicode__(): printable string\n \"\"\"\n \n objects = None\n \"\"\"The objects that will be recommended.\"\"\" \n\n predicted_relationship = None\n \"\"\"The relationship that will be predicted\"\"\"\n \n relationships = ()\n \"\"\"Relationships among the subjects and objects in the domain\"\"\"\n \n rules = ()\n \"\"\"Rules that can be applied to the domain\"\"\"\n \n cluster_sets = ()\n \"\"\"Clusters to which subjects and objects can be divided\"\"\"\n \n biases = ()\n \"\"\"Bias - predisposition of subjects/objects to be in the predicted_relationship\"\"\"\n\n random_recommendation_description = None\n \"\"\"The description that will be used as a description \n to random recommendations.\"\"\"\n\n # Auxiliary class attributes\n _recommender_model = None\n \"\"\"The database model instance belonging to the class\"\"\"\n \n # Class configuration - the behaviour of the layers below the recommender\n # Empty, will be overridden by the Recommender class\n #\n \n abstractor = None\n \"\"\"The class that will be used for the abstractor level. Can be \n overridden in subclasses\"\"\" \n \n algorithm = None\n \"\"\"The class that will be used for the algorithm level. Can be \n overridden in subclasses\"\"\"\n \n remove_predicted_from_recommendations = None\n \"\"\"Should the instances of the predicted_relationship be removed from\n the recommendation list?\n \"\"\"\n \n save_all_to_predictions = None\n \"\"\"Should also instances of predicted_relationship be saved to predictions?\n \"\"\"\n \n recommendation_expectancy_limit = None\n \"\"\"The limit for expectancy above which the objects can be recommended.\n If not None, only objects with expectancy above the limit are recommended.\n A reasonable limit is 0.5 - the recommendations then don't include random \n objects.\n \"\"\"\n \n verbose_build = None\n \"\"\"Should messages be printed during the build?\"\"\"\n \n explicit_rating_rule = None\n \"\"\"If given, this rule is exported, not the predicted_relationship\"\"\"\n" }, { "alpha_fraction": 0.6346863508224487, "alphanum_fraction": 0.6346863508224487, "avg_line_length": 19.820512771606445, "blob_id": "ff32dd7225c9e17f47ae2c0d66a81b75a053a4cd", "content_id": "ae5df24917045a80cc0081f9e32c1d5fa3829312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 813, "license_type": "no_license", "max_line_length": 132, "num_lines": 39, "path": "/UnresystCD/code/adapter/lastfmeval.sh", "repo_name": 
"cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# run the evaluation- recommendations. parameters \n# novel -> NovelArtistRecommender, \n# nenovel -> ArtistRecommender\n# dontbuild-> don't build\n\nBUILD=true\nEVAL=''\nREC=''\n\nfor param in $*;\ndo \n case $param in\n 'novel')\n EVAL='NovelArtistRankEvaluator'\n REC='NovelArtistRecommender'\n ;;\n 'nenovel')\n EVAL='ArtistRankEvaluator'\n REC='ArtistRecommender'\n ;;\n 'dontbuild')\n BUILD=false\n ;;\n esac\ndone\n\nif [ $BUILD = true ]\nthen\n # build\n echo \"Building...\"\n echo \"from lastfm.recommender import *; $REC.build()\" | python ./manage.py shell\nfi\n\necho \"Evaluating recommendations...\"\necho \"from lastfm.evaluation import *; from lastfm.recommender import *; $EVAL.evaluate_predictions($REC)\"| python ./manage.py shell\n\necho \"\"\n\n" }, { "alpha_fraction": 0.6459985375404358, "alphanum_fraction": 0.6459985375404358, "avg_line_length": 20.671875, "blob_id": "240d9c3e17fedad9e7ad2dc48ce541cc1373512c", "content_id": "acecebc085fa5fe72c71f0bf3e43802cb269eee3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 132, "num_lines": 64, "path": "/UnresystCD/code/adapter/build.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "# build the shoe recommender\n\nLASTFM=false\nNLASTFM=false\nDEMO=false\nADEMO=false\nFLIXSTER=false\nTRAVEL=false\n\nfor param in $*;\ndo \n case $param in\n 'lastfm')\n LASTFM=true\n ;;\n 'nlastfm')\n NLASTFM=true\n ;;\n 'demo')\n DEMO=true\n ;;\n 'flixster')\n FLIXSTER=true\n ;;\n 'travel')\n TRAVEL=true\n ;;\n 'ademo')\n ADEMO=true\n ;; \n esac\ndone\n\nif [ $DEMO = true ]\nthen\n echo \"from demo.recommender import ShoeRecommender; ShoeRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\nif [ $ADEMO = true ]\nthen\n echo \"from demo.recommender import AverageRecommender; AverageRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\nif [ $LASTFM = true ]\nthen\n echo \"from lastfm.recommender import ArtistRecommender; ArtistRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\nif [ $NLASTFM = true ]\nthen\n echo \"from lastfm.recommender import NovelArtistRecommender; NovelArtistRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\nif [ $FLIXSTER = true ]\nthen\n echo \"from flixster.recommender import MovieRecommender; MovieRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\nif [ $TRAVEL = true ]\nthen\n echo \"from travel.recommender import OrderTourRecommender; OrderTourRecommender.build(); quit();\" | python ./manage.py shell\nfi\n\necho \"\"\n" }, { "alpha_fraction": 0.8062015771865845, "alphanum_fraction": 0.8062015771865845, "avg_line_length": 31.25, "blob_id": "03d93fb7e5af5f9cfcc13af9d980d345de9381c7", "content_id": "728db3828f4936747110a00115337c277963c69e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 81, "num_lines": 4, "path": "/UnresystCD/code/adapter/unresyst/recommender/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Package holding the BaseRecommender class, holding the interface for the whole\nunresyst.\"\"\"\n\nfrom base import BaseRecommender\n" }, { "alpha_fraction": 0.6535211205482483, "alphanum_fraction": 0.6788732409477234, "avg_line_length": 21.125, "blob_id": "51a21eb6ed1100fac55f6654d42dd24e300e396c", "content_id": 
"46b996f5dd70d8892ad94fe110f7a4e6d7be3cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/code/adapter/flixster/constants.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Constants for flixster\"\"\"\n\nMIN_POSITIVE_RATING = 3.0\n\"\"\"Minimal rating taken as positive\"\"\"\n\nMIN_HIGH_RATING = 4.0\n\"\"\"Minimal rating taken as \"high\".\"\"\"\n\nMAX_LOW_RATING = 1.0\n\"\"\"Maximal rating taken as \"low\".\"\"\"\n\nMAX_STARS = 5\n\"\"\"Maximal number of starts in rating\"\"\"\n\nMAX_TOLERANCE = 1.0\n\"\"\"Size of the interval to take a prediction as successful\"\"\"\n\n" }, { "alpha_fraction": 0.5889149904251099, "alphanum_fraction": 0.598078191280365, "avg_line_length": 36.93692398071289, "blob_id": "dc6153f14a58a55b7cb4816548e8b114e8d4c65d", "content_id": "5ad83dc9f7b4f6ed08692ad2a5336acebffd92d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24664, "license_type": "no_license", "max_line_length": 112, "num_lines": 650, "path": "/UnresystCD/code/adapter/unresyst/recommender/rules.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The classes for representing business rules and relationships\"\"\"\n\nfrom unresyst.constants import *\n\nfrom unresyst.models.abstractor import *\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.exceptions import DescriptionKeyError, ConfigurationError\n\nclass BaseRelationship(object):\n \"\"\"A base class for representing all relationships and rules.\n \n A subclass for all classes representing a relationship between entities (not necessarily \n of the same type). Contains the condition that is true between and only \n between the entities that are in the given relationship.\n \"\"\"\n \n def __init__(self, name, condition=None, description=None, generator=None):\n \"\"\"The constructor.\"\"\"\n \n self.name = name\n \"\"\"The name of the rule/relationship.\"\"\"\n \n self.condition = condition\n \"\"\"A boolean function that represents the condition. If True \n for the given pair of entities, there's the Relationship between the \n entities. \n Should be simple.\n \"\"\"\n \n self.description = description\n \"\"\"A string describing the rule. It can contain placeholders for entities: \n \n - %(subject)s, %(object)s for subject-object relationships and rules\n - %(subject1)s, %(subject2)s for subject-subject relationships \n and rules\n - %(object1)s, %(object2)s for object-object relationships and rules\n - %(subjectobject1)s, %(subjectobject2)s for recommenders where \n subject domain is the same as object domain \n \"\"\"\n \n self.generator = generator\n \"\"\"A generator returning pairs of objects that are in the relationship/\n the rule applies to them.\n For performance reasons - if given, the pairs will be taken from it \n without the need for evaluating the condition for each possible pair.\n \"\"\"\n \n DESCRIPTION_FORMAT_DICT = {\n RELATIONSHIP_TYPE_SUBJECT_OBJECT: \n (FORMAT_STR_SUBJECT, FORMAT_STR_OBJECT),\n RELATIONSHIP_TYPE_SUBJECT_SUBJECT: \n (FORMAT_STR_SUBJECT1, FORMAT_STR_SUBJECT2),\n RELATIONSHIP_TYPE_OBJECT_OBJECT: \n (FORMAT_STR_OBJECT1, FORMAT_STR_OBJECT2),\n RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT: \n (FORMAT_STR_SUBJECTOBJECT1, FORMAT_STR_SUBJECTOBJECT2)\n }\n \"\"\"A dictionary relationship type (e.g. 
'S-O') to a pair of formatting strings \n for description, e.g. ('subject', 'object').\n \"\"\"\n\n relationship_type = RELATIONSHIP_TYPE_SUBJECT_OBJECT\n \"\"\"The type of the relationship S-O\"\"\"\n \n InstanceClass = RelationshipInstance\n \"\"\"The model class used for representing instances of the rule/relationship\"\"\"\n \n DefinitionClass = RuleRelationshipDefinition\n \"\"\"The model class used for representing the definition of the \n rule/relationship\n \"\"\"\n \n is_symmetric = False\n \"\"\"Is the rule/relationship between the entities of the same domain?\n True for S-S, O-O, SO-SO\n \"\"\"\n \n def get_filled_description(self, arg1, arg2):\n \"\"\"Get description for a rule/relationship instance, between \n arg1 and arg2. \n \n @type arg1: models.SubjectObject\n @param arg1: the first subjectobject in the relationship/rule\n\n @type arg2: models.SubjectObject\n @param arg2: the second subjectobject in the relationship/rule\n \n @rtype: str\n @return: a string with filled gaps for entities.\n \"\"\"\n # get the format strings, e.g. ('subject', 'object')\n format_strings = self.DESCRIPTION_FORMAT_DICT[self.relationship_type]\n \n # create the formatting strings to be passed to description\n format_dict = {\n format_strings[0]: arg1,\n format_strings[1]: arg2\n }\n if self.description is None:\n desc = ''\n else:\n try: \n desc = self.description % format_dict\n except KeyError, e:\n raise DescriptionKeyError(\n message=\"There's an invalid key in description\",\n recommender=self.recommender,\n name=self.name, \n key=e.__str__(), \n permitted_keys=format_dict.keys()\n )\n \n return desc\n \n def get_create_definition_kwargs(self):\n \"\"\"Get dictionary of parameters for the definition model constructor. \n \n @rtype: dictionary string: object\n @return: the kwargs of the definition model constructor \n \"\"\"\n return {\n \"name\": self.name,\n \"recommender\": self.recommender._get_recommender_model(),\n }\n \n def get_additional_instance_kwargs(self, ds_arg1, ds_arg2):\n \"\"\"Get dictionary of additional kwargs for creating the rule/relationship\n instance. \n \n @type ds_arg1: django.db.models.manager.Manager\n @param ds_arg1: the first argument of the rule instance - domain specific\n \n @type ds_arg2: django.db.models.manager.Manager\n @param ds_arg2: the second argument of the rule instance - domain specific\n \n @rtype: dictionary string: object\n @return: additional keyword args for creating rule/relationship instance \n \"\"\"\n return {}\n \n \n def evaluate_on_dn_args(self, dn_arg1, dn_arg2, definition): \n \"\"\"Evaluates the rule on the given arguments. 
If evaluated positively,\n a new rule/relationship instance is saved.\n \n @type dn_arg1: models.SubjectObject\n @param dn_arg1: a domain neutral representation of the first entity.\n\n @type dn_arg2: models.SubjectObject\n @param dn_arg2: a domain neutral representation of the second entity.\n \n @type definition: models.abstractor.RuleRelationshipDefinition\n @param definition: the model representing the rule/relationship \n definition\n \n @rtype: int\n @return: 1 if something has been created, 0 if not\n \"\"\"\n \n # get the domain specific objects for our universal representations\n #\n arg1_manager = self.recommender._get_entity_manager(dn_arg1.entity_type)\n ds_arg1 = dn_arg1.get_domain_specific_entity(entity_manager=arg1_manager)\n \n arg2_manager = self.recommender._get_entity_manager(dn_arg2.entity_type)\n ds_arg2 = dn_arg2.get_domain_specific_entity(entity_manager=arg2_manager)\n\n # if the condition is satisfied\n if self.condition(ds_arg1, ds_arg2):\n \n self._perform_save_instance(definition, ds_arg1, ds_arg2, dn_arg1, dn_arg2)\n return 1\n \n return 0\n\n @classmethod\n def order_arguments(cls, dn_arg1, dn_arg2):\n \"\"\"Order the arguments as they appear in the relationship.\"\"\"\n a1, a2, x, x = cls._order_in_pair(dn_arg1, dn_arg2, None, None)\n \n return (a1, a2)\n\n @classmethod\n def _order_in_pair(cls, dn_arg1, dn_arg2, ds_arg1, ds_arg2):\n \"\"\"Swap the arguments in the rule/relationships so that the first\n has a lower id than the second\n \"\"\"\n\n # if the second argument has a lower id than the first, swap them\n if dn_arg2.pk < dn_arg1.pk: \n return (dn_arg2, dn_arg1, ds_arg2, ds_arg1)\n\n # otherwise not\n return (dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n \n\n def _perform_save_instance(self, definition, ds_arg1, ds_arg2, dn_arg1, dn_arg2):\n \"\"\"Perform the action of creating and saving the instance\"\"\"\n \n # order the instances in pairs as the class requires\n dn_arg1, dn_arg2, ds_arg1, ds_arg2 = self._order_in_pair(dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n \n add_kwargs = self.get_additional_instance_kwargs(ds_arg1, ds_arg2)\n \n # create a rule/relationship instance\n instance = self.InstanceClass(\n definition=definition,\n subject_object1=dn_arg1,\n subject_object2=dn_arg2,\n description=self.get_filled_description(dn_arg1, dn_arg2),\n **add_kwargs)\n \n instance.save()\n\n\n def save_instance(self, ds_arg1, ds_arg2, definition): \n \"\"\"Save an instance of the rule/relationship for the given args.\n \n @type ds_arg1: domain specific subject/object\n @param ds_arg1: a domain specific representation of the first entity.\n\n @type ds_arg2: domain specific subject/object\n @param ds_arg2: a domain specific representation of the second entity.\n \n @type definition: models.abstractor.RuleRelationshipDefinition\n @param definition: the model representing the rule/relationship \n definition\n \n @raise ConfigurationError: thrown if the condition doesn't evaluate \n to true on the given pair\n \"\"\"\n if not (self.condition is None) and not self.condition(ds_arg1, ds_arg2):\n raise ConfigurationError(\n message=(\"The condition wasn't evaluated as true for the pair \" + \n \"%s, %s, even though it was returned by the generator.\") % (ds_arg1, ds_arg2), \n recommender=self.recommender, \n parameter_name='rules/relationships', \n parameter_value=self.name)\n \n # convert the domain specific to domain neutral \n #\n arg1_ent_type, arg2_ent_type = self.relationship_type.split(RELATIONSHIP_TYPE_SEPARATOR) \n\n dn_arg1 = SubjectObject.get_domain_neutral_entity(\n 
domain_specific_entity=ds_arg1, \n entity_type=arg1_ent_type, \n recommender=definition.recommender)\n\n dn_arg2 = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=ds_arg2, \n entity_type=arg2_ent_type, \n recommender=definition.recommender)\n \n # create and save the instance\n self._perform_save_instance(definition, ds_arg1, ds_arg2, dn_arg1, dn_arg2) \n \n \n \n def evaluate(self):\n \"\"\"Evaluate the rule on all subjects/objects - pairs.\n \n Creates and saves the rule/relationship definition, creates and saves\n rule instances. \n \"\"\"\n \n # obtain the kwargs for creating the definition\n def_kwargs = self.get_create_definition_kwargs()\n\n # create and save the definition\n definition = self.DefinitionClass(**def_kwargs)\n definition.save()\n\n i = 0 \n # if we have a generator, use it for looping through pairs\n if not (self.generator is None):\n \n\n # loop through pairs, save the rule/relationship instances\n for ds_arg1, ds_arg2 in self.generator():\n self.save_instance(ds_arg1, ds_arg2, definition)\n i += 1\n \n print \" %d instances of rule/rel %s created\" % (i, self.name)\n \n # that's it\n return\n \n \n # otherwise take the entities one by one\n \n # parse what should be used as condition args\n arg1_s, arg2_s = self.relationship_type.split(RELATIONSHIP_TYPE_SEPARATOR) \n \n if arg1_s == arg2_s:\n \n # loop only through the matrix members below the diagonal \n # \n i = 0\n for arg1, arg2 in SubjectObject.unique_pairs(\n recommender=self.recommender._get_recommender_model(),\n entity_type=arg1_s): \n # evaluate it\n i += self.evaluate_on_dn_args(arg1, arg2, definition)\n \n\n \n else:\n # filter subjectobjects for my recommender\n qs_recommender = SubjectObject.objects.filter(\n recommender=self.recommender._get_recommender_model())\n \n # go through all things that have to be as first and as second param\n for arg1 in qs_recommender.filter(entity_type=arg1_s).iterator():\n for arg2 in qs_recommender.filter(entity_type=arg2_s).iterator():\n \n # evaluate the rule/relationship on the given args\n i += self.evaluate_on_dn_args(arg1, arg2, definition)\n\n print \" %d instances of rule/rel %s created\" % (i, self.name)\n\n \n def export(self, f):\n \"\"\"Export the relationship as lines to the given file object.\n \n Exports the relationship in form:\n subject_id, object_id[,confidence]\\n\n \n confidence is exported only if it's relevant,\n no matter if it's positive or not\n \n @type f: file\n @param f: the file to write to\n \n @raise: ConfigurationError: if the rule/relationship doesn't have\n a generator.\n \"\"\"\n \n # if we don't have a generator raise an error\n if self.generator is None:\n raise ConfigurationError(\n message=\"The relationship to export is missing a generator.\", \n recommender=cls, \n parameter_name='?', \n parameter_value=cls.name)\n \n i = 0\n \n # loop through pairs, export the rule/relationship instances\n for ds_arg1, ds_arg2 in self.generator():\n \n # create the common part\n linestr = \"%s,%s\" % (ds_arg1.pk, ds_arg2.pk)\n \n # get the confidence if provided\n adkwargs = self.get_additional_instance_kwargs(ds_arg1, ds_arg2)\n \n # if confidence provided, append it\n if adkwargs.has_key(EXPECTANCY_KWARG_NAME):\n linestr += \",%f\" % adkwargs[EXPECTANCY_KWARG_NAME]\n \n linestr += '\\n'\n \n # write it to the file\n f.write(linestr)\n \n i += 1\n \n print \" %d instances of rule/rel %s exported\" % (i, self.name)\n\n \nclass PredictedRelationship(BaseRelationship):\n \"\"\"A class for representing the predicted 
relationship.\"\"\"\n\n DefinitionClass = PredictedRelationshipDefinition\n \"\"\"The model class used for representing the definition of the \n rule/relationship\n \"\"\" \n \n @classmethod\n def _order_in_pair(cls, dn_arg1, dn_arg2, ds_arg1, ds_arg2):\n \"\"\"Swap the arguments in the relationship so that the first\n is always a subject and second the object.\n \"\"\" \n \n # for the SO entities we apply the normal policy\n if dn_arg1.entity_type == ENTITY_TYPE_SUBJECTOBJECT:\n return super(PredictedRelationship, cls)._order_in_pair(dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n\n # otherwise we put subjects as first\n \n # if the first is object, swap\n if dn_arg1.entity_type == ENTITY_TYPE_OBJECT:\n return (dn_arg2, dn_arg1, ds_arg2, ds_arg1)\n \n # if not, keep\n return (dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n\nclass ExplicitSubjectObjectRule(BaseRelationship):\n \"\"\"A class for representing explicit preference like rating, for attributes where\n the user can express both positive and negative preference.\"\"\"\n\n relationship_type = RELATIONSHIP_TYPE_SUBJECT_OBJECT\n \"\"\"The type of the relationship S-O\"\"\" \n\n DefinitionClass = ExplicitRuleDefinition\n \"\"\"The model class used for representing the definition of the \n rule/relationship\n \"\"\" \n \n InstanceClass = ExplicitRuleInstance\n \"\"\"The model class used for representing instances of \n the rule/relationship\"\"\"\n \n \n def __init__(self, name, expectancy, condition=None, description=None, generator=None):\n \"\"\"The constructor.\"\"\"\n \n super(ExplicitSubjectObjectRule, self).__init__(\n name=name, \n condition=condition, \n description=description, \n generator=generator)\n \n self.expectancy = expectancy\n \"\"\"A function taking a subject and an object, giving the explicit preference\n normalized to [0, 1].\n \"\"\"\n \n def get_additional_instance_kwargs(self, ds_arg1, ds_arg2):\n \"\"\"See the base class for documentation\n \n @raise ConfigurationError: if the confidence function returns a value\n outside [0, 1]\n \"\"\"\n ret_dict = super(ExplicitSubjectObjectRule, self)\\\n .get_additional_instance_kwargs(ds_arg1, ds_arg2)\n \n # call the user-defined confidence method\n expectancy = self.expectancy(ds_arg1, ds_arg2)\n \n if not (MIN_EXPECTANCY <= expectancy <= MAX_EXPECTANCY):\n raise ConfigurationError(\n message=(\"The rule '%s' has expectancy %f, for the\" + \\\n \" pair (%s, %s). 
Should be between 0 and 1.\") % \\\n (self.name, expectancy, ds_arg1, ds_arg2),\n recommender=self.recommender,\n parameter_name=\"Recommender.rules\",\n parameter_value=self.recommender.rules\n )\n \n ret_dict[EXPECTANCY_KWARG_NAME] = expectancy\n return ret_dict \n\n @classmethod\n def _order_in_pair(cls, dn_arg1, dn_arg2, ds_arg1, ds_arg2):\n return PredictedRelationship._order_in_pair(dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n \nclass _WeightedRelationship(BaseRelationship):\n \"\"\"A class representing a relationship with a weight.\"\"\"\n\n DefinitionClass = RuleRelationshipDefinition\n \"\"\"The model class used for representing the definition of the \n rule/relationship\n \"\"\" \n \n def __init__(self, name, is_positive, weight, condition=None, description=None, generator=None):\n \"\"\"The constructor.\"\"\"\n \n super(_WeightedRelationship, self).__init__(\n name=name, \n condition=condition, \n description=description, \n generator=generator)\n \n self.is_positive = is_positive\n \"\"\"Is the relationship positive to the predicted relationship?\"\"\"\n \n self.weight = weight\n \"\"\"A float number from [0, 1] representing the *static* weight of the rule. \n It doesn't depend on the entity pair.\n \"\"\"\n\n\n def get_create_definition_kwargs(self):\n \"\"\"Get dictionary of parameters for the definition model constructor. \n \n Add the weight to the parameters. \n \n @rtype: dictionary string: object\n @return: the kwargs of the definition model constructor \n \n @raise ConfigurationError: if the weight isn't from [0, 1]\n \"\"\"\n ret_dict = super(_WeightedRelationship, self).get_create_definition_kwargs()\n \n ret_dict['is_positive'] = self.is_positive\n\n if not (MIN_WEIGHT <= self.weight <= MAX_WEIGHT):\n raise ConfigurationError(\n message=(\"The rule/relationship '%s' has weight %f,\" + \\\n \" should be between 0 and 1.\") % (self.name, self.weight),\n recommender=self.recommender,\n parameter_name=\"Recommender.rules or Recommender.relationships\",\n parameter_value=(self.recommender.rules, self.recommender.relationships)\n )\n \n ret_dict['weight'] = self.weight\n ret_dict[\"relationship_type\"] = self.relationship_type \n \n return ret_dict\n \n \n \n\nclass SubjectObjectRelationship(_WeightedRelationship):\n \"\"\"A class for representing subject-object preference for recommendation\"\"\"\n \n relationship_type = RELATIONSHIP_TYPE_SUBJECT_OBJECT\n \"\"\"The type of the relationship S-O\"\"\" \n\n @classmethod\n def _order_in_pair(cls, dn_arg1, dn_arg2, ds_arg1, ds_arg2):\n return PredictedRelationship._order_in_pair(dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n\nclass _SimilarityRelationship(_WeightedRelationship):\n \"\"\"A base class (abstract) for all relationships operating between the same type \n and meaning similarity.\n \"\"\"\n \n is_symmetric = True\n \"\"\"For documentation see the base class\"\"\"\n \n\nclass ObjectSimilarityRelationship(_SimilarityRelationship):\n \"\"\"A class for representing inter-object similarity.\"\"\" \n\n relationship_type = RELATIONSHIP_TYPE_OBJECT_OBJECT\n \"\"\"The type of the relationship O-O\"\"\" \n\n\nclass SubjectSimilarityRelationship(_SimilarityRelationship):\n \"\"\"A class for representing inter-subject similarity.\"\"\"\n\n relationship_type = RELATIONSHIP_TYPE_SUBJECT_SUBJECT\n \"\"\"The type of the relationship S-S\"\"\"\n\n\nclass SubjectObjectSimilarityRelationship(_SimilarityRelationship):\n \"\"\"A class used only when subject domain equals object domain. 
\n    For representing inter-entity similarity.\n    \"\"\"\n    \n    relationship_type = RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT\n    \"\"\"The type of the relationship SO-SO\"\"\"\n\n\n# rules:\n#\n\nclass _BaseRule(_WeightedRelationship):\n    \"\"\"A base class for all rules (abstract).\"\"\"\n    \n    InstanceClass = RuleInstance\n    \"\"\"The model class used for representing instances of \n    the rule/relationship\"\"\"\n    \n    def __init__(self, name, is_positive, weight, confidence, condition=None, description=None, generator=None):\n        \"\"\"The constructor.\"\"\"\n\n        super(_BaseRule, self).__init__(\n            name=name, \n            condition=condition,\n            is_positive=is_positive, \n            weight=weight, \n            description=description, \n            generator=generator)\n        \n        self.confidence = confidence\n        \"\"\"A float function giving values from [0, 1] representing \n        the confidence of the rule on the given pair. \n        It's dynamic, it depends on the entity pair.\n        \"\"\"\n        \n    def get_additional_instance_kwargs(self, ds_arg1, ds_arg2):\n        \"\"\"See the base class for documentation\n        \n        @raise ConfigurationError: if the confidence function returns a value\n            outside [0, 1]\n        \"\"\"\n        ret_dict = super(_BaseRule, self).get_additional_instance_kwargs(\n            ds_arg1, ds_arg2)\n        \n        # call the user-defined confidence method\n        confidence = self.confidence(ds_arg1, ds_arg2)\n        \n        if not (MIN_CONFIDENCE <= confidence <= MAX_CONFIDENCE):\n            raise ConfigurationError(\n                message=(\"The rule '%s' has a confidence %f, for the\" + \\\n                    \" pair (%s, %s). Should be between 0 and 1.\") % \\\n                    (self.name, confidence, ds_arg1, ds_arg2),\n                recommender=self.recommender,\n                parameter_name=\"Recommender.rules\",\n                parameter_value=self.recommender.rules\n            )\n        \n        ret_dict[CONFIDENCE_KWARG_NAME] = confidence\n        return ret_dict\n\n# note: confidence could also return a string with an additional explanation\n\nclass _SimilarityRule(_BaseRule):\n    \"\"\"A base class (abstract) for all rules operating between the same type \n    and meaning similarity.\"\"\"\n\n    is_symmetric = True\n    \"\"\"For documentation see the base class\"\"\"\n    \n\nclass ObjectSimilarityRule(_SimilarityRule):\n    \"\"\"A class for representing inter-object similarity.\"\"\"\n\n    relationship_type = RELATIONSHIP_TYPE_OBJECT_OBJECT\n    \"\"\"The type of the relationship O-O\"\"\"\n\n\nclass SubjectSimilarityRule(_SimilarityRule):\n    \"\"\"A class for representing inter-subject similarity.\"\"\"\n\n    relationship_type = RELATIONSHIP_TYPE_SUBJECT_SUBJECT\n    \"\"\"The type of the relationship S-S\"\"\"\n\n\nclass SubjectObjectRule(_BaseRule):\n    \"\"\"A class for representing subject-object preference for recommendation\"\"\"\n\n    relationship_type = RELATIONSHIP_TYPE_SUBJECT_OBJECT\n    \"\"\"The type of the relationship S-O\"\"\"\n\n    @classmethod\n    def _order_in_pair(cls, dn_arg1, dn_arg2, ds_arg1, ds_arg2):\n        return PredictedRelationship._order_in_pair(dn_arg1, dn_arg2, ds_arg1, ds_arg2)\n    \nclass SubjectObjectSimilarityRule(_SimilarityRule):\n    \"\"\"A class used only when subject domain equals object domain. 
\n For representing inter-entity rule.\n \"\"\"\n \n relationship_type = RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT\n \"\"\"The type of the relationship SO-SO\"\"\"\n\n \n" }, { "alpha_fraction": 0.6410256624221802, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 25, "blob_id": "8bd5bbee2b8e0dfe1070d9756b7487bd06c7114b", "content_id": "c5c7bf0826c526e2a2d602c31e306b308e2b8563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 156, "license_type": "no_license", "max_line_length": 62, "num_lines": 6, "path": "/code/adapter/dump.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# dump the database to given file (relative from ../db/dumps/)\nmysqldump --opt -u root adapter > ../db/dumps/$1\n\necho \"dumped to ../db/dumps/$1\"\n" }, { "alpha_fraction": 0.6247366070747375, "alphanum_fraction": 0.626033365726471, "avg_line_length": 37.316768646240234, "blob_id": "cca61489cbc79c8f905c9f2ec184854d1dec0025", "content_id": "c8c35fff934115f4b210c138e687c3751a25175c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6169, "license_type": "no_license", "max_line_length": 89, "num_lines": 161, "path": "/UnresystCD/code/adapter/unresyst/models/common.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The models that are common to the whole unresyst application.\"\"\"\n\nfrom django.db import models\n\nfrom unresyst.constants import *\n\nclass Recommender(models.Model):\n \"\"\"The representation of a recommender. \n \n There can be multiple recommenders for one parent system.\n \"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the recommender\"\"\"\n \n class_name = models.CharField(max_length=MAX_LENGTH_CLASS_NAME, unique=True)\n \"\"\"The name of the recommender class. 
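To make the rule taxonomy above concrete, here is a sketch of how one of these classes is typically instantiated; the shared-city logic and the User attributes are hypothetical, only the class and parameter names come from the code above:

def users_share_city(user1, user2):
    return user1.city == user2.city

def city_confidence(user1, user2):
    # must return a float from [0, 1], see get_additional_instance_kwargs
    return 1.0 if users_share_city(user1, user2) else 0.0

city_rule = SubjectSimilarityRule(
    name="Users live in the same city.",
    is_positive=True,
    weight=0.5,
    condition=users_share_city,
    confidence=city_confidence)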
Has to be unique.\"\"\"\n\n are_subjects_objects = models.BooleanField()\n \"\"\"Are subjects == objects for the recommender?\"\"\"\n \n is_built = models.BooleanField()\n \"\"\"Is the recommender built?\"\"\"\n \n random_recommendation_description = models.TextField(default='', blank=True)\n \"\"\"The description of the relationship/rule instance.\"\"\" \n \n remove_predicted_from_recommendations = models.BooleanField()\n \"\"\"Should the objects that are already \"liked\" be removed from \n recommendations?\"\"\"\n \n class Meta:\n app_label = 'unresyst'\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass SubjectObject(models.Model):\n \"\"\"The common representation for a subject and an object.\"\"\"\n \n id_in_specific = models.CharField(max_length=MAX_LENGTH_ID)\n \"\"\"The id of the subject/object in the domain-specific system.\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"A textual characterization of the subject/object\"\"\"\n \n entity_type = models.CharField(max_length=MAX_LENGTH_ENTITY_TYPE, \\\n choices=ENTITY_TYPE_CHOICES)\n \"\"\"A string indicating whether it's a subject, object or both.s/o/so\"\"\"\n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender to which the subject/object belongs.\"\"\"\n \n\n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('id_in_specific', 'entity_type', 'recommender')\n \"\"\"There can be only one subject/object with the given id and \n recommender.\n \"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n \n @classmethod\n def get_domain_neutral_entity(cls, domain_specific_entity, entity_type, recommender):\n \"\"\"Get domain neutral representation of the given domain specific entity\n (subject/object/subjectobject)\n \n @type domain_specific_entity: django.db.models.Model\n @param domain_specific_entity: the domain specific subject/object/\n subjectobject for which the domain neutral repre should be got\n \n @type entity_type: str\n @param entity_type: 'S'/'O'/'SO' .. 
see constants, determines whether\n the entity is a subject, object or both\n \n @type recommender: models.Recommender\n @param recommended: the recommender the for which the entity should \n be got\n \n @rtype: models.SubjectObject\n @returns: the domain neutral representation of the entity \n \n @raise MultipleObjectsReturned: when there's a broken constraint in \n the unresyst database (hopefully never)\n @raise DoesNotExist: when the domain neutral representation for \n the given entity does not exist\n \"\"\"\n return cls.objects.get(\n id_in_specific=domain_specific_entity.pk,\n entity_type=entity_type,\n recommender=recommender)\n \n def get_domain_specific_entity(self, entity_manager):\n \"\"\"Get domain specific subject/object/both for this universal \n representation.\n \n @type entity_manager: django.db.models.manager.Manager\n @param entity_manager: the manager over the model containing \n the domain specific subjects/objects/bot\n \n @rtype: models.Model\n @returns: the domain specific entity for this universal representation\n \n @raise MultipleObjectsReturned: when there's a broken constraint in \n the client database (hopefully never)\n @raise DoesNotExist: when the domain specific entity for \n this universal entity does not exist\n \"\"\"\n return entity_manager.get(pk=self.id_in_specific)\n\n @classmethod\n def unique_pairs(cls, recommender, entity_type):\n \"\"\"A generator looping through the pairs of subjectobjects so that each two \n object set is returned only once. \n \n E.g. after (a, b), the (b, a) pair isn't returned. \n It doesn't return pairs like (a, a).\n \n Useful for symmetric rule and relationship evaluation.\n \n @type recommender: models.Recommender\n @param recommender: the recommender model for which the pairs should be\n obtained\n\n @type entity_type: str ('S', 'O' or 'SO') or None\n @param entity_type: the entity type from whic the pair should be taken,\n if None, all entity types are taken\n \n @rtype: generator for two-tuples\n @returns: pairs of subjectobjects entity_type entities belonging to \n the recommender.\n \"\"\"\n ent_type_kwargs = {} if entity_type is None \\\n else {'entity_type': entity_type}\n \n # get the subjectobjects to iterate over\n qs_entities = cls.objects.filter(\n recommender=recommender, \n **ent_type_kwargs).order_by('id')\n\n # the number of entities\n entity_count = qs_entities.count() \n \n # get the first argument and the number of entities that \n # will be taken as the second arg. 
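A worked illustration of unique_pairs: for hypothetical entities a, b, c (ordered by id) the generator yields each unordered pair exactly once and never pairs an entity with itself, i.e. (b, a), (c, a), (c, b), which is n*(n-1)/2 pairs for n entities:

for arg1, arg2 in SubjectObject.unique_pairs(
        recommender=recommender_model,    # hypothetical recommender instance
        entity_type='S'):
    print "%s - %s" % (arg1.name, arg2.name)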
Starting from 1, \n # finishing at <count -1>.\n # The first entity will never be used as second argument \n for arg1, count in zip( \\\n qs_entities[1:].iterator(), \\\n range(1, entity_count)):\n\n # obtain only first count entities\n for arg2 in qs_entities[:count].iterator():\n\n yield (arg1, arg2)\n" }, { "alpha_fraction": 0.7341772317886353, "alphanum_fraction": 0.7341772317886353, "avg_line_length": 18.75, "blob_id": "be3fa2be861725aaa0b158caf2c22896177abf5b", "content_id": "a301faf6bd839a0f7365d3ea650a3ddbf69c3595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 44, "num_lines": 4, "path": "/code/adapter/unresyst/tests/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The module containing tests for unresyst.\n\nIt uses the demo appliation.\n\"\"\"\n" }, { "alpha_fraction": 0.7977900505065918, "alphanum_fraction": 0.8016999363899231, "avg_line_length": 28.263681411743164, "blob_id": "087d0f7433293c7351326b79e62c3a817fdaf1c6", "content_id": "5753a3ca5f8ff52fc6a7e772132a383b686a18d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 11765, "license_type": "no_license", "max_line_length": 149, "num_lines": 402, "path": "/code/adapter/evaluate.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# parametry: \n# 1. co chces evaluovat: lastfm, demo\n# 2. cislo iterace\n\n\nLASTFM=false\nDEMO=false\n\nfor param in $*;\ndo \n case $param in\n 'lastfm')\n LASTFM=true\n ;;\n 'demo')\n DEMO=true\n ;;\n esac\ndone\n\nif [ $LASTFM = true ]\nthen\n echo \"Creating test set.\"\n echo \"from lastfm.recommender import *; ArtistRecommender.ValidationPairClass.select_validation_pairs($2); quit()\" | python ./manage.py shell \n echo \"Building and evaluating recommender.\"\n echo \"from lastfm.recommender import *; ArtistRecommender.build(); ArtistRecommender.evaluate(); quit();\" | python ./manage.py shell\nfi\n\n# LASTFM\n#\n#\n\n# specificky pro lastfm - dva test sety\nfrom lastfm.models import *\nBaseArtistEvaluationPair.select()\n\n\n\n# evaluate novel mahout recommender:\n# \n\n# export training data\nfrom lastfm.mahout_recommender import *\nNovelMahoutArtistRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv')\n\n# export test data\nfrom lastfm.evaluation import *\nNovelArtistRankEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_test.csv')\n\n# run mahout train, test -> lastfm_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh lastfm\n\n# import predictions\nfrom lastfm.mahout_recommender import *\nNovelMahoutArtistRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_predictions.csv')\n\n# evaluate the predictions \nfrom lastfm.mahout_recommender import *\nfrom lastfm.evaluation import *\nNovelArtistRankEvaluator.evaluate_predictions(NovelMahoutArtistRecommender)\n\n# evaluate nenovel mahout recommender:\n# \n\n# export training data\nfrom lastfm.mahout_recommender import *\nMahoutArtistRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv')\n\n# export test data\nfrom lastfm.evaluation import *\nArtistRankEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_test.csv')\n\n# run mahout train, test -> lastfm_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh 
lastfm\n\n# import predictions\nfrom lastfm.mahout_recommender import *\nMahoutArtistRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_predictions.csv')\n\n# evaluate the predictions \nfrom lastfm.mahout_recommender import *\nfrom lastfm.evaluation import *\nArtistRankEvaluator.evaluate_predictions(MahoutArtistRecommender)\n\n\n\n# below\n#\n\n# build, \n#save_all_to_predictions must be true\n./build.sh lastfm\n\n# evaluate and save the predictions\nfrom lastfm.evaluation import *\nfrom lastfm.recommender import *\nNovelArtistRecommenderEvaluator.evaluate_predictions(NovelArtistRecommender, save_predictions=True)\n\n# export predictions\nfrom lastfm.recommender import *\nNovelArtistRecommender.export_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv')\n\n# export test data\nfrom lastfm.evaluation import *\nNovelArtistRankEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_test.csv')\n\n# run mahout train, test -> flixster_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh lastfm\n\n# update unresyst predictions with the obtained\nfrom lastfm.recommender import *\nNovelArtistRecommender.update_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_predictions.csv')\n\n# run evaluation\n./lastfmeval.sh novel dontbuild\n\n\n\n# recommendations - netreba\n# \n\n# evaluate unresyst\nfrom lastfm.evaluation import *\nfrom lastfm.recommender import *\nArtistRecommenderEvaluator.evaluate_recommendations(ArtistRecommender, 10)\n\n# evaluate mahout recommender\n# nenovel:\n\n# export training data\nfrom lastfm.mahout_recommender import *\nMahoutArtistRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv')\n\n# run mahout train, test -> lastfm_recommendations.csv\ncd ../mahout/mahoutrec\n./unresystrecommend.sh\n\n# import predictions\nfrom lastfm.mahout_recommender import *\nMahoutArtistRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_recommendations.csv')\n\n# evaluate the recommendations \nfrom lastfm.mahout_recommender import *\nfrom lastfm.evaluation import *\nArtistRecommenderEvaluator.evaluate_recommendations(MahoutArtistRecommender, 10)\n\n# evaluate mahout recommender\n# novel:\n\n# export training data\nfrom lastfm.mahout_recommender import *\nNovelMahoutArtistRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv')\n\n# run mahout train, test -> lastfm_recommendations.csv\ncd ../mahout/mahoutrec\n./unresystrecommend.sh\n\n# import predictions\nfrom lastfm.mahout_recommender import *\nNovelMahoutArtistRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_recommendations.csv')\n\n# evaluate the recommendations \nfrom lastfm.mahout_recommender import *\nfrom lastfm.evaluation import *\nNovelArtistRecommenderEvaluator.evaluate_recommendations(NovelMahoutArtistRecommender, 10)\n\n\n# Flixster\n#\n#\n\n# select test data - obecne\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.select_evaluation_pairs()\n\n# evaluate mahout recommender - predictions:\n#\n\n# export training data\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv')\n\n# export test data\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_test.csv')\n\n# run mahout train, test -> 
lastfm_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh flixster\n\n# import predictions\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_predictions.csv')\n\n# evaluate the predictions \nfrom flixster.mahout_recommender import *\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.evaluate_predictions(MahoutMovieRecommender)\n\n\n# evaluate mahout recommender - recommendations\n# \n\n# export training data\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv')\n\n# run mahout train, test -> flixster_recommendations.csv\ncd ../mahout/mahoutrec\n./unresystrecommend.sh flixster\n\n# import predictions\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_recommendations.csv')\n\n# evaluate the recommendations \nfrom flixster.mahout_recommender import *\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.evaluate_recommendations(MahoutMovieRecommender, 10)\n\n\n# hybrid\n#\n\n# build save_all_to_predictions must be true\n./build.sh flixster\n\n# export as usual\n\n# export training data\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv')\n\n# export test data\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_test.csv')\n\n# run mahout train, test -> flixster_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh flixster\n\n# evaluate what we have and save the predictions\nfrom flixster.evaluation import *\nfrom flixster.recommender import *\nMovieRecommenderEvaluator.evaluate_predictions(MovieRecommender, save_predictions=True)\n\n# update unresyst predictions with the obtained\nfrom flixster.recommender import *\nMovieRecommender.update_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_predictions.csv')\n\n# run evaluation\n./flixstereval.sh preds dontbuild\n\n\n# below\n#\n\n# build, \n#save_all_to_predictions must be true\n./build.sh flixster\n\n# evaluate and save the predictions\nfrom flixster.evaluation import *\nfrom flixster.recommender import *\nMovieRecommenderEvaluator.evaluate_predictions(MovieRecommender, save_predictions=True)\n\n# export predictions\nfrom flixster.recommender import *\nMovieRecommender.export_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv')\n\n# export test data\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_test.csv')\n\n# run mahout train, test -> flixster_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh flixster\n\n# import predictions\nfrom flixster.mahout_recommender import *\nMahoutMovieRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_predictions.csv')\n\n# evaluate the predictions \nfrom flixster.mahout_recommender import *\nfrom flixster.evaluation import *\nMovieRecommenderEvaluator.evaluate_predictions(MahoutMovieRecommender)\n\n\n# Travel\n#\n#\n\n# select test data - obecne\nfrom travel.models import *\nTourOrderEvalPair.select()\n\n# evaluate mahout recommender - recommendations\n# \n\n# export training data\nfrom travel.mahout_recommender import 
*\nMahoutOrderTourRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_train.csv')\n\n# run mahout train, test -> travel_recommendations.csv\ncd ../mahout/mahoutrec\n./unresystrecommend.sh travel \n\n# import predictions\nfrom travel.mahout_recommender import *\nMahoutOrderTourRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_recommendations.csv')\n\n# evaluate the recommendations \nfrom travel.mahout_recommender import *\nfrom travel.evaluation import *\nOrderTourRecommenderEvaluator.evaluate_recommendations(MahoutOrderTourRecommender, 10)\n\n# evaluate mahout recommender - predictions\n# \n\n# export training data\nfrom travel.mahout_recommender import *\nMahoutOrderTourRecommender.export_data('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_train.csv')\n\n# export test data\nfrom travel.evaluation import *\nOrderTourRecommenderEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_test.csv')\n\n# run mahout train, test -> travel_recommendations.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh travel \n\n# import predictions\nfrom travel.mahout_recommender import *\nMahoutOrderTourRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_predictions.csv')\n\n# evaluate the predictions\nfrom travel.evaluation import *\nfrom travel.mahout_recommender import *\nOrderTourRankEvaluator.evaluate_predictions(MahoutOrderTourRecommender)\n\n\n# below\n#\n\n# build, \n#save_all_to_predictions must be true\n./build.sh travel\n\n# evaluate and save the predictions\nfrom travel.evaluation import *\nfrom travel.recommender import *\nOrderTourRecommenderEvaluator.evaluate_predictions(OrderTourRecommender, save_predictions=True)\n\n# export predictions\nfrom travel.recommender import *\nOrderTourRecommender.export_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_train.csv')\n\n# export test data\nfrom travel.evaluation import *\nOrderTourRecommenderEvaluator.export_evaluation_pairs('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_test.csv')\n\n# run mahout train, test -> flixster_predictions.csv\ncd ../mahout/mahoutrec\n./unresystpredict.sh travel\n\n# update unresyst predictions with the obtained\nfrom travel.recommender import *\nOrderTourRecommender.update_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_predictions.csv')\n\n# run evaluation\n./traveleval.sh preds dontbuild\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#############################\n\n# import predictions\nfrom travel.mahout_recommender import *\nMahoutOrderTourRecommender.import_predictions('/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_predictions.csv')\n\n# evaluate the predictions \nfrom travel.mahout_recommender import *\nfrom travel.evaluation import *\nOrderTourRankEvaluator.evaluate_predictions(MahoutOrderTourRecommender)\n\n" }, { "alpha_fraction": 0.5666928291320801, "alphanum_fraction": 0.5733941793441772, "avg_line_length": 42.68224334716797, "blob_id": "289c86e2785511d7f624b95de018ebc8dd829c1d", "content_id": "450b9f3c477029466ae38cc5422f510325ccfca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14027, "license_type": "no_license", "max_line_length": 118, "num_lines": 321, "path": "/code/adapter/unresyst/algorithm/algorithm.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The main classes of the algorithm package\"\"\"\n\nfrom django.db.models import Q\nfrom 
django.db.models import Avg\n\nfrom base import BaseAlgorithm\nfrom unresyst.constants import *\nfrom unresyst.models.abstractor import PredictedRelationshipDefinition, \\\n RelationshipInstance\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.recommender.rules import BaseRelationship\n\nclass Algorithm(BaseAlgorithm):\n \"\"\"The original deprecated implementation mixing combinator, compilator and algorithm. \n \n A simple implementation of a recommender algorithm.\n \n The remove_predicted_from_recommendations is implemented by adding a zero\n prediction for all pairs in predicted relationship. \n \n The get_recommendation function returns only recommendations with expectancy\n above zero or the expectancy_limit.\n \"\"\"\n \n N_NEIGHBOURHOOD = 10\n \"\"\"The maximum size of the neighbourhood, from which the similar items are taken\"\"\"\n\n # Build phase:\n #\n \n def build(self, recommender_model):\n \"\"\"Build the recommender - create the instances of the \n RelationshipPredictionInstance model where there is some simple \n prediction available. Where there isn't, leave it.\n \"\"\" \n\n\n\n # Recommend phase:\n #\n\n\n def get_relationship_prediction(self, recommender_model, dn_subject, dn_object, remove_predicted):\n \"\"\"See the base class for the documentation.\n \"\"\"\n # if predicted should be removed and the pair is in the predicted_rel, \n # return the special expectancy value\n if remove_predicted: \n # all predicted relationships\n qs_predicted = RelationshipInstance.filter_predicted(recommender_model)\n\n # the relationship between dn_subject and dn_object\n qs_predicted_rel = RelationshipInstance.filter_relationships(dn_subject, dn_object, queryset=qs_predicted)\n\n # if the prediction for the pair exists\n if qs_predicted_rel:\n \n # return the special expectancy value \n assert len(qs_predicted_rel) == 1\n predicted = qs_predicted_rel[0]\n\n return self._get_already_in_relatinship_prediction(\n recommender_model=recommender_model,\n predicted_relationship=predicted)\n \n \n # filter the predictions for recommender\n qs_rec_pred = RelationshipPredictionInstance.objects.filter(\n recommender=recommender_model)\n\n # filter the predictions for the given pair \n qs_pred = RelationshipPredictionInstance.filter_relationships(\n object1=dn_subject,\n object2=dn_object,\n queryset=qs_rec_pred) \n \n # if available return it\n if qs_pred: \n assert len(qs_pred) == 1\n return qs_pred[0]\n \n # if it's not available, maybe it wasn't in the N_NEIGHBOURHOOD, \n # so try finding it in aggregates \n so1, so2 = BaseRelationship.order_arguments(dn_subject, dn_object)\n\n # the Subject-object aggregate (just in case it hasn't been built) \n qs_rels = AggregatedRelationshipInstance.objects.filter(\n subject_object1=so1,\n subject_object2=so2,\n recommender=recommender_model)\n \n # if found return it\n if qs_rels:\n assert len(qs_rels) == 1\n\n pred = RelationshipPredictionInstance(\n subject_object1=dn_subject,\n subject_object2=dn_object,\n description=qs_rels[0].description,\n recommender=recommender_model,\n expectancy=qs_rels[0].expectancy\n )\n pred.save()\n return pred\n\n # the definition of the predicted relationship\n d = PredictedRelationshipDefinition.objects.get(recommender=recommender_model)\n \n # try finding the similar entity to the one liked by so1\n # content-based \n \n # get similarities starting the traverse 
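# To keep the control flow above readable, here is the lookup cascade this
# method walks through, as an editorial sketch (step names refer to code in
# this class; the summary is a simplification, not part of the original flow):
#
#   1. remove_predicted and the pair is already in the predicted
#      relationship                                    -> special "already liked" value
#   2. a stored RelationshipPredictionInstance exists  -> return it
#   3. an AggregatedRelationshipInstance exists        -> promote it to a prediction
#   4. the object is similar to something the subject
#      likes (content-based traverse below)            -> average expectancy
#   5. the subject is similar to someone who likes the
#      object (collaborative traverse below)           -> average expectancy
#   6. nothing found                                   -> uncertain prediction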
with the similarity.\n qs_sim1 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model)\\\n .filter(\n Q(\n # take so2 as stable - in the subject_object2 position of the similarity\n subject_object2=so2,\n \n # traverse from the other object in similarity (subject_object1) through\n # the relationship instance, its subject (subject_object1) must be so1\n subject_object1__relationshipinstance_relationships2__subject_object1=so1,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships2__definition=d) | \\\n Q( \n # take so2 as stable again, now in the subject_object1 position of the similarity\n subject_object1=so2, \n \n # traverse from the other through relationship to so1\n subject_object2__relationshipinstance_relationships2__subject_object1=so1,\n \n # the definition again must be the predicted\n subject_object2__relationshipinstance_relationships2__definition=d))\n\n # if found return\n if qs_sim1:\n\n # if found return the average TODO nejak jinak\n avg = qs_sim1.aggregate(Avg('expectancy'))\n\n pred = RelationshipPredictionInstance(\n subject_object1=dn_subject,\n subject_object2=dn_object,\n description=qs_sim1[0].description,\n recommender=recommender_model,\n expectancy=avg['expectancy__avg']\n )\n pred.save()\n return pred \n\n \n # try finding the similar entity (user) to entity that liked so2\n # cf\n qs_sim2 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model)\\\n .filter(\n Q(\n # take so1 as stable - in the subject_object2 position of the similarity\n subject_object2=so1,\n \n # traverse from the other object in similarity (subject_object1) through\n # the relationship instance, its object (subject_object2) must be so2\n subject_object1__relationshipinstance_relationships1__subject_object2=so2,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships1__definition=d) | \\\n Q( \n # take so1 as stable again, now in the subject_object1 position of the similarity\n subject_object1=so1, \n \n # traverse from the other through relationship to so2\n subject_object2__relationshipinstance_relationships1__subject_object2=so2,\n \n # the definition again must be the predicted\n subject_object2__relationshipinstance_relationships1__definition=d))\n\n # if found return the average TODO nejak jinak\n if qs_sim2:\n avg = qs_sim2.aggregate(Avg('expectancy'))\n \n pred = RelationshipPredictionInstance(\n subject_object1=dn_subject,\n subject_object2=dn_object,\n description=qs_sim2[0].description,\n recommender=recommender_model,\n expectancy=avg['expectancy__avg']\n )\n pred.save()\n\n return pred \n \n \n # return the uncertain\n return self._get_uncertain_prediction(\n recommender_model=recommender_model, \n dn_subject=dn_subject, \n dn_object=dn_object\n )\n \n @classmethod\n def get_recommendations(self, recommender_model, dn_subject, count, expectancy_limit, remove_predicted):\n \"\"\"See the base class for documentation.\n \"\"\" \n \n # get the recommendations ordered by the expectancy from the largest\n recommendations = RelationshipPredictionInstance\\\n .get_relationships(obj=dn_subject)\\\n .filter(recommender=recommender_model, \n expectancy__gt=expectancy_limit)\\\n .order_by('-expectancy') \n \n # remove the predicted from the recommendations if it should be done \n if remove_predicted:\n \n # get objects that are already liked\n #\n \n qs_predicted = 
RelationshipInstance.filter_predicted(recommender_model) \n\n # get ids of subjectobjects where dn_subject appears\n qs_predicted = qs_predicted.filter( \n Q(subject_object1=dn_subject) | Q(subject_object2=dn_subject) \n ).values_list('subject_object1__pk', 'subject_object2__pk')\n \n # flatten it and take only the objects\n import itertools \n predicted_obj_ids = [i for i in itertools.chain(*qs_predicted) if i <> dn_subject.pk] \n\n # remove the already liked objects\n #\n \n # if it is a subjectobject we have to take both places\n if dn_subject.entity_type == ENTITY_TYPE_SUBJECTOBJECT:\n recommendations = recommendations.exclude(\n Q(subject_object1__id__in=predicted_obj_ids) \n | Q(subject_object2__id__in=predicted_obj_ids)\n )\n # otherwise one field is enough\n else:\n recommendations = recommendations.exclude(\n subject_object2__id__in=predicted_obj_ids\n )\n \n # if there should be more recommendations than we have and \n # the expectancy limit is below the uncertain, add uncertain \n # predictions\n if recommendations.count() < count \\\n and expectancy_limit < UNCERTAIN_PREDICTION_VALUE: \n\n object_ent_type = ENTITY_TYPE_SUBJECTOBJECT \\\n if recommender_model.are_subjects_objects \\\n else ENTITY_TYPE_OBJECT \n\n # all objects excluding the objects recommended to dn_subject\n # the last exclude is just for SO entity_type - we won't recommend \n # subject to himself\n uncertain_objects = SubjectObject.objects\\\n .filter(entity_type=object_ent_type, recommender=recommender_model)\\\n .exclude(relationshippredictioninstance_relationships1__subject_object2=dn_subject)\\\n .exclude(relationshippredictioninstance_relationships2__subject_object1=dn_subject)\\\n .exclude(pk=dn_subject.pk)\n \n # if predicted should be removed remove them\n if remove_predicted:\n uncertain_objects = uncertain_objects.exclude(id__in=predicted_obj_ids)\n \n # divide the recommendations into groups exp => uncertain, exp < uncertain\n positive_preds = recommendations.filter(\n expectancy__gte=UNCERTAIN_PREDICTION_VALUE)\n negative_preds = recommendations.filter(\n expectancy__lt=UNCERTAIN_PREDICTION_VALUE)\n \n # how many non-positive recommendations we want\n required_count = count - positive_preds.count()\n uncertain_count = uncertain_objects.count()\n \n # if there're enough uncertains to fill the recommendations, \n # clear the negative\n if uncertain_count >= required_count:\n uncertain_required_count = required_count\n negative_preds = []\n else:\n # otherwise we want all uncertains we have\n uncertain_required_count = uncertain_count\n\n negative_required_count = required_count - uncertain_count\n \n # and for the rest use negative - only those above limit\n negative_preds = negative_preds.filter(\n expectancy__gt=expectancy_limit)[:negative_required_count]\n\n uncertain_preds = []\n \n # construct the uncertain recommendations\n for obj in uncertain_objects[:uncertain_required_count]:\n pred = self._get_uncertain_prediction(recommender_model, dn_subject, obj)\n uncertain_preds.append(pred)\n\n return list(positive_preds) + uncertain_preds + list(negative_preds)\n\n # apply the limit for count\n return list(recommendations[:count]) \n \n\n\n\nclass PredictOnlyAlgorithm(Algorithm):\n \"\"\"An algorithm that doesn't need to be built, performs only predictions.\"\"\"\n \n @classmethod\n def build(self, recommender_model):\n \"\"\"Do nothing\"\"\"\n return\n\n @classmethod\n def get_recommendations(self, recommender_model, dn_subject, count, expectancy_limit, remove_predicted): \n \"\"\"Raise an 
error\"\"\"\n raise NotImplementedError() \n\n" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.8039215803146362, "avg_line_length": 28.14285659790039, "blob_id": "a484a682b0fc5c982c0fc6ff7cf9830676019511", "content_id": "64faa2b3c3f268bf2b3bbde6854976dfc739be87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 65, "num_lines": 7, "path": "/UnresystCD/code/adapter/unresyst/models/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The models for universal object and subject representation.\"\"\"\n\nfrom common import *\nfrom abstractor import *\nfrom aggregator import *\nfrom algorithm import *\nfrom evaluation import BaseEvaluationPair\n" }, { "alpha_fraction": 0.767624020576477, "alphanum_fraction": 0.7780678868293762, "avg_line_length": 41.66666793823242, "blob_id": "2e74aacfa2f641a63cab4215ad7f65d7f104fbb7", "content_id": "6c8174bf9ac2980362b139acc357b55dc8bfb19b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 383, "license_type": "no_license", "max_line_length": 140, "num_lines": 9, "path": "/README.md", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "**a recommender you can't resist ...**\n\nA prototype of a Universal Recommender System created as a part of my Master thesis.\n\nThe thesis was selected to the second round of the 2011 ACM Spy competition. See http://www.acm-spy.cz/o-soutezi-acm-spy/galerie-nejlepsich/\n\n[Mahout tutorial created in the process](docs/CreateMahoutRecommender.md)\n\n![Awesome Mindmap](docs/pics/thesis.png)" }, { "alpha_fraction": 0.6669912338256836, "alphanum_fraction": 0.6689386367797852, "avg_line_length": 21.30434799194336, "blob_id": "0d4fcd4835f521b92feba9b34fdf57d2934fa3db", "content_id": "dc47bba31c34243b602de93b48388b652365c4d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 180, "num_lines": 46, "path": "/code/adapter/flixstereval.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# run the evaluation- recommendations. 
parameters \n# recs -> recommendations\n# preds -> predictions\n# dontbuild-> don't build\n\nBUILD=true\nRECS=false\nPREDS=false\n\nfor param in $*;\ndo \n case $param in\n 'recs')\n RECS=true\n ;;\n 'preds')\n PREDS=true\n ;;\n 'dontbuild')\n BUILD=false\n ;;\n esac\ndone\n\nif [ $BUILD = true ]\nthen\n # build\n echo \"Building...\"\n echo \"from flixster.recommender import *; MovieRecommender.build()\" | python ./manage.py shell\nfi\n\nif [ $RECS = true ]\nthen\n echo \"Evaluating recommendations...\"\n echo \"from flixster.evaluation import *; from flixster.recommender import *; MovieRecommenderEvaluator.evaluate_recommendations(MovieRecommender, 10)\"| python ./manage.py shell\nfi\n\nif [ $PREDS = true ]\nthen\n echo \"Evaluating predictions...\"\n echo \"from flixster.evaluation import *; from flixster.recommender import *; MovieRecommenderEvaluator.evaluate_predictions(MovieRecommender)\"| python ./manage.py shell\nfi\n\necho \"\"\n\n" }, { "alpha_fraction": 0.6990595459938049, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 25.41666603088379, "blob_id": "d3ada56d0457dd1687f89e0a71e4c80664ce37f2", "content_id": "41e744138d8492ced894d29c82cb75314744fc87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 62, "num_lines": 12, "path": "/UnresystCD/code/adapter/lastfm/constants.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The constants for last.fm\"\"\"\n\nMAX_LENGTH_NAME = 200\n\nCROSS_VALIDATION_COUNT = 5\n\"\"\"The number of test sets for the cross-validation\"\"\"\n\nSUCCESS_LIMIT = 0.5\n\"\"\"The limit above which the pair is considered successful.\"\"\"\n\nEXPECTED_EXPECTANCY_LISTENED = 1.0\n\"\"\"The expected expectancy for the listened to artists.\"\"\"\n\n\n" }, { "alpha_fraction": 0.5753098130226135, "alphanum_fraction": 0.586749255657196, "avg_line_length": 35.48695755004883, "blob_id": "91f7f98bac6c9825d846bc4908b450a2160faab3", "content_id": "88fedd9c7d16e9e763b85e8db64f69241d3ae8f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12588, "license_type": "no_license", "max_line_length": 102, "num_lines": 345, "path": "/UnresystCD/code/adapter/unresyst/models/symmetric.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Support for symmetric relationships\"\"\"\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.core.exceptions import MultipleObjectsReturned\n\nfrom unresyst.exceptions import SymmetryError, UnresystError as BaseError\n\nclass SymmetricalRelationship(models.Model):\n \"\"\"A base class for all symmetrical relationship models. The relationship can't be \n between the same object.\n \n Main custom methods:\n - rel.get_related(obj): object related to obj on rel\n - rel.contains_object(obj): does the rel contain obj?\n\n Only for additional_unique == ():\n - cls.get_related_objects(obj): objects related to obj\n - cls.get_relationships(obj): relationship objects related to obj\n - cls.get_relationship(obj1, obj2): get relationship between obj1 and obj2, or None\n - cls.are_related(obj1, obj2): are obj1 and obj2 related?\n\n Also for additional_unique <> ():\n - cls.filter_relationships(obj1, obj2, queryset=None): filter relationships between\n obj1 and obj2, from queryset or from all relationships\n\n \"\"\"\n \n attr_name1 = ''\n \"\"\"The name of the first attribute pointing on some model. 
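A minimal sketch of a concrete SymmetricalRelationship subclass; the Friendship model and its fields are hypothetical, only the attr_name1/attr_name2 contract comes from this class:

class Friendship(SymmetricalRelationship):
    user1 = models.ForeignKey('auth.User', related_name='friendships1')
    user2 = models.ForeignKey('auth.User', related_name='friendships2')

    attr_name1 = 'user1'
    attr_name2 = 'user2'

# typical queries, both symmetric:
#   Friendship.get_related_objects(joe)
#   Friendship.are_related(joe, jane)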
Should be overriden\"\"\"\n \n attr_name2 = ''\n \"\"\"The name of the second attribute pointing on some model. Should be overriden\"\"\"\n \n additional_unique = ()\n \"\"\"A tuple containing names of attributes that are unique together with the symmetrical\n relationship pair. Like unique_together = (('attr1', 'attr2'), 'additional_unique')\n \"\"\"\n\n class Meta:\n abstract = True\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n object1 = getattr(self, self.attr_name1)\n object2 = getattr(self, self.attr_name2)\n \n return u\"%s - %s\" % (object1, object2)\n \n def save(self, *args, **kwargs):\n \"\"\"Overrides the save method in order to control the symmetricity.\n\n @raise SymmetryError: if there's something wrong with the saved entities in the\n means of symmetry \n \"\"\"\n \n # get the objects that should be saved as related\n object1 = getattr(self, self.attr_name1)\n object2 = getattr(self, self.attr_name2)\n \n # if they're identical, raise an error\n # the equality of entity models is already solved by overriding the __eq__ method\n if object1 == object2: \n raise SymmetryError(\n message=\"The related objects can't be identical\",\n object1=object1, \n object2=object2)\n \n # if the relationship exists in some direction\n # raise another error.\n # \n # try the saving direction\n self.__check_save(object1, object2)\n \n # try the other direction\n self.__check_save(object2, object1)\n \n # save the relationship \n return super(SymmetricalRelationship, self).save(*args, **kwargs) \n \n \n def __check_save(self, object1, object2):\n \"\"\"Check whether the the relationship between the objects in the given direction\n can be saved. If not, throw an exception. Includes checks for update.\"\"\"\n \n # create a dictionary of additional kwargs\n additional_kwargs = {}\n for ad_un in self.additional_unique:\n additional_kwargs[ad_un] = getattr(self, ad_un)\n \n # try finding the relation in the database\n rel = self.__class__.__get_relation(object1, object2, **additional_kwargs)\n\n # if we have found a relation between object1 and object2\n if rel:\n \n # if it's new value to insert (no id set)\n # or it's a value to update, and the found relationship isn't the same \n # as the one we're saving\n if not self.id or \\\n (self.id and self.id != rel.id):\n \n # than it's an error. 
\n raise SymmetryError(\n message=\"The relationship is already in the database.\",\n object1=object1, \n object2=object2) \n\n @classmethod\n def __get_related_objects_in_attribute(cls, obj, attr_name1, attr_name2):\n \"\"\"Get objects related to obj that are in the attr_name2 attribute...\n obj is in the attr_name1 attribute.\"\"\"\n \n # construct the kwargs\n kwargs = {attr_name1: obj}\n \n # get the relationships where obj is in the first attribute\n relationships = cls.objects.filter(**kwargs)\n \n # get the objects that are in the second attribute\n related_objects = [getattr(relationship, attr_name2) for relationship in relationships]\n \n return related_objects\n \n\n @classmethod \n def get_related_objects(cls, obj):\n \"\"\"Get objects that are in relationship with obj.\n \n Returns just objects (not the whole binding items).\n \n @type obj: core.models.BaseEntity\n @param obj: the object whose related objects should be got\n \n @rtype: list of core.models.BaseEntity\n @return: the list of objects related to obj\n \"\"\"\n # relationships where obj is as the first\n related_objects1 = cls.__get_related_objects_in_attribute(obj, cls.attr_name1, cls.attr_name2)\n\n # relationships where obj is as the second \n related_objects2 = cls.__get_related_objects_in_attribute(obj, cls.attr_name2, cls.attr_name1)\n \n return related_objects1 + related_objects2\n \n @classmethod \n def get_relationships(cls, obj):\n \"\"\"Get bindings for objects that are in relationship with the object obj.\n \n Returns whole binding items (not just objects).\n \n The related objects can be got using get_related. Example:\n\n connections = get_relationships(pepa)\n \n for connection in connections:\n print connection.connection_comment\n \n object_connected_to_pepa = connection.get_related(pepa)\n # ...\n \n @type obj: core.models.BaseEntity\n @param obj: the object whose relationships should be got\n \n @rtype: QuerySet of relationship class\n @return: relationships related to obj \n \"\"\"\n # get relationships where there is cls at first or second place\n # \n kwargs1 = {cls.attr_name1: obj}\n kwargs2 = {cls.attr_name2: obj}\n\n from django.db.models import Q\n q = cls.objects.filter(\n Q(**kwargs1) | Q(**kwargs2)\n )\n\n return q\n \n \n def get_related(self, obj):\n \"\"\"On a relationship, get the object that is related to obj, no matter in which\n attribute it is.\n \n To be used in the combination with the get_relationships method.\n\n @type obj: core.models.BaseEntity\n @param obj: the object whose related object should be got\n \n @rtype: core.models.BaseEntity\n @return: the object in the relationship no matter in which attribute it is.\n \n @raise SymmetryError: if the obj isn't a part of the relationship on which \n the method is called\n \"\"\"\n # get the objects in the relationship\n object1 = getattr(self, self.attr_name1)\n object2 = getattr(self, self.attr_name2)\n \n # if obj ist the first one, return the second\n # the equality of entity models is already solved by overriding the __eq__ method\n if obj == object1: \n return object2\n \n # if it's the second, return the first\n if obj == object2:\n return object1\n \n # otherwise return an error\n raise SymmetryError(\n message=\"The relationship object doesn't contain the '%s' object\" % obj,\n object1=object1, \n object2=object2) \n \n def contains_object(self, obj):\n \"\"\"Does the relationship contain the obj object? 
On one of the sides.\n \n @type obj: object\n @param obj: the object to check whether it's in the relationship\n \n @rtype: bool\n @return: Whether the object is contained on one side of the relationship\n \"\"\"\n\n # get the objects in the relationship\n object1 = getattr(self, self.attr_name1)\n object2 = getattr(self, self.attr_name2) \n \n return obj==object1 or obj==object2\n\n @classmethod\n def __contains_relation(cls, object1, object2):\n \"\"\"Does model in database contain the relationship of object1 and object2?\"\"\"\n \n return bool(cls.__get_relation(object1, object2))\n \n @classmethod\n def __get_relation(cls, object1, object2, **kwargs): \n \"\"\"Get the relation of the two objects in the given direction. If it doesn't exist\n return None.\n kwargs are additional arguments for filter, when additional_unique <> ()\n \"\"\"\n \n # prepare kwargs for the filter function\n filter_kwargs = {\n cls.attr_name1: object1,\n cls.attr_name2: object2\n }\n filter_kwargs.update(kwargs)\n \n # try to get the relationship\n query = cls.objects.filter(**filter_kwargs)\n\n # if some exists\n if query:\n if query.count() > 1:\n raise MultipleObjectsReturned(\n 'Too many obtained relationships for the pair %s, %s.' % (object1, object2))\n \n # return it \n return query[0]\n \n # otherwise there's nothing\n return None \n \n \n @classmethod\n def get_relationship(cls, object1, object2):\n \"\"\"Get the relationship between the two objects if they're related.\n If not return None.\n \n Takes into account both directions of the symmetry.\n \n @type object1: Model\n @param object1: the first object \n \n @type object2: Model\n @param object2: the second object \n \n @rtype: relationship object\n @return: the relationship between the two objects or None if they aren't related\n \"\"\" \n # try the first direction\n relationship = cls.__get_relation(object1, object2)\n if relationship:\n return relationship \n \n # try the second\n return cls.__get_relation(object2, object1) \n\n @classmethod\n def filter_relationships(cls, object1, object2, queryset=None):\n \"\"\"Filter relationships so that it contains only the ones between \n object1 and object2. \n For models where sets (object1, object2) are unique does the same \n as get_relationships (except returning a queryset)\n\n @type object1: Model\n @param object1: the first object \n \n @type object2: Model\n @param object2: the second object \n \n @type queryset: django queryset\n @param queryset: the queryset to filter (optional). If not passed, all\n objects of the model are filtered, otherwise the passed queryset is\n filtered.\n \n @rtype: django queryset\n @return: queryset containing relationships between object1 and object2\n \"\"\"\n if queryset is None:\n queryset = cls.objects.all()\n # prepare the filter kwargs:\n kwargs1 = {\n cls.attr_name1: object1,\n cls.attr_name2: object2\n } \n kwargs2 = {\n cls.attr_name1: object2,\n cls.attr_name2: object1\n }\n\n # filter relationships that have the pair in the given or the opposite\n # direction \n return queryset.filter(Q(**kwargs1) | Q(**kwargs2)) \n\n @classmethod\n def are_related(cls, object1, object2):\n \"\"\"Is the object related to other object?\n \n Takes into account both directions of the symmetry.\n \n @type object1: Model\n @param object1: the first object \n \n @type object2: Model\n @param object2: the second object \n \n @rtype: bool\n @return: True if they're in relationship, False otherwise. 
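A short usage sketch for the query helpers defined here, again with the hypothetical Friendship model; both argument orders reach the same stored row:

rel = Friendship.get_relationship(joe, jane)        # instance or None
qs = Friendship.filter_relationships(joe, jane)     # queryset, either direction
if Friendship.are_related(joe, jane):
    print "already friends"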
\n \"\"\" \n \n return cls.__contains_relation(object1, object2) \\\n or cls.__contains_relation(object2, object1)\n" }, { "alpha_fraction": 0.5918495059013367, "alphanum_fraction": 0.5918495059013367, "avg_line_length": 29.673076629638672, "blob_id": "c3de9e6129bcafd863cd330ac281345fb563cc8c", "content_id": "7749ad6678f29b47d38275ab1ff6d16d157c5669", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1595, "license_type": "no_license", "max_line_length": 83, "num_lines": 52, "path": "/UnresystCD/code/adapter/unresyst/recommender/predictions.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The prediction classes. Instances of the class are returned by \nthe recommender.\n\"\"\"\n\nclass RelationshipPrediction(object):\n \"\"\"The prediction of the predicted_relationship appearing between\n the given subject-object pair.\n \n @type subject: the domain-specific subject\n @ivar subject: the subject \n \n @type object_: the domain-specific object\n @ivar object_: the object\n \n @type expectancy: float\n @ivar expectancy: the estimated probability of the predict_relationship\n occuring between the subject and the object\n \n @type explanation: str\n @ivar explanation: the explanation for the prediction \n \"\"\"\n \n def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):\n \"\"\"The initializer\"\"\"\n \n self.subject = subject\n \"\"\"The subject\"\"\"\n \n self.object_ = object_\n \"\"\"The object\"\"\"\n \n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n \n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n \n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u\"%s <- %s: %f, %s\" % (\n self.subject, \n self.object_, \n self.expectancy, \n self.explanation\n )\n \n def __repr__(self):\n return \"< %s >\" % str(self.__unicode__())\n" }, { "alpha_fraction": 0.5630881786346436, "alphanum_fraction": 0.5655280351638794, "avg_line_length": 37.911563873291016, "blob_id": "0116196a027d45f1e52daa1218f4d26d0ea5222f", "content_id": "3e2969c510c10aabccea92e3412b06b5eac85d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5738, "license_type": "no_license", "max_line_length": 102, "num_lines": 147, "path": "/UnresystCD/code/adapter/unresyst/recommender/clusters.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The classes for defining clusters by users\"\"\"\n\nfrom unresyst.models.abstractor import ClusterSet, Cluster, ClusterMember\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.exceptions import ConfigurationError\nfrom unresyst.constants import *\n\nclass BaseClusterSet(object):\n \"\"\"The base class for all clusters sets. Cluster set is a set of clusters,\n e.g. 
cluster set 'Gender' contains clusters 'Male', 'Female'.\"\"\"\n \n entity_type = None\n \"\"\"The type of the entities in the clusters\"\"\" \n \n entity_format_str = None\n \"\"\"Formating string used in the description\"\"\" \n \n def __init__(self, name, weight, filter_entities, get_cluster_confidence_pairs, description=None):\n \"\"\"The initializer\"\"\"\n\n self.name = name\n \"\"\"The name of the rule/relationship.\"\"\"\n \n self.weight = weight\n \"\"\"The significance of the similarity inferred from two subject/objects\n belonging to the same cluster. A number from [0, 1]\"\"\"\n \n self.filter_entities = filter_entities\n \"\"\"A queryset of all entities of the given type (s/o/so) belonging\n to some cluster.\n \"\"\"\n \n self.get_cluster_confidence_pairs = get_cluster_confidence_pairs\n \"\"\"A generator returning pairs of cluster, confidence for the given entity.\n E.g. for 'Liars' it returns (('Punk', 0.41), ('Indie', 0.72))\n Parameters: domain specific entity\n Return: the pairs: (name of the cluster, confidence - number from [0, 1])\n \"\"\"\n \n self.description = description\n \"\"\"The description of the membership. Can contain: placeholders for \n subject/object/subjectobject and cluster.\"\"\"\n\n \n def evaluate(self):\n \"\"\"Crate the cluster set in the database, its clusters, bindings \n of subjectobjects to the clusters.\n \"\"\"\n \n if not (MIN_WEIGHT <= self.weight <= MAX_WEIGHT):\n raise ConfigurationError(\n message=(\"The set '%s' provides weight %f,\" + \n \" should be between 0 and 1. .\"\n ) % (self.name, self.weight),\n recommender=self.recommender,\n parameter_name=\"Recommender.cluster_sets\",\n parameter_value=(self.recommender.cluster_sets)\n )\n \n recommender_model = self.recommender._get_recommender_model()\n\n # create the cluster set in the database\n cluster_set = ClusterSet(\n name=self.name,\n recommender=recommender_model,\n entity_type=self.entity_type,\n weight=self.weight)\n \n cluster_set.save()\n \n # go through the entities create clusters on demand\n #\n \n for ds_entity in self.filter_entities:\n \n # get entity cluster-confidence pairs\n cluster_conf_pairs = self.get_cluster_confidence_pairs(ds_entity)\n \n # convert the entity to universal\n dn_entity = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=ds_entity, \n entity_type=self.entity_type, \n recommender=recommender_model)\n \n # go through the entity clusters\n for cluster_name, confidence in cluster_conf_pairs:\n \n # if confidence invalid through an error\n if not (MIN_CONFIDENCE <= confidence <= MAX_CONFIDENCE):\n raise ConfigurationError(\n message=(\"The cluster set '%s' provides confidence %f,\" + \n \" should be between 0 and 1. 
For cluster %s.\"\n ) % (self.name, confidence, cluster_name),\n recommender=self.recommender,\n parameter_name=\"Recommender.cluster_sets\",\n parameter_value=(self.recommender.cluster_sets)\n )\n \n # get or create the cluster \n cluster, x = Cluster.objects.get_or_create(\n name=cluster_name[:MAX_LENGTH_NAME],\n cluster_set=cluster_set)\n\n # evaluate the description\n if self.description:\n description = self.description % {\n self.entity_format_str: dn_entity.name,\n FORMAT_STR_CLUSTER: cluster_name} \n else:\n description = '' \n \n # save the binding of the cluster to the dn_entity\n member = ClusterMember.objects.create(\n cluster=cluster,\n member=dn_entity,\n confidence=confidence,\n description=description)\n \n print \" %d clusters and %d cluster members for '%s' cluster set created.\" \\\n % (Cluster.objects.filter(cluster_set=cluster_set).count(), \n ClusterMember.objects.filter(cluster__cluster_set=cluster_set).count(),\n self.name)\n \n \n \n\nclass SubjectClusterSet(BaseClusterSet):\n \"\"\"Cluster set for subjects\"\"\"\n \n entity_type = ENTITY_TYPE_SUBJECT\n \n entity_format_str = FORMAT_STR_SUBJECT\n\n \nclass ObjectClusterSet(BaseClusterSet):\n \"\"\"Cluster set for objects\"\"\"\n \n entity_type = ENTITY_TYPE_OBJECT\n\n entity_format_str = FORMAT_STR_OBJECT\n \nclass SubjectObjectClusterSet(BaseClusterSet):\n \"\"\"Clusters for recommenders where subject domain == object domain\"\"\"\n \n entity_type = ENTITY_TYPE_SUBJECTOBJECT\n \n entity_format_str = FORMAT_STR_SUBJECTOBJECT\n \n \n" }, { "alpha_fraction": 0.6540930867195129, "alphanum_fraction": 0.6540930867195129, "avg_line_length": 35.64706039428711, "blob_id": "4d7905dc524c2474531e811f3a1aae03f13e6d2f", "content_id": "f8b6a8b980ba159a6a68de40716df35f3b869fa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1246, "license_type": "no_license", "max_line_length": 91, "num_lines": 34, "path": "/code/adapter/unresyst/algorithm/aggregating_algorithm.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The aggregating algorithm class\"\"\"\n\nfrom base import BaseAlgorithm\n\nclass AggregatingAlgorithm(BaseAlgorithm):\n \"\"\"The algorithm that aggregates the similarity relationships and biases,\n uses the compiling algorithm as its inner_algorithm.\n \"\"\"\n \n def __init__(self, inner_algorithm, aggregator):\n \"\"\"The initializer\"\"\"\n \n super(AggregatingAlgorithm, self).__init__(inner_algorithm=inner_algorithm)\n \n self.aggregator=aggregator\n \"\"\"The aggregator that will be used for aggregating the relationships\n and biases\"\"\"\n \n def build(self, recommender_model):\n \"\"\"See the base class for documentation.\n \n Aggregates and calls the inner algorithm build\n \"\"\"\n \n # aggregate the relationships and rules\n self.aggregator.aggregate_rules_relationships(\n recommender_model=recommender_model) \n \n # aggregate the biases\n self.aggregator.aggregate_biases(recommender_model=recommender_model)\n \n print \"Rules, relationships and biases aggregated. 
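The cluster-set classes in the clusters.py record above turn a user-supplied (cluster name, confidence) generator into Cluster and ClusterMember rows during evaluate(). A hedged sketch of declaring an ObjectClusterSet for the demo ShoePair model follows; the category field and the %(object)s / %(cluster)s placeholder names are assumptions, while the constructor keywords come from the record above.

```python
# Hypothetical ObjectClusterSet declaration based on clusters.py above.
# ShoePair comes from the demo app used in the tests; its 'category' relation
# and the description placeholders are assumptions for illustration.
from unresyst.recommender.clusters import ObjectClusterSet
from demo.models import ShoePair

category_cluster_set = ObjectClusterSet(
    name='Shoe category',
    weight=0.5,                  # evaluate() rejects weights outside [0, 1]
    filter_entities=ShoePair.objects.filter(category__isnull=False),
    # one (cluster name, confidence in [0, 1]) pair per membership
    get_cluster_confidence_pairs=lambda shoe: ((shoe.category.name, 1.0),),
    description='%(object)s belongs to the %(cluster)s category.')
```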
Building the inner algorithm...\"\n \n super(AggregatingAlgorithm, self).build(recommender_model=recommender_model)\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 34, "blob_id": "679060bf3bd6466a49a1124ea8f36296e390d08a", "content_id": "833db83b59cc5685a7a8c8a23cf84f40fb5faad7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/code/adapter/unresyst/abstractor/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The package contains all classes for the abstractor layer.\"\"\"\n\nfrom abstractor import BasicAbstractor\n" }, { "alpha_fraction": 0.649068295955658, "alphanum_fraction": 0.6537266969680786, "avg_line_length": 25.58333396911621, "blob_id": "60fb7f1a8af2838251c1467c5ac671819f0b936d", "content_id": "24ec29c615b0a824413690c8dc8206e22887bb0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 66, "num_lines": 48, "path": "/UnresystCD/code/adapter/demo/views.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The views for the demo application\"\"\"\n\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, Context, loader\nfrom django.shortcuts import render_to_response, get_object_or_404\n\n\nfrom models import User\nfrom recommender import ShoeRecommender\n\ndef view_recommendations(request, user_name):\n \"\"\"The view for the demo recommendations\"\"\"\n \n # get the user and user list from db\n user = get_object_or_404(User, name__iexact=user_name)\n user_list = User.objects.all()\n \n # create user recommendations\n recommendations = ShoeRecommender.get_recommendations(user)\n \n context = {\n 'user': user,\n 'user_list': user_list,\n 'recommendations': recommendations,\n 'show_expectancy': True,\n }\n\n return render_to_response(\n 'demo/shoe_recommendations.html',\n dictionary=context, \n context_instance=RequestContext(request)\n )\n \ndef view_home_page(request):\n \"\"\"The view for the demo home page\"\"\"\n\n # get the user list from db\n user_list = User.objects.all()\n \n context = {\n 'user_list': user_list,\n }\n\n return render_to_response(\n 'demo/home_page.html',\n dictionary=context, \n context_instance=RequestContext(request)\n ) \n \n \n" }, { "alpha_fraction": 0.7196261882781982, "alphanum_fraction": 0.7196261882781982, "avg_line_length": 34.66666793823242, "blob_id": "289625693b56eb880e7853b71d3a9da2dfe44d3a", "content_id": "88fdadf5a9aed29723056a56032d930de9d36b85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 80, "num_lines": 3, "path": "/code/adapter/unresyst/tests/test_update.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Testing the update\"\"\"\n\n# idea - porovnat vysledky recommenderu po updatu a po rebuildu - mely by byt stejne\n" }, { "alpha_fraction": 0.521481454372406, "alphanum_fraction": 0.5237036943435669, "avg_line_length": 41.44094467163086, "blob_id": "f4c944268e3c3cf02f08c1ccd2f6057c2662cc16", "content_id": "034d4cc30e78aff70b78a057e7aa4392dfe5b7f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5400, "license_type": "no_license", "max_line_length": 
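AggregatingAlgorithm above follows a wrapper pattern: build() first runs the aggregator over rules, relationships and biases, then delegates to the wrapped inner algorithm. A self-contained sketch of that control flow with stub collaborators (the real classes live in unresyst.algorithm and are only partly shown in this dump):

```python
# Stub re-creation of the wrapper pattern used by AggregatingAlgorithm above;
# all class names here are illustrative stand-ins, not the unresyst classes.
class InnerAlgorithm(object):
    def build(self, recommender_model):
        print 'building inner algorithm for %s' % recommender_model

class Aggregator(object):
    def aggregate_rules_relationships(self, recommender_model):
        print 'aggregating rules and relationships'
    def aggregate_biases(self, recommender_model):
        print 'aggregating biases'

class AggregatingWrapper(object):
    def __init__(self, inner_algorithm, aggregator):
        self.inner_algorithm = inner_algorithm
        self.aggregator = aggregator

    def build(self, recommender_model):
        # aggregate first, then hand over to the wrapped algorithm,
        # mirroring AggregatingAlgorithm.build above
        self.aggregator.aggregate_rules_relationships(recommender_model)
        self.aggregator.aggregate_biases(recommender_model)
        self.inner_algorithm.build(recommender_model)

AggregatingWrapper(InnerAlgorithm(), Aggregator()).build('demo recommender')
```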
91, "num_lines": 127, "path": "/code/adapter/unresyst/tests/test_base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The base classes for the tests used in unresyst\"\"\"\n\nfrom django.test import TestCase\n\nfrom unresyst.models.common import SubjectObject \n\nfrom demo.recommender import ShoeRecommender, AverageRecommender\nfrom demo.models import User, ShoePair\n\nclass DBTestCase(TestCase):\n \"\"\"A base class for all tests which need database testing data\"\"\"\n\n def setUp(self):\n \"\"\"Insert data into the database\"\"\"\n\n # insert test data\n from demo.save_data import save_data\n save_data()\n \n def save_entities(self):\n \"\"\"Save instances of the entities to the testcase isntance\n to be called in subclasses.\n \n Not distinguishing the recommender.\"\"\"\n \n self.specific_entities = {\n 'Alice': User.objects.get(name=\"Alice\"),\n 'Bob': User.objects.get(name=\"Bob\"),\n 'Cindy': User.objects.get(name=\"Cindy\"),\n 'Daisy': User.objects.get(name=\"Daisy\"),\n 'Edgar': User.objects.get(name=\"Edgar\"),\n 'Fionna': User.objects.get(name=\"Fionna\"),\n 'Sneakers': ShoePair.objects.get(name=\"Sneakers\"),\n \"Rubber Shoes\": ShoePair.objects.get(name=\"Rubber Shoes\"),\n 'RS 130': ShoePair.objects.get(name='RS 130'),\n 'Design Shoes': ShoePair.objects.get(name='Design Shoes'),\n 'Octane SL': ShoePair.objects.get(name='Octane SL'), \n }\n \n rm = self.recommender._get_recommender_model()\n self.universal_entities = {\n 'Alice': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Alice'], \n entity_type='S', \n recommender=rm),\n 'Bob': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Bob'], \n entity_type='S', \n recommender=rm),\n 'Cindy': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Cindy'], \n entity_type='S', \n recommender=rm),\n 'Daisy': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Daisy'], \n entity_type='S', \n recommender=rm),\n 'Edgar': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Edgar'], \n entity_type='S', \n recommender=rm),\n 'Fionna': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Fionna'], \n entity_type='S', \n recommender=rm), \n 'Sneakers': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Sneakers'], \n entity_type='O', \n recommender=rm),\n 'Rubber Shoes': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Rubber Shoes'], \n entity_type='O', \n recommender=rm),\n 'RS 130': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['RS 130'], \n entity_type='O', \n recommender=rm),\n 'Design Shoes': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Design Shoes'], \n entity_type='O', \n recommender=rm), \n 'Octane SL': SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=self.specific_entities['Octane SL'], \n entity_type='O', \n recommender=rm), \n } \n\nclass TestBuild(DBTestCase):\n \"\"\"The base class performing build in the setup.\"\"\"\n\n def setUp(self):\n \"\"\"The setup for all tests - build the recommender\"\"\"\n super(TestBuild, self).setUp()\n\n # call the tested function \n ShoeRecommender.build()\n\n self.recommender = ShoeRecommender\n\nclass TestBuildAverage(DBTestCase):\n \"\"\"The base 
class performing AverageRecommender build in the setup\"\"\"\n \n def setUp(self):\n \"\"\"The setup for all tests - build the recommender\"\"\"\n super(TestBuildAverage, self).setUp()\n\n # call the tested function \n AverageRecommender.build()\n \n self.recommender = AverageRecommender\n \n self.save_entities()\n \n\nclass TestEntities(TestBuild):\n \"\"\"The base class adding universal and specific entitits \n to the test instance\n \"\"\"\n \n def setUp(self):\n \"\"\"Obtain specific and universal subject objects \n and store them in the test instance\n \"\"\" \n \n super(TestEntities, self).setUp()\n \n self.save_entities()\n \n\n" }, { "alpha_fraction": 0.7019230723381042, "alphanum_fraction": 0.7211538553237915, "avg_line_length": 28.714284896850586, "blob_id": "74f9f8da729072e90e61e65cb1f7c03ef577fb22", "content_id": "e9d1b16262fe2758fb6a519a8879452fd278a510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/code/adapter/demo/constants.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Constants for the demo application.\"\"\"\n\nMAX_LENGTH_NAME = 40\n\"\"\"The maximum length of the name in the universal representation.\"\"\"\n\nMAX_LENGTH_IMAGE_PATH = 50\n\"\"\"The maximum length of the path to image\"\"\"\n" }, { "alpha_fraction": 0.5149863958358765, "alphanum_fraction": 0.5599455237388611, "avg_line_length": 32.318180084228516, "blob_id": "9abe4d84c17f395a26390477ed959a3bad67f354", "content_id": "1492be4f5e1bc974e9d0d001fa3fba7c297b9b3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 734, "license_type": "no_license", "max_line_length": 79, "num_lines": 22, "path": "/UnresystCD/code/dataset_scripts/process_travel.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "grep diplomka_time_order travel.sql -m 1 -n\n\n# shorten, remove unused\ntail -n +38 diplomka\\(2\\).sql > travel1.sql \nhead -n 302073 travel1.sql > travel2.sql\n\n# away with the insert part \nsed -e 's/.*(//' travel2.sql > travel3.sql \n\n# away with the end \nsed -e 's/);.*//' travel3.sql > travel4.sql \n\n# remove ', ' from places between appostrophes\nsed -e \"s/', '\\(.[^']*\\),\\([^']*\\)', '/', '\\1\\2', '/\" travel4.sql > travel5.sql\nsed -e \"s/', '\\(.[^']*\\),\\([^']*\\)', '/', '\\1\\2', '/\" travel5.sql > travel6.sql\nsed -e \"s/', '\\(.[^']*\\),\\([^']*\\)', '/', '\\1\\2', '/\" travel6.sql > travel7.sql\n\n# away with spaces behind commas \nsed -e 's/, /,/g' travel7.sql > travel8.sql\n\n# away with appostrophes \nsed -e \"s/'//g\" travel8.sql > travel9.sql\n\n" }, { "alpha_fraction": 0.6000116467475891, "alphanum_fraction": 0.6046186089515686, "avg_line_length": 42.78717803955078, "blob_id": "4b60e53462fc33d0c54b4c6ff40808d6e82690b0", "content_id": "06cf1f57e05adba80edb00c93f1600db52142aaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17148, "license_type": "no_license", "max_line_length": 126, "num_lines": 390, "path": "/UnresystCD/code/adapter/unresyst/combinator/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Base classes for the combinator layer:\n - BaseCombinator\n\"\"\"\nfrom unresyst.exceptions import CombinatorError\nfrom unresyst.models.abstractor import RelationshipInstance, RuleInstance, \\\n ExplicitRuleInstance, PredictedRelationshipDefinition, 
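The base classes in the test_base.py record above encapsulate fixture loading, recommender build and entity lookup, so concrete tests only subclass them. A hypothetical test on top of TestEntities might look as follows; it assumes User.__unicode__ returns the plain name, which the fixture names in the record suggest but do not show.

```python
# Hypothetical test case built on TestEntities from test_base.py above;
# setUp() has already built ShoeRecommender and filled the dictionaries.
from unresyst.tests.test_base import TestEntities

class TestUniversalMapping(TestEntities):

    def test_alice_is_a_subject(self):
        alice = self.universal_entities['Alice']
        # domain-neutral subjects carry entity_type 'S' and a truncated name
        self.assertEqual(alice.entity_type, 'S')
        self.assertEqual(alice.name, 'Alice')
```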
ClusterMember\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance, AggregatedBiasInstance\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\nfrom unresyst.constants import *\n\nclass BaseCombinator(object):\n \"\"\"The base class defining the interface of all combinators.\n interface methods:\n - combine_pair_similarities\n - combine_entity_biases\n - combine_pair_prediction_elements\n - choose_promising_objects\n \n methods to be overriden:\n - _combine \n \n helper methods for subclasses:\n - _concat_descriptions \n \"\"\"\n \n DIVISOR = 3\n \"\"\"A constant for dividing the relationship members that would otherwise be \n too numerous\"\"\"\n \n def __init__(self):\n \"\"\"The initializer\"\"\"\n \n self.top_bias_objects = None\n\n def _checked_combine(self, combination_elements, ResultClass):\n \"\"\"Check if something was given and call the overriden _combine method\n \"\"\"\n if not combination_elements:\n raise CombinatorError(\"No combination_elements given\")\n return self._combine(combination_elements, ResultClass)\n \n def _combine(self, combination_elements, ResultClass):\n \"\"\"Combine the combination elements to produce an instance \n of the ResultClass, filling in its expectancy and description.\n \n Overriden in subclasses\n\n @type combination_elements: a list of BaseCombinationElement\n @param combination_elements: the elements that should be combined\n\n @type ResultClass: class\n @param ResultClass: the class which instance will be returned\n \n @rtype: ResultClass\n @return: the combination class with filled expectancy and description\n \"\"\"\n pass\n\n @staticmethod\n def _concat_descriptions(element_list):\n \"\"\"Concat descriptions of the elements in the element_list.\n \n @type element_list: a list of BaseCombinationElement\n @param element_list: a list of elements in order that they should\n appear\n \n @rtype: str\n @return: the string concatenation of the element descriptions\n \"\"\"\n list_len = len(element_list)\n \n assert list_len > 0\n \n if list_len == 1:\n return element_list[0].get_description()\n \n # shorten the list in case it's too long\n short_list = element_list[:MAX_REASONS_DESCRIPTION]\n \n # join the descriptions\n joined_desc = ' '.join([\"%s: %s\" % ((REASON_STR % i), e.get_description()) \\\n for e, i in zip(short_list, range(1, list_len + 1))])\n \n # if the list was shortened add a message\n if list_len > MAX_REASONS_DESCRIPTION:\n joined_desc += ' ' + MORE_REASONS_STR % (list_len - MAX_REASONS_DESCRIPTION)\n \n return joined_desc\n\n \n\n def combine_pair_similarities(self, combination_elements):\n \"\"\"Aggregate similarities of the given pair S-S, O-O, or SO-SO. 
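_concat_descriptions above is what assembles the 'Reason 1: ... Reason 2: ...' strings that reappear in the expected test data later in this dump. A standalone re-creation of its core join, assuming REASON_STR expands to 'Reason %d' (the constant lives in unresyst.constants and is not shown here); the MAX_REASONS_DESCRIPTION cap and the MORE_REASONS_STR suffix are omitted for brevity.

```python
# Standalone sketch of the join performed by _concat_descriptions above,
# assuming REASON_STR = 'Reason %d'; the length cap is left out (Python 2).
descriptions = [
    'Users Alice and Bob live in the same city.',
    'Users Alice and Bob are about the same age.',
]
joined = ' '.join(['Reason %d: %s' % (i, d)
                   for i, d in enumerate(descriptions, 1)])
print joined
# Reason 1: Users Alice and Bob live in the same city. Reason 2: Users
# Alice and Bob are about the same age.
```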
\n \n @type combination_elements: a list of BaseCombinationElement\n @param combination_elements: the elements that should be combined \n \n @rtype: AggregatedRelationshipInstance\n @return: the aggregated relationship with filled expectancy and\n description, other fields are empty.\n \"\"\" \n return self._checked_combine(\n combination_elements=combination_elements, \n ResultClass=AggregatedRelationshipInstance)\n \n\n def combine_entity_biases(self, entity_biases):\n \"\"\"Aggregate biases of the given entity S, O, or SO.\n \n @type entitiy_biases: QuerySet, each member having a get_expectancy()\n method\n @param entitiy_biases: the biases to aggregate\n \n @rtype: float\n @return: the expectancy that the entity will be in \n a predicted_relationship, aggregated from its biases\n \"\"\"\n return self._checked_combine(\n combination_elements=entity_biases,\n ResultClass=AggregatedBiasInstance)\n\n\n def combine_pair_prediction_elements(self, combination_elements):\n \"\"\"Combine all preference sources producing predictions\"\"\"\n return self._checked_combine(\n combination_elements=combination_elements,\n ResultClass=RelationshipPredictionInstance)\n\n \n def choose_promising_objects(self, dn_subject, min_count):\n \"\"\"Choose at least min_count objects that are likely to be interesting\n for the dn_subject, using various preference sources, choosing by \n a heuristic.\n \n Get maximum of min_count objects from each category\n - aggregated object bias \n - s-o relationships \n - predicted_relationship + object_similarities\n - predicted_relationship + subject similarities\n - predicted_relationship + subject cluster memberships \n - predicted_relationship + object cluster memberships \n \n @type dn_subject: SubjectObject\n @param dn_subject: the subject to choose the objects for\n \n @type min_count: int\n @param min_count: the minimum count that is chosen (if it's available)\n \n @rtype: iterable of SubjectObject\n @return: the promising objects\n \"\"\" \n \n if not min_count:\n return [] \n \n recommender_model = dn_subject.recommender\n \n object_ent_type = ENTITY_TYPE_SUBJECTOBJECT \\\n if recommender_model.are_subjects_objects else \\\n ENTITY_TYPE_OBJECT\n \n # aggregated object bias\n #\n \n # use some caching\n if self.top_bias_objects is None:\n \n qs_biases = AggregatedBiasInstance.objects\\\n .filter(recommender=recommender_model, \n subject_object__entity_type=object_ent_type,\n expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .order_by('-expectancy')\n \n self.top_bias_objects = [(b.subject_object, b.expectancy) for b in qs_biases[:min_count]]\n \n top_bias_objs = self.top_bias_objects\n \n # s-o rules, relationships, explicit rules\n #\n \n top_rel_objs = self._get_promising_objects_rules_relationships(\n dn_subject=dn_subject,\n min_count=min_count,\n recommender_model=recommender_model)\n \n \n # predicted_relationship + object_similarities\n # predicted_relationship + subject similarities\n \n top_sim_objs = self._get_promising_objects_similarities(\n dn_subject=dn_subject,\n min_count=min_count,\n recommender_model=recommender_model)\n \n \n\n # predicted_relationship + subject cluster memberships \n # predicted_relationship + object cluster memberships\n \n cluster_objs = self._get_promising_objects_clusters(\n dn_subject=dn_subject,\n min_count=min_count,\n recommender_model=recommender_model)\n\n # concat all the lists and sort by expectancy\n ret_list = top_bias_objs + top_rel_objs + top_sim_objs + cluster_objs\n \n # if the liked should be removed, remove the 
liked objects from promising\n if recommender_model.remove_predicted_from_recommendations:\n\n # get ids of the liked objects for dn subject\n pred_obj_ids = RelationshipInstance\\\n .filter_predicted(recommender_model=recommender_model)\\\n .filter(subject_object1=dn_subject)\\\n .values_list('subject_object2__pk', flat=True)\n \n pred_obj_ids = set(pred_obj_ids)\n \n # remove the liked objects from ret_list\n ret_list = filter(lambda pair: not(pair[0].pk in pred_obj_ids), ret_list)\n \n ret_list.sort(key=lambda pair: pair[1], reverse=True) \n \n # remove the duplicates and take only some of the first\n return list(set([obj for obj, x in ret_list]))[:int(PROMISING_RATE*min_count)]\n\n def _get_promising_objects_clusters(self, dn_subject, min_count, recommender_model):\n \"\"\"Get promising objects from predicted_relationship + cluster membership\n\n @return: list of pairs (object, expectancy)\n \"\"\"\n\n # the definition of the predicted relationship\n d = PredictedRelationshipDefinition.objects.get(recommender=recommender_model)\n \n # taking similar subjects from clusters\n #\n sim_subj_objs = []\n\n # go through the subject's memberships\n for cm1 in dn_subject.clustermember_set.order_by('-confidence')[:min_count/(2*self.DIVISOR)]:\n\n # take the most similar subjects\n for cm2 in cm1.cluster.clustermember_set.exclude(id=cm1.id).order_by('-confidence')[:min_count/(2*self.DIVISOR)]:\n \n # take objects connected with them\n for rel in cm2.member.relationshipinstance_relationships1.filter(definition=d)[:min_count/self.DIVISOR]:\n \n # use a approximation for overall expectancy\n sim_subj_objs.append((rel.subject_object2, \n (cm1.confidence * cm2.confidence)/2 + UNCERTAIN_PREDICTION_VALUE))\n \n # taking similar objects from clusters\n #\n \n qs_memberships = ClusterMember.objects.filter(\n cluster__cluster_set__recommender=recommender_model,\n cluster__clustermember__member__relationshipinstance_relationships2__definition=d,\n cluster__clustermember__member__relationshipinstance_relationships2__subject_object1=dn_subject)\\\n .order_by('-confidence', '-cluster__clustermember__confidence')\n\n sim_obj_objs = [(cm.member, cm.confidence/2 + UNCERTAIN_PREDICTION_VALUE) \\\n for cm in qs_memberships[:min_count]]\n \n return sim_subj_objs + sim_obj_objs\n \n def _get_promising_objects_rules_relationships(self, dn_subject, min_count, recommender_model):\n \"\"\"Get promising objects from subject-object rules, relationships,\n explicit rules\n \n @return: list of pairs (object, expectancy) \n \"\"\"\n \n # get what is available in instances\n\n # the relationship type we're interested in\n rel_type = RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT \\\n if recommender_model.are_subjects_objects else \\\n RELATIONSHIP_TYPE_SUBJECT_OBJECT \n \n # relationship instances by weight, some rules also may be chosen\n qs_rel = RelationshipInstance.objects.filter(\n definition__recommender=recommender_model,\n definition__rulerelationshipdefinition__relationship_type=rel_type,\n subject_object1=dn_subject,\n definition__rulerelationshipdefinition__is_positive=True)\\\n .distinct()\\\n .order_by('-definition__rulerelationshipdefinition__weight')\n\n # rule instances by confidence\n qs_rule = RuleInstance.objects.filter(\n definition__recommender=recommender_model,\n definition__rulerelationshipdefinition__relationship_type=rel_type,\n subject_object1=dn_subject,\n definition__rulerelationshipdefinition__is_positive=True)\\\n .distinct()\\\n .order_by('-confidence') \n\n # explicit feedback \n qs_exp_rel = 
ExplicitRuleInstance.objects.filter( \n definition__recommender=recommender_model,\n subject_object1=dn_subject,\n expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .distinct()\\\n .order_by('-expectancy')\n\n return [(rel.subject_object2, rel.get_expectancy()) for rel in qs_rel[:min_count]] + \\\n [(rel.subject_object2, rel.get_expectancy()) for rel in qs_rule[:min_count]] + \\\n [(rel.subject_object2, rel.expectancy) for rel in qs_exp_rel[:min_count]] \n\n \n def _get_promising_objects_similarities(self, dn_subject, min_count, recommender_model):\n \"\"\"Get promising objects from \n predicted_relationship + aggregated similarities\n \n @return: list of pairs (object, expectancy)\n \"\"\"\n \n # the definition of the predicted relationship\n d = PredictedRelationshipDefinition.objects.get(recommender=recommender_model)\n \n # try finding the similar entities to the ones liked by dn_subject\n # content-based \n \n # get similarities starting the traverse with the similarity.\n cont_qs_sim1 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model, expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .filter(\n # traverse from the other object in similarity (subject_object1) through\n # the relationship instance, its subject (subject_object1) must be so1\n subject_object1__relationshipinstance_relationships2__subject_object1=dn_subject,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships2__definition=d)\\\n .distinct()\\\n .order_by('-expectancy')\n\n cont_objs_sim1 = [(sim.subject_object2, sim.expectancy) for sim in cont_qs_sim1[:min_count]]\n\n cont_qs_sim2 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model, expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .filter(\n # traverse from the other through relationship to so1\n subject_object2__relationshipinstance_relationships2__subject_object1=dn_subject,\n \n # the definition again must be the predicted\n subject_object2__relationshipinstance_relationships2__definition=d)\\\n .distinct()\\\n .order_by('-expectancy')\n \n cont_objs_sim2 = [(sim.subject_object1, sim.expectancy) for sim in cont_qs_sim2[:min_count]]\n \n # try finding the similar entity (user) to entity that liked so2\n # cf\n \n # when subject is in the second position in similarity\n cf_qs_sim1 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model, expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .filter(\n # take dn_subject as stable - in the subject_object2 position of the similarity\n subject_object2=dn_subject,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships1__definition=d)\\\n .distinct()\\\n .order_by('-expectancy')\n\n # get the object behind the predicted_relationship \n cf_objs_sim1 = [(pref.subject_object2, sim.expectancy) \\\n for sim in cf_qs_sim1[:min_count/self.DIVISOR] \\\n for pref in sim.subject_object1.relationshipinstance_relationships1.filter(definition=d)[:min_count/self.DIVISOR]]\n\n # when subject is in the first position in similarity \n cf_qs_sim2 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model, expectancy__gt=UNCERTAIN_PREDICTION_VALUE)\\\n .filter(\n # take dn_subject as stable again, now in the subject_object1 position of the similarity\n subject_object1=dn_subject, \n \n # the definition again must be the predicted\n 
subject_object2__relationshipinstance_relationships1__definition=d)\\\n .distinct()\\\n .order_by('-expectancy')\n \n # get the object behind the predicted_relationship \n cf_objs_sim2 = [(pref.subject_object2, sim.expectancy) \\\n for sim in cf_qs_sim2[:min_count/self.DIVISOR] \\\n for pref in sim.subject_object2.relationshipinstance_relationships1.filter(definition=d)[:min_count/self.DIVISOR]]\n \n return cont_objs_sim1 + cont_objs_sim2 + cf_objs_sim1 + cf_objs_sim2\n \n \n\n \n \n \n \n" }, { "alpha_fraction": 0.407888799905777, "alphanum_fraction": 0.44402703642845154, "avg_line_length": 35.519229888916016, "blob_id": "77be0dd2ab0e6d6b28632408f382e8cc098d9a7b", "content_id": "9799347ab014baef3e09e67ea2786a89cb0ee061", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13310, "license_type": "no_license", "max_line_length": 243, "num_lines": 364, "path": "/code/adapter/unresyst/tests/test_combine_compile.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Tests for combinator, compilator.\"\"\"\nfrom nose.tools import eq_, assert_almost_equal\n\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.combinator.base import BaseCombinator\nfrom unresyst.compilator.base import BaseCompilator\n\nfrom test_base import TestBuildAverage\n\nMIN_COUNT = 5\nPLACES = 4\n\nclass TestCombinator(TestBuildAverage):\n \"\"\"Tests for the base combinator\"\"\"\n\n EXPECTED_PROMISING_OBJECTS = {\n 'Alice': (('Sneakers', 'Design Shoes'), ('Rubber Shoes', 'Sneakers', 'Rubber Shoes', 'Design Shoes'), ('Rubber Shoes', 'RS 130', 'Sneakers'), ('Sneakers', 'Rubber Shoes')),\n 'Bob': (('Sneakers', 'Design Shoes'), ('Sneakers', 'Sneakers', 'Rubber Shoes'), ('Rubber Shoes', 'RS 130', 'Sneakers'), ('Sneakers', 'Rubber Shoes')),\n 'Cindy': ((), ('Rubber Shoes', 'RS 130'), (), ('Sneakers', 'Rubber Shoes')),\n 'Daisy': ((), ('RS 130',), (), ('Sneakers', 'Rubber Shoes')),\n 'Edgar': ((), (), ('Sneakers',), ('Sneakers', 'Rubber Shoes'), ('RS 130', 'Octane SL')),\n 'Fionna': ((), (), ('Rubber Shoes',), ('Sneakers', 'Rubber Shoes'), ('RS 130',)),\n }\n \"\"\"Dictionary subject: tuple( tuple from clusters, tuple from s-o rules, tuple from similarities, tuple from biases)\n \"\"\"\n\n def dtest_choose_promising_objects(self):\n \"\"\"Test choosing promising objects for subjects\"\"\"\n\n r = self.recommender._get_recommender_model()\n bc = BaseCombinator()\n\n for subj in SubjectObject.objects.filter(recommender=r, entity_type='S'):\n\n # choose it\n promobjs = bc.choose_promising_objects(dn_subject=subj, min_count=MIN_COUNT)\n \n # compare it\n eq_((subj, set(promobjs)), (subj, set([self.universal_entities[oname] for tup in self.EXPECTED_PROMISING_OBJECTS[subj.name] for oname in tup])))\n \nclass TestCompilator(TestBuildAverage):\n \"\"\"Tests for the base compilator\"\"\"\n EXPECTED_COMBINATION_ELEMENTS = {\n \n ('Alice', 'Sneakers'): (\n [(0.566667, 'User Alice likes many shoe pairs.'), (0.766667, 'Shoe pair Sneakers is popular')], \n [(0.550000, 'User Alice is from the same city as the manufacturer of Sneakers.')], \n [],\n [(0.612500, 'Similarity: Reason 1: Users Alice and Bob live in the same city. Reason 2: Users Alice and Bob are about the same age. 
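_get_promising_objects_clusters above approximates the overall expectancy of a cluster-mediated match as (confidence1 * confidence2) / 2 + UNCERTAIN_PREDICTION_VALUE. A worked example of that mapping, assuming UNCERTAIN_PREDICTION_VALUE is 0.5 (the constant comes from unresyst.constants and is not shown in this dump):

```python
# Worked example of the cluster expectancy approximation used above, under
# the assumption that UNCERTAIN_PREDICTION_VALUE equals 0.5.
UNCERTAIN_PREDICTION_VALUE = 0.5

def cluster_pair_expectancy(confidence1, confidence2):
    # two membership confidences from [0, 1] land in [0.5, 1.0]
    return (confidence1 * confidence2) / 2 + UNCERTAIN_PREDICTION_VALUE

print cluster_pair_expectancy(1.0, 1.0)   # 1.0 - both memberships certain
print cluster_pair_expectancy(0.8, 0.5)   # 0.7 - weaker joint evidence
```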
And: User Bob likes shoes Sneakers.')],\n [],\n [],\n ),\n \n ('Alice', 'Rubber Shoes'): (\n [(0.566667, 'User Alice likes many shoe pairs.'), (0.633333, 'Shoe pair Rubber Shoes is popular')], \n [(0.700000, 'User Alice has viewed Rubber Shoes.'), (0.550000, 'User Alice is from the same city as the manufacturer of Rubber Shoes.')], \n [(0.625000, 'User Alice likes shoes Sneakers. And similarity: Reason 1: The shoe pairs Sneakers and Rubber Shoes share some keywords. Reason 2: Shoes Sneakers and Rubber Shoes were made by the same manufacturer.')],\n [],\n [],\n [],\n ), \n \n ('Alice', 'RS 130'): (\n [(0.566667, 'User Alice likes many shoe pairs.'), (0.633333, 'Shoe pair RS 130 is popular')], \n [(0.075000, \"Alice is from south, so RS 130 can't be recommended to him/her.\")],\n [],\n [],\n [],\n [],\n ),\n \n ('Alice', 'Design Shoes'): (\n [(0.566667, 'User Alice likes many shoe pairs.')], \n [],\n [(0.625000, 'User Alice likes shoes Sneakers. And similarity: Reason 1: Sneakers belong to the Casual category. Design Shoes belong to the Casual category. Reason 2: The shoe pairs Sneakers and Design Shoes share some keywords.')],\n [],\n [],\n [],\n ),\n \n ('Alice', 'Octane SL'): (\n [(0.566667, 'User Alice likes many shoe pairs.')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Bob', 'Sneakers'): (\n [(0.566667, 'User Bob likes many shoe pairs.'), (0.766667, 'Shoe pair Sneakers is popular')], \n [(0.700000, 'User Bob has viewed Sneakers.'), (0.550000, 'User Bob is from the same city as the manufacturer of Sneakers.')], \n [],\n [(0.612500, 'Similarity: Reason 1: Users Alice and Bob live in the same city. Reason 2: Users Alice and Bob are about the same age. And: User Alice likes shoes Sneakers.')],\n [],\n [],\n ), \n \n ('Bob', 'Rubber Shoes'): (\n [(0.566667, 'User Bob likes many shoe pairs.'), (0.633333, 'Shoe pair Rubber Shoes is popular')], \n [(0.550000, 'User Bob is from the same city as the manufacturer of Rubber Shoes.')], \n [(0.625000, 'User Bob likes shoes Sneakers. And similarity: Reason 1: The shoe pairs Sneakers and Rubber Shoes share some keywords. Reason 2: Shoes Sneakers and Rubber Shoes were made by the same manufacturer.')], \n [],\n [], \n [],\n ),\n \n ('Bob', 'RS 130'): (\n [(0.566667, 'User Bob likes many shoe pairs.'), (0.633333, 'Shoe pair RS 130 is popular')], \n [(0.075000, \"Bob is from south, so RS 130 can't be recommended to him/her.\")], \n [],\n [],\n [],\n [],\n ),\n \n ('Bob', 'Design Shoes'): (\n [(0.566667, 'User Bob likes many shoe pairs.')], \n [],\n [(0.625000, 'User Bob likes shoes Sneakers. And similarity: Reason 1: Sneakers belong to the Casual category. Design Shoes belong to the Casual category. Reason 2: The shoe pairs Sneakers and Design Shoes share some keywords.')],\n [],\n [],\n [],\n ),\n \n ('Bob', 'Octane SL'): (\n [(0.566667, 'User Bob likes many shoe pairs.')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Cindy', 'Sneakers'): (\n [(0.766667, 'Shoe pair Sneakers is popular')], \n [],\n [],\n [],\n [],\n [(0.550000, 'Similarity: Cindy has searched for the word Comfortable. Bob has searched for the word Comfortable. 
And: User Bob likes shoes Sneakers.')],\n ),\n\n \n ('Cindy', 'Rubber Shoes'): (\n [(0.633333, 'Shoe pair Rubber Shoes is popular')], \n [(0.700000, 'User Cindy has viewed Rubber Shoes.')],\n [],\n [],\n [],\n [],\n ),\n \n ('Cindy', 'RS 130'): (\n [(0.633333, 'Shoe pair RS 130 is popular')], \n [(0.550000, 'User Cindy is from the same city as the manufacturer of RS 130.')],\n [],\n [],\n [],\n [],\n ),\n \n ('Cindy', 'Design Shoes'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Cindy', 'Octane SL'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Daisy', 'Sneakers'): (\n [(0.766667, 'Shoe pair Sneakers is popular')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Daisy', 'Rubber Shoes'): (\n [(0.633333, 'Shoe pair Rubber Shoes is popular')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Daisy', 'RS 130'): (\n [(0.633333, 'Shoe pair RS 130 is popular')], \n [(0.550000, 'User Daisy is from the same city as the manufacturer of RS 130.')],\n [],\n [],\n [],\n [],\n ), \n \n ('Daisy', 'Design Shoes'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Daisy', 'Octane SL'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Edgar', 'Sneakers'): (\n [(0.633333, 'User Edgar likes many shoe pairs.'), (0.766667, 'Shoe pair Sneakers is popular')],\n [],\n [(0.625000, 'User Edgar likes shoes Rubber Shoes. And similarity: Reason 1: The shoe pairs Sneakers and Rubber Shoes share some keywords. Reason 2: Shoes Sneakers and Rubber Shoes were made by the same manufacturer.')],\n [],\n [],\n [],\n ),\n \n ('Edgar', 'Rubber Shoes'): (\n [(0.633333, 'User Edgar likes many shoe pairs.'), (0.633333, 'Shoe pair Rubber Shoes is popular')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Edgar', 'RS 130'): (\n [(0.633333, 'User Edgar likes many shoe pairs.'), (0.633333, 'Shoe pair RS 130 is popular')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Edgar', 'Design Shoes'): (\n [(0.633333, 'User Edgar likes many shoe pairs.')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Edgar', 'Octane SL'): (\n [(0.633333, 'User Edgar likes many shoe pairs.')], \n [],\n [],\n [],\n [(0.650000, 'User Edgar likes shoes RS 130. And similarity: RS 130 belong to the For Sports category. Octane SL belong to the For Sports category.')],\n [],\n ),\n \n ('Fionna', 'Sneakers'): (\n [(0.766667, 'Shoe pair Sneakers is popular')], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Fionna', 'Rubber Shoes'): (\n [(0.633333, 'Shoe pair Rubber Shoes is popular')], \n [],\n [],\n [(0.600000, 'Similarity: Users Edgar and Fionna are about the same age. And: User Edgar likes shoes Rubber Shoes.')],\n [],\n [],\n ), \n \n ('Fionna', 'RS 130'): (\n [(0.633333, 'Shoe pair RS 130 is popular')], \n [],\n [],\n [(0.600000, 'Similarity: Users Edgar and Fionna are about the same age. 
And: User Edgar likes shoes RS 130.')],\n [],\n [],\n ),\n \n ('Fionna', 'Design Shoes'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ),\n \n ('Fionna', 'Octane SL'): (\n [], \n [],\n [],\n [],\n [],\n [],\n ), \n }\n \"\"\"Dictionary s-o pair: tuple of elements \n (the pair: expectancy, description): \n - bias, \n - s-o relationships\n - predicted + object similarity\n - predicted + subject similarity\n - predicted + object cluster\n - predicted + subject cluster\n \"\"\"\n \n\n \n def dtest_get_pair_combination_elements(self):\n \"\"\"Test getting all we know for the given s-o pairs\"\"\"\n\n r = self.recommender._get_recommender_model()\n \n bc = BaseCompilator()\n\n # for all subject - object pairs\n for subj in SubjectObject.objects.filter(recommender=r, entity_type='S'):\n\n for obj in SubjectObject.objects.filter(recommender=r, entity_type='O'):\n \n dn_subject = self.universal_entities[subj.name]\n dn_object = self.universal_entities[obj.name]\n \n # call the tested function \n els = bc.get_pair_combination_elements(dn_subject, dn_object)\n \n expected_data = self.EXPECTED_COMBINATION_ELEMENTS[(subj.name, obj.name)]\n \n # flatten the expected data\n flat_data = [pair for listt in expected_data for pair in listt]\n\n # assert the length is right \n eq_(len(flat_data), len(els), \"Expected: %d, Obtained %d. For pair %s, %s Obtained data: %s\" % \\\n (len(flat_data), len(els), subj, obj, els))\n \n for el in els:\n \n # try finding the description in the expected data\n found = filter(lambda pair: pair[1]==el.get_description(), flat_data)\n \n eq_(len(found), 1, \"The description %s wasn't found for the pair %s, %s\" % (el.get_description(), subj, obj))\n found = found[0]\n \n assert_almost_equal(found[0], el.get_expectancy(), PLACES,\n \"The expectancy is wrong for pair %s, %s. 
Expected %f, Got %f\" % (subj, obj, found[0], el.get_expectancy()))\n \n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 18.5, "blob_id": "5e4066ae41edc0aa7c1a7f8ec796d070d77f0c22", "content_id": "39d9a160c41fc8946eb2a6a18ea979a1b2062a6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 39, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/code/adapter/gr.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#/bin/bash\ngrep -r --include=*.py $1 *\n" }, { "alpha_fraction": 0.6993424892425537, "alphanum_fraction": 0.7065152525901794, "avg_line_length": 29.418182373046875, "blob_id": "a060d34a1df70ccb30aba92abd8b1b6bdac1f4d0", "content_id": "c2c0fa1dbb3cd32ee0d63d48f1879eeaaccdf503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 287, "num_lines": 55, "path": "/code/mahout/mahoutrec/unresystpredict.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# params:\n# lastfm\n# flixster\n# travel\n\n\nLASTFM=false\nFLIXSTER=false\nTRAVEL=false\n\nFLIXSTERBELOW=false\n\nfor param in $*;\ndo \n case $param in\n 'lastfm')\n LASTFM=true\n ;;\n 'flixster')\n FLIXSTER=true\n ;;\n 'travel')\n TRAVEL=true\n ;; \n 'flixsterbelow')\n FLIXSTERBELOW=true\n ;; \n esac\ndone\n\nif [ $LASTFM = true ]\nthen\n mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystPredict\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_test.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_predictions.csv\" \nfi\n\nif [ $FLIXSTER = true ]\nthen\n mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystPredict\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_test.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_predictions.csv\" \nfi\n\nif [ $FLIXSTERBELOW = true ]\nthen\n mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystPredict\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_test.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_predictions.csv\" \nfi\n\n\nif [ $TRAVEL = true ]\nthen\n mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystPredict\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_train.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_test.csv /home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_predictions.csv\" \nfi\n\n\necho \"\"\n" }, { "alpha_fraction": 0.7361376881599426, "alphanum_fraction": 0.7361376881599426, "avg_line_length": 29.705883026123047, "blob_id": "b1c76c010eee71b762a42787202fa779a27e40b0", "content_id": "69152065ee0d7f36460fb4c1a9775c6e64d39541", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/code/adapter/flixster/evaluation.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The evaluators for the last.fm recommenders\"\"\"\n\nfrom unresyst.recommender.evaluation import BaseEvaluator\nfrom models import MovieEvaluationPair\nfrom unresyst.recommender.metrics import rmse, 
precision_recall\n\nclass MovieRecommenderEvaluator(BaseEvaluator):\n \"\"\"The evaluator of the artist recommender\"\"\"\n \n EvaluationPairModel = MovieEvaluationPair\n \"\"\"The model - pairs\"\"\"\n \n prediction_metric = rmse\n \"\"\"The metric\"\"\"\n \n recommendation_metric = precision_recall\n \"\"\"The other metric\"\"\"\n\n" }, { "alpha_fraction": 0.7560126781463623, "alphanum_fraction": 0.7569620013237, "avg_line_length": 76.0243911743164, "blob_id": "e4e4234d3bd3cfeffc22e3d5b6a28a141d698c91", "content_id": "032105fd7530c2e4ce9bb1f334b90dd1659d3b43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3160, "license_type": "no_license", "max_line_length": 345, "num_lines": 41, "path": "/UnresystCD/readme.txt", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "Welcome to the Unresyst CD!\n---------------------------\n\nCurrent versions of all contained files can be found on http://code.google.com/p/unresyst/\n\nThe directory structure of the CD is the following:\n\n - docs: documentation of the unresyst prototype implementation.\n - diagrams: various diagrams used in the thesis, created in dia, a program for drawing structured diagrams http://live.gnome.org/Dia\n - evaluation: spreadsheets containing the results of unresyst evaluation, including charts\n - thesis: the thesis in the linked pdf format and latex source codes\n - mindmaps: mind maps used for presenting ideas about Unresyst. Created in XMind version: 3.2.0 http://www.xmind.net/\n - pics: pictures used in the thesis\n - presentations: presentations on the thesis from the User-Web seminar.\n - datamodels: data model visualization for each dataset application and for the whole unresyst.\n \n \n \n - code: contains all source code files belonging to the thesis.\n\n - adapter: adapter is a django project home directory (see http://www.djangoproject.com/), there are the following applications in the project. Each application has its own directory. The documentation for the project can be found in the docs/epydoc folder.\n\n - unresyst: the universal recommender application containing the implementation of the universal recommedner\n - lastfm: an application containing data models and a Unresyst recommender for the last.fm dataset.\n - flixster: application for the flixster dataset, the same structure as lastfm\n - travel: application for the travel agency dataset, the same structure as lastfm\n - demo: a demo application used in the tests\n - csv: a directory for temporary csv files used for the external mahout recommender\n - logs: a directory of text files where the evaluation of the recommendations is logged\n - evaluate.txt: a list of commands for running an external recommender for each application\n - example.py: an example of an Unresyst recommender, presented on the User-Web seminar\n - flixstereval.sh, lastfmeval.sh, traveleval.sh: scripts for running an evaluation on example datasets, see the script code for help\n - dump.sh, load.sh: scripts for opperating with the MySQL database\n - setup.sh: a script for loading data from dataset to the corresponing application models, the script just uses python scripts save_data.py, defined in each of the applications. 
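The flixster evaluation.py record above only wires the rmse and precision_recall metrics into the evaluator; their implementations live in unresyst.recommender.metrics and are not part of this dump. A minimal sketch of what an RMSE over (predicted, observed) pairs computes, with an interface assumed for illustration:

```python
# Minimal RMSE sketch; the real rmse in unresyst.recommender.metrics may use
# a different interface. This one takes a non-empty list of float pairs.
from math import sqrt

def rmse(pairs):
    return sqrt(sum((p - o) ** 2 for p, o in pairs) / float(len(pairs)))

print rmse([(0.9, 1.0), (0.4, 0.0)])   # ~0.2915
```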
\n - unresyst/tests/: tests for the implementation, the tests can be run using the command 'python ./manage.py test unresyst\"\n\n - dataset_scripts: shell scripts for reducing the datasets as described in the thesis.\n\n - mahout: contains an implementation of the Slope One recommender that was used for a comparison with unresyst. The recommender is implemented in Java using the Mahout recommender framework. See http://mahout.apache.org/ and http://code.google.com/p/unresyst/wiki/CreateMahoutRecommender for download and instalation of the Mahout framework.\n \n - unresystrecommend.sh, unresystpredict.sh: the scripts used for running the recommender on our data sets.\n\n\n" }, { "alpha_fraction": 0.5913978219032288, "alphanum_fraction": 0.5913978219032288, "avg_line_length": 45, "blob_id": "a7006a8fa9180fdf69189b409144c4558add2b25", "content_id": "fbf1ff2571e73a191b467e04bafc59305f331b18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 93, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/code/adapter/rmpyc.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "find . -type f -name \\*.pyc -print | xargs rm\nfind . -type f -name \\*.py~ -print | xargs rm\n\n" }, { "alpha_fraction": 0.5462448000907898, "alphanum_fraction": 0.5462448000907898, "avg_line_length": 31.022472381591797, "blob_id": "b606e14182912067aa748b05ac5b1b12f75be5d5", "content_id": "644ee4d31f9a0251faad680e4db4b9700bfa0bf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2876, "license_type": "no_license", "max_line_length": 81, "num_lines": 89, "path": "/code/adapter/unresyst/abstractor/abstractor.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The main class of the abstractor package - Abstractor\"\"\"\n\nfrom base import BaseAbstractor\nfrom unresyst.models.common import SubjectObject, Recommender as RecommenderModel\nfrom unresyst.constants import *\n\nclass BasicAbstractor(BaseAbstractor):\n \"\"\"The basic implementation of the Abstractor class\"\"\"\n\n\n def create_subjectobjects(self, recommender_model, subjects, objects):\n \"\"\"See the base class for documentation.\"\"\"\n \n # if subjects are the same as objects, use the \"so\" entity type \n so = ENTITY_TYPE_SUBJECTOBJECT if subjects == objects else \"\"\n\n # create them again. 
subjects:\n for subj in subjects.iterator():\n \n # create the subject\n subob = SubjectObject(\n id_in_specific=subj.pk,\n name=subj.__unicode__()[:MAX_LENGTH_NAME],\n entity_type=so or ENTITY_TYPE_SUBJECT,\n recommender=recommender_model\n )\n \n # save it\n subob.save()\n \n print \" %d subjects created\" % subjects.count()\n \n # for recommenders where subjects==objects, that's it\n if so:\n return\n \n # create objects: \n for obj in objects.iterator():\n \n # create the object\n subob = SubjectObject(\n id_in_specific=obj.pk,\n name=obj.__unicode__()[:MAX_LENGTH_NAME],\n entity_type=ENTITY_TYPE_OBJECT,\n recommender=recommender_model\n )\n \n # save it\n subob.save()\n \n print \" %d objects created\" % objects.count()\n\n\n \n def create_predicted_relationship_instances(self, predicted_relationship):\n \"\"\"See the base class for documentation.\"\"\" \n \n # evaluate the relationship for all possible subjectobjects\n predicted_relationship.evaluate()\n \n \n def create_relationship_instances(self, relationships):\n \"\"\"See the base class for documentation.\"\"\"\n\n # evaluate all relationships\n for rel in relationships:\n rel.evaluate()\n\n \n def create_rule_instances(self, rules):\n \"\"\"See the base class for documentation.\"\"\"\n \n # eveluate all rules\n for rule in rules:\n rule.evaluate()\n\n def create_clusters(self, cluster_sets):\n \"\"\"See the base class for documentation.\"\"\"\n \n # evaluate all cluster sets\n for cluster_set in cluster_sets:\n cluster_set.evaluate() \n \n def create_biases(self, biases):\n \"\"\"See the base class for documentation.\"\"\" \n \n # evaluate the biases\n for bias in biases:\n bias.evaluate()\n \n \n" }, { "alpha_fraction": 0.5340961217880249, "alphanum_fraction": 0.5447750091552734, "avg_line_length": 34.819671630859375, "blob_id": "8491b9f2e8b52afe3779e5f92e6d4edc5a28c3d5", "content_id": "cc7eb7e3479bbc007ca79cd3972ef4f5c0018a4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6555, "license_type": "no_license", "max_line_length": 82, "num_lines": 183, "path": "/code/adapter/recommender_example.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"An example of an Unresyst recommender for the last.fm domain.\nThe addaptation rules include:\n - subject-object preference \n - subject-subject similarity\n - object-object similarity\n - object bias\n\nRecommenders used during the evaluation tests can be found in:\n - lastfm/recommender.py\n - travel/recommender.py\n - flixster/recommender.py\n\"\"\"\n\nfrom unresyst import *\nfrom models import *\n\nAGE_DIFFERENCE = 38 - 17\n\"\"\"The age difference between the oldest and the yongest user\"\"\"\n\nMAX_PLAY_COUNT = 542\n\"\"\"The maximum play count for an artist in the period\"\"\"\n\nN_MIN_PLAY_COUNT = 100\n\"\"\"The minimum play count for an artist to apply the bias\"\"\"\n\nPERIOD_START_DATE = datetime.date(2010, 9, 1)\nPERIOD_END_DATE = datetime.date(2010, 12, 31)\n\n\nclass ArtistRecommender(Recommender):\n \"\"\"A recommender recommending artists (musicians) that \n the user can like.\n \"\"\" \n \n name = \" Artist Recommender\"\n \"\"\"The name\"\"\" \n \n subjects = User.objects\n \"\"\"The subjects to who the recommender will recommend.\"\"\"\n \n objects = Artist.objects\n \"\"\"The objects that will be recommended.\"\"\"\n \n predicted_relationship = PredictedRelationship( \n name=\"User listens to artist's tracks.\",\n \n # gives true for user, artist pairs where the user have listened\n 
# to the artist\n condition=lambda user, artist: \\\n user.scrobble_set.filter(track__artist=artist).exists(), \n \n description=\"\"\"User %(subject)s listens to the %(object)s's tracks.\"\"\"\n )\n \"\"\"The relationship that will be predicted\"\"\"\n \n # the class contains definitions for business rules\n rules = (\n \n # don't recommend artists with male-specific tags to females\n SubjectObjectRule(\n name=\"Don't recommend male music to female users.\",\n\n # the user is a female and the artist was tagged by\n # a male-specific tag\n condition=lambda user, artist: user.gender == 'f' and \\\n artist.artisttag_set.filter(tag__gender_specific='m').exists(),\n \n # it's a negative rule\n is_positive=False,\n \n weight=0.5,\n \n # the more male-specific tags the artist has, the higher is \n # the rule confidence. Normalized by the artist tag count\n confidence=lambda user, artist: float(\n artist.artisttag_set.filter(tag__gender_specific='m').count())/ \\\n artist.artisttag_set.count(),\n \n description=\"Artist %(object)s isn't recommended to %(subject)s, \" +\n \"because the artist is considered male-specific.\"\n ),\n \n \n # users of similar age are similar\n SubjectSimilarityRule(\n name=\"Users with similar age.\",\n \n # both users have given their age and the difference \n # is lower than five\n condition=lambda user1, user2: \n user1.age and user2.age and abs(user1.age - user2.age) <= 5,\n \n is_positive=True, \n \n weight=0.5,\n \n # a magic linear confidence function\n confidence=lambda user1, user2: \n 1 - float(abs(user1.age - user2.age))/AGE_DIFFERENCE,\n \n description=\"Users %(subject1)s and %(subject2)s are about \" + \n \"the same age.\"\n ), \n \n # artists sharing some tags are similar\n ObjectSimilarityRule(\n name=\"Artists sharing some tags.\",\n\n # both artists have some tags and they share at least one tag\n condition=lambda artist1, artist2: \\\n artist1.artisttag_set.exists() and \\\n artist2.artisttag_set.exists() and \\\n artist1.artisttag_set.filter(\n tag__id__in=artist2.artisttag_set.values_list('tag__id')\n ).exists(),\n \n # it's a positive rule\n is_positive=True,\n \n weight=0.5,\n \n # The more tags the artists have in common, the higher is \n # the similarity confidence\n confidence=lambda artist1, artist2: \\\n float(artist1.artisttag_set.filter(\n tag__id__in=artist2.artisttag_set.values_list('tag__id')\n ).count()) / \\\n min(artist1.artisttag_set.count(), artist2.artisttag_set.count()),\n \n description=\"Artists %(object1)s and %(object2)s are similar \" +\n \"because they share some tags.\"\n ),\n )\n \n relationships = (\n # if two users are from the same country, they are similar\n SubjectSimilarityRelationship(\n name=\"Users living in the same country\",\n \n # both users have given their country and it's the same\n condition=lambda user1, user2:\n user1.country and \\\n user2.country and \\\n user1.country == user2.country,\n \n # it's relationship positive to similarity\n is_positive=True, \n \n weight=0.5, \n \n description=\"Users %(subject1)s and %(subject2)s are from \" + \\\n \" the same country.\",\n ),\n )\n \n biases = (\n ObjectBias(\n name=\"Artists whose tracks have been listened a lot recently.\",\n \n description=\"%(object)s has been listened much recently.\",\n \n # take only artists with more than the minimal play count\n # in the given period\n condition=lambda artist: \\\n artist.track_set\\\n .filter(scrobble__timestamp__range=\n (PERIOD_START_DATE, PERIOD_END_DATE))\\\n .aggregate(Count('scrobble')) > 
N_MIN_PLAY_COUNT,\n \n weight=0.5,\n \n # it's a positive bias\n is_positive=True,\n \n # the number of scrobbles for the artist divided by the max.\n confidence=lambda artist: \\\n float(artist.track_set\\\n .filter(scrobble__timestamp__range=\n (PERIOD_START_DATE, PERIOD_END_DATE))\\\n .annotate(scrobble_count=Count('scrobble'))\\\n .aggregate(Sum('scrobble_count')))/MAX_PLAY_COUNT\n ),\n )\n" }, { "alpha_fraction": 0.5987638235092163, "alphanum_fraction": 0.6003090143203735, "avg_line_length": 32.84716033935547, "blob_id": "b8d5a394c3f4eb03cbd9ec92f382c65b8c472e4a", "content_id": "a196c296da5fc0ad056f21531bcc8a73ef3c3a3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7766, "license_type": "no_license", "max_line_length": 107, "num_lines": 229, "path": "/UnresystCD/code/adapter/unresyst/exceptions.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Errors thrown by the Unresyst application.\"\"\"\n\nclass UnresystError(Exception):\n \"\"\"The base exception for all exceptions.\n \n Defines the message property.\n \"\"\"\n def _get_message(self): \n return self._message\n def _set_message(self, message): \n self._message = message\n message = property(_get_message, _set_message)\n\nclass CombinatorError(UnresystError):\n \"\"\"An error in combinator\"\"\"\n pass\n \nclass RecommenderError(UnresystError):\n \"\"\"The base class for exceptions related to a recommender.\n\n @type message: string\n @ivar message: the reason for the error \n \n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occurred \n \"\"\" \n \n def __init__(self, message, recommender):\n \"\"\"The constructor.\"\"\" \n\n self.message = message\n \"\"\"The message saying the reason for the error\"\"\" \n \n self.recommender = recommender\n \"\"\"The recommender for which the error is raised\"\"\" \n \nclass RecommenderBuildError(RecommenderError):\n \"\"\"An error during the build of the recommender.\"\"\"\n pass\n\nclass RecommenderNotBuiltError(RecommenderError):\n \"\"\"Exception meaning that the recommender hasn't been built even though\n it should have been.\n \n @type message: string\n @ivar message: the reason for the error \n \n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occurred\n \"\"\" \n\n\n def __str__(self):\n return (\"The recommender needs to be built to perform the action.\\n\" + \\\n \" message: %s\\n\" + \\\n \" recommender: %s\\n\") \\\n % (self.message, self.recommender.name)\n \n\nclass ConfigurationError(RecommenderError):\n \"\"\"Exception meaning that something is wrong in the recommender configuration. \n\n @type message: string\n @ivar message: the reason why the parameter is invalid\n\n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occurred\n \n @type parameter_name: string\n @ivar parameter_name: the name of the invalid parameter\n \n @type parameter_value: object\n @ivar parameter_value: the current (invalid) parameter value \n \"\"\"\n \n def __init__(self, message, recommender, parameter_name, parameter_value):\n \"\"\"The constructor.\"\"\" \n\n super(ConfigurationError, self).__init__(message, recommender)\n \n self.parameter_name = parameter_name\n \"\"\"The name of the invalid parameter\"\"\"\n \n self.parameter_value = parameter_value\n \"\"\"The current (invalid) value of the parameter\"\"\" \n\n def __str__(self):\n return (\"A configuration parameter is invalid.\\n\" + \\\n \" message: %s\\n\" + \\\n \" recommender: %s\\n\" + \\\n \" parameter name: %s\\n\" + \\\n \" parameter value: %s\\n\") \\\n % (self.message, self.recommender.name, self.parameter_name, self.parameter_value)\n\n\nclass RuleRelationshipError(RecommenderError):\n \"\"\"An error in the configuration of rules/relationships.\n\n @type message: str\n @ivar message: additional message\n\n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occurred\n \n @type name: str\n @ivar name: the name of the rule/relationship where the error occurred \n \"\"\"\n \n def __init__(self, message, recommender, name):\n \"\"\"The constructor.\"\"\" \n\n super(RuleRelationshipError, self).__init__(message, recommender)\n \n self.name = name\n \"\"\"The rule/relationship name\"\"\"\n \n\nclass DescriptionKeyError(RuleRelationshipError):\n \"\"\"Exception meaning that there are wrong strings in the rule/relationship\n description. 
\n\n @type message: str\n @ivar message: additional message\n \n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occured\n \n @type name: str\n @ivar name: the name of the rule/relationship where the error occured\n \n @type key: str\n @ivar key: the key that shouldn't have been there\n \n @type permitted_keys: str list\n @ivar permitted_keys: the keys that can be in the description\n \"\"\"\n \n def __init__(self, message, recommender, name, key, permitted_keys):\n \"\"\"The constructor.\"\"\" \n\n super(DescriptionKeyError, self).__init__(message, recommender, name)\n \n self.key = key\n \"\"\"The string key that is wrong\"\"\"\n \n self.permitted_keys = permitted_keys\n \"\"\"The list of permitted keys\"\"\"\n\n\n def __str__(self):\n return (\"There's an invalid format key '%s' in the rule/relationship description.\\n\" + \\\n \" message: %s\\n\" + \\\n \" recommender: %s\\n\" + \\\n \" rule/relationship name: %s\\n\" + \\\n \" the invalid key: %s\\n\" + \\\n \" The permitted keys are: %s\") \\\n % (self.key, self.message, self.recommender.name, self.name, self.key, self.permitted_keys) \n\nclass InvalidParameterError(RecommenderError):\n \"\"\"An exception raised when a parameter passed to a function is invalid.\n\n @type message: string\n @ivar message: the reason why the parameter is invalid\n\n @type recommender: recommender.Recommender\n @ivar recommender: the recommender on which the error occured\n \n @type parameter_name: string\n @ivar parameter_name: the name of the invalid parameter\n \n @type parameter_value: object\n @ivar parameter_value: the current (invalid) parameter value\n \n \"\"\"\n def __init__(self, message, recommender, parameter_name, parameter_value):\n \"\"\"The constructor.\"\"\" \n\n super(InvalidParameterError, self).__init__(message, recommender)\n \n self.parameter_name = parameter_name\n \"\"\"The name of the invalid parameter\"\"\"\n \n self.parameter_value = parameter_value\n \"\"\"The current (invalid) value of the parameter\"\"\" \n \n def __str__(self):\n return (\"The parameter passed to the function is invalid.\\n\" + \\\n \" message: %s\\n\" + \\\n \" recommender: %s\\n\" + \\\n \" parameter name: %s\\n\" + \\\n \" parameter value: %s\") \\\n % (self.message, self.recommender.name, self.parameter_name, self.parameter_value)\n \n \nclass SymmetryError(UnresystError):\n \"\"\"An error for handling symmetric relationship errors.\n \n @type message: string\n @ivar message: the reason for the error\n \n @type object1: object\n @ivar object1: the first object in the relationship\n \n @type object2: object\n @ivar object2: the second object in the relationship\n \"\"\"\n \n def __init__(self, message, object1, object2):\n \"\"\"The constructor\"\"\"\n\n self.message = message\n \"\"\"the reason for the error\"\"\"\n \n self.object1 = object1\n \"\"\"the first object in the relationship\"\"\"\n \n self.object2 = object2 \n \"\"\"the second object in the relationship\"\"\"\n \n def __str__(self):\n return (\"There's an error in relationship symmetry.\\n\" + \\\n \" message: %s\\n\" + \\\n \" first object in relationship: %s\\n\" + \\\n \" second object in relationship: %s\") \\\n % (self.message, self.object1, self.object2) \n \nclass EmptyTestSetError(UnresystError):\n \"\"\"An error indicating that the test set is empty.\"\"\"\n pass \n" }, { "alpha_fraction": 0.8471337556838989, "alphanum_fraction": 0.8471337556838989, "avg_line_length": 38.25, "blob_id": "b12b94b6c2d011fd8bd11d69d21be903195767e9", 
"content_id": "043a96d55420568d1741424d9735fb4331d54093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 52, "num_lines": 4, "path": "/code/adapter/unresyst/compilator/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The layer making predictions from aggregates\"\"\"\n\nfrom get_first_compilator import GetFirstCompilator\nfrom combining_compilator import CombiningCompilator\n" }, { "alpha_fraction": 0.5678645968437195, "alphanum_fraction": 0.5681615471839905, "avg_line_length": 32.83333206176758, "blob_id": "e1f2f7507d3a580f771c0a1f8cb7ad15ac713776", "content_id": "e8abee01b6e2d8adcc5c76312f84296b7ed1ff34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6734, "license_type": "no_license", "max_line_length": 88, "num_lines": 198, "path": "/code/adapter/unresyst/recommender/external_recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"A module for recommender making predictions in an outside engine\"\"\"\nimport csv\n\nfrom base import BaseRecommender\nfrom predictions import RelationshipPrediction\nfrom unresyst.exceptions import RecommenderError, RecommenderNotBuiltError\nfrom unresyst.models.algorithm import ExternalPrediction\nfrom unresyst.models.common import Recommender as RecommenderModel\nfrom unresyst.constants import *\n\nclass ExternalRecommender(BaseRecommender):\n \"\"\"A class representing an outside-world recommender with an Unresyst \n interface.\n \"\"\"\n \n PredictionModel = ExternalPrediction\n \"\"\"The model where predictions are stored\"\"\" \n \n # build phase:\n #\n \n @classmethod\n def build(cls):\n \"\"\"Raise an error, as thist can't be done here, but outside.\n \n @raise RecommenderError: always\n \"\"\"\n raise RecommenderError(\"The method is invalid for the external\" + \\\n \" recommender. Call the export_data, build it outside and \" + \\\n \"import_predictions.\", cls)\n\n\n @classmethod\n def export_data(cls, filename):\n \"\"\"Export the data from subject and object managers to a csv \n file of the given name.\n \n @type filename: str\n @param filename: the full path to the file\n \n @raise FileNotExists and other file open errors.\n \"\"\"\n \n with open(filename, 'w') as f:\n \n rel = cls.explicit_rating_rule \\\n if cls.explicit_rating_rule else cls.predicted_relationship\n \n # export the relationships to the given file\n rel.export(f)\n \n @classmethod\n def import_predictions(cls, filename):\n \"\"\"Load predictions from the given csv file.\n \n Creates the recommender model for the recommender and imports the \n predictions from the given file. 
The file has to be in format:\n <id subject>,<id object>,<prediction>\\n\n \n @type filename: str\n @param filename: the full path to the file\n\n @raise FileNotExists and other file open errors.\n \"\"\"\n \n cls._print('Deleting old predictions...')\n \n \n # if the recommender with the given name exists, delete it,\n RecommenderModel.objects.filter(class_name=cls.__name__).delete()\n \n # create a new recommender and save it\n recommender_model = RecommenderModel(\n class_name=cls.__name__,\n name=cls.name,\n is_built=False,\n are_subjects_objects=False\n ) \n recommender_model.save() \n \n cls._print('Importing new predictions...')\n \n # open the csv reader\n reader = csv.reader(open(filename, \"rb\"), delimiter=',', quoting=csv.QUOTE_NONE)\n \n # parse the csv line by line\n for subj_id, obj_id, expectancy in reader:\n \n # parse the values to the right types\n subj_id = int(subj_id)\n obj_id = int(obj_id)\n expectancy = float(expectancy)\n \n # if it's already imported, go to the next pair\n if cls.PredictionModel.objects.filter(\n subj_id=subj_id, obj_id=obj_id, recommender=recommender_model):\n continue\n \n # create a prediction\n cls.PredictionModel.objects.create(\n subj_id=subj_id,\n obj_id=obj_id,\n recommender=recommender_model,\n expectancy=expectancy\n )\n \n recommender_model.is_built=True\n recommender_model.save() \n \n cls._print('Done.')\n\n \n # recommend phase\n #\n @classmethod \n def predict_relationship(cls, subject, object_, save_to_db=False):\n \"\"\"See the base class for the explanation\n\n If the prediction doesn't exist retruns None.\n \"\"\"\n\n recommender_model = cls._get_recommender_model()\n \n # if the recommender isn't built raise an error\n if not recommender_model or not recommender_model.is_built:\n raise RecommenderNotBuiltError(\n message=\"Build the recommender prior to performing the \" + \\\n \"predict_relationship action.\",\n recommender=cls\n )\n \n # get the prediction if it exists\n qs_prediction_model = cls.PredictionModel.objects.filter(\n recommender=recommender_model,\n subj_id=subject.pk,\n obj_id=object_.pk\n )\n \n # if not return none\n if not qs_prediction_model:\n return None\n \n # if so extract the expectancy\n assert qs_prediction_model.count() == 1 \n expectancy = qs_prediction_model[0].expectancy\n \n # create and return the outer-world object\n prediction = RelationshipPrediction(\n subject=subject,\n object_=object_,\n expectancy=expectancy,\n is_uncertain=abs(expectancy - UNCERTAIN_PREDICTION_VALUE) < EXP_PRECISION\n ) \n return prediction\n\n\n @classmethod\n def get_recommendations(cls, subject, count=None): \n \"\"\"For documentation, see the base class\"\"\"\n \n recommender_model = cls._get_recommender_model()\n \n # if the recommender isn't built raise an error\n if not recommender_model or not recommender_model.is_built:\n raise RecommenderNotBuiltError(\n message=\"Build the recommender prior to performing the \" + \\\n \"get_recommendations action.\",\n recommender=cls\n )\n \n # if count wasn't given take the default one\n if not count:\n count = cls.default_recommendation_count\n \n # get the prediction objects\n qs_predictions = cls.PredictionModel.objects.filter(\n subj_id=subject.pk,\n recommender=recommender_model)\\\n .order_by('-expectancy')[:count]\n \n recommendations = []\n \n # go through the obtained predictions\n for pred_model in qs_predictions:\n\n # get the object by the id\n object_=cls.objects.get(pk=pred_model.obj_id)\n \n # create the outer-world object\n prediction = 
RelationshipPrediction(\n subject=subject,\n object_=object_,\n expectancy=pred_model.expectancy\n ) \n \n recommendations.append(prediction)\n\n return recommendations\n \n \n \n \n\n" }, { "alpha_fraction": 0.8885714411735535, "alphanum_fraction": 0.8885714411735535, "avg_line_length": 49, "blob_id": "70352b51a1d48ab49add548b003010777b50fb5b", "content_id": "273cda81444a14fdebc1b6b8b4d4649311fd0271", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 100, "num_lines": 7, "path": "/UnresystCD/code/adapter/unresyst/combinator/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The combining layer of Unresyst\"\"\"\n\nfrom function_combinator import FunctionCombinator\nfrom combination_element import RelSimilarityCombinationElement, ClusterSimilarityCombinationElement\nfrom average_combinator import AverageCombinator\nfrom twisted_average import TwistedAverageCombinator\nfrom confidence_factor import ConfidenceFactorCombinator\n" }, { "alpha_fraction": 0.6129541993141174, "alphanum_fraction": 0.6200631856918335, "avg_line_length": 36.235294342041016, "blob_id": "5bb1986eef9f3ddc7f24b5f21b64cb4710226911", "content_id": "b0eed7bd7e60bb70c8a550796fc9139839a1ae65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 102, "num_lines": 34, "path": "/UnresystCD/code/adapter/unresyst/combinator/twisted_average.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The twisted average combinator\"\"\"\n\n\nfrom base import BaseCombinator\n\nclass TwistedAverageCombinator(BaseCombinator):\n \"\"\"A combinator using weighted average\n \"\"\"\n \n def _combine(self, combination_elements, ResultClass):\n \"\"\"See the base class for documentation\n See the thesis text for the explanation of twisted average\n \"\"\"\n \n # number of positive elements\n num_positive = len(filter(lambda ce: ce.get_positiveness(), combination_elements))\n \n # the difference between the number of positive and negative\n pos_dif = abs(len(combination_elements) - 2 * num_positive)\n \n # the average expectancy\n avgexp = sum([ce.get_expectancy() for ce in combination_elements]) / len(combination_elements)\n \n # select the formula according to the expectancy\n if avgexp <= 0.5:\n res_exp = pow(2, pos_dif) * pow(avgexp, pos_dif + 1)\n else:\n res_exp = 1 - abs(pow(2, pos_dif) * pow((avgexp - 1), pos_dif + 1))\n\n # concat the description\n desc = self._concat_descriptions(combination_elements)\n \n # return the resulting class\n return ResultClass(expectancy=res_exp, description=desc)\n" }, { "alpha_fraction": 0.5863401889801025, "alphanum_fraction": 0.5906894207000732, "avg_line_length": 43.88768005371094, "blob_id": "210ebfcf21c921cbe01a6946738b98bfdbeade9f", "content_id": "6705c2e5e9ccff81de0fd76b46a172caf86cb155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12416, "license_type": "no_license", "max_line_length": 111, "num_lines": 276, "path": "/UnresystCD/code/adapter/unresyst/compilator/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The base class for all compilators BaseCompilator\"\"\"\nfrom django.db.models import Q\n\nfrom unresyst.constants import *\nfrom unresyst.combinator.combination_element import BiasAggregateCombinationElement, \\\n 
SubjectObjectRelCombinationElement, PredictedPlusObjectSimilarityCombinationElement, \\\n PredictedPlusSubjectSimilarityCombinationElement, PredictedPlusObjectClusterMemberCombinationElement, \\\n PredictedPlusSubjectClusterMemberCombinationElement, ClusterSimilarityCombinationElement\nfrom unresyst.models.aggregator import AggregatedBiasInstance, AggregatedRelationshipInstance\nfrom unresyst.models.abstractor import PredictedRelationshipDefinition, RelationshipInstance, \\\n ClusterMember, ExplicitRuleInstance\n\n\nclass BaseCompilator(object):\n \"\"\"The base class for all compilators\"\"\"\n \n def __init__(\n self, \n combinator=None, \n depth=DEFAULT_COMPILATOR_DEPTH, \n breadth=DEFAULT_COMPILATOR_BREADTH,\n pair_depth=DEFAULT_COMPILATOR_PAIR_DEPTH):\n \"\"\"The initializer\"\"\"\n\n self.combinator = combinator\n \"\"\"The combinator that will be used in compiling (e.g. the combination of cluster members\n to get similarity. If None, the get first strategy is used.\n \"\"\" \n \n self.depth = depth\n \"\"\"The depth until which the compilation should be done\"\"\"\n \n self.breadth = breadth\n \"\"\"The number of neighbours that will be taken during the compilation\"\"\" \n \n self.pair_depth = pair_depth\n \"\"\"The number of combination elements that can be taken for each pair from\n each group. Used only for cluster membership.\n \"\"\"\n\n \n\n def compile_all(self, recommender_model):\n \"\"\"Compile preferences, known relationships + similarities. \n \"\"\"\n pass\n \n \n def get_pair_combination_elements(self, dn_subject, dn_object):\n \"\"\"Find all we know about the relationship of dn_subject and\n dn_object using:\n - aggregated bias for both\n - s-o relationships (all)\n - predicted_relationship + object_similarities\n - predicted_relationship + subject similarities\n - predicted_relationship + subject cluster memberships (pairs not covered by similarities)\n - predicted_relationship + object cluster memberships (pairs not covered by similarities)\n --------- depth COMPILATOR_DEPTH_ONE_UNSURE\n \n @type dn_subject, dn_object: SubjectObject \n @param dn_subject, dn_object: the pair to get combination elements for\n \n @rtype: iterable of BaseCombinationElement\n @return: the list of all we know about the pair\n \"\"\"\n recommender_model = dn_subject.recommender\n els = []\n other_objs = []\n other_subjs = [] \n \n predicted_def = PredictedRelationshipDefinition.objects.get(\n recommender=recommender_model)\n # aggregated bias for both\n qs_bias = AggregatedBiasInstance.objects.filter(\n subject_object__id__in=[dn_subject.id, dn_object.id])\n \n for bias in qs_bias:\n els.append(BiasAggregateCombinationElement(bias_aggregate=bias))\n\n # s-o relationships (all)\n #\n \n qs_rels = RelationshipInstance.objects\\\n .exclude(definition=predicted_def)\\\n .filter(definition__recommender=recommender_model)\\\n .filter(subject_object1=dn_subject, subject_object2=dn_object)\\\n \n for rel in qs_rels:\n els.append(SubjectObjectRelCombinationElement(rel_instance=rel))\n \n # explicit also \n qs_expl_rels = ExplicitRuleInstance.objects\\\n .filter(definition__recommender=recommender_model)\\\n .filter(subject_object1=dn_subject, subject_object2=dn_object)\\\n \n for rel in qs_expl_rels:\n els.append(SubjectObjectRelCombinationElement(rel_instance=rel))\n \n # predicted_relationship + object_similarities\n #\n\n # get similarities starting the traverse with the similarity.\n qs_sim1 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model)\\\n .filter(\n Q(\n 
# take dn_object as stable - in the subject_object2 position of the similarity\n subject_object2=dn_object,\n \n # traverse from the other object in similarity (subject_object1) through\n # the relationship instance, its subject (subject_object1) must be dn_subject\n subject_object1__relationshipinstance_relationships2__subject_object1=dn_subject,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships2__definition=predicted_def) | \\\n Q( \n # take dn_object as stable again, now in the subject_object1 position of the similarity\n subject_object1=dn_object, \n \n # traverse from the other through relationship to dn_subject\n subject_object2__relationshipinstance_relationships2__subject_object1=dn_subject,\n \n # the definition again must be the predicted\n subject_object2__relationshipinstance_relationships2__definition=predicted_def)).distinct()\n \n\n \n # create combination elements\n for sim_rel in qs_sim1:\n \n # get the object that is similar to dn_object\n other_obj = sim_rel.get_related(dn_object)\n \n other_objs.append(other_obj)\n \n # get the predicted relationship to the other object\n predicted_rel = RelationshipInstance.objects.get(\n definition=predicted_def, \n subject_object1=dn_subject,\n subject_object2=other_obj)\n \n els.append(PredictedPlusObjectSimilarityCombinationElement(\n predicted_rel=predicted_rel,\n similarity_aggregate=sim_rel))\n \n\n \n # predicted_relationship + subject similarities\n #\n qs_sim2 = AggregatedRelationshipInstance.objects\\\n .filter(recommender=recommender_model)\\\n .filter(\n Q(\n # take dn_subject as stable - in the subject_object2 position of the similarity\n subject_object2=dn_subject,\n \n # traverse from the other object in similarity (subject_object1) through\n # the relationship instance, its object (subject_object2) must be dn_object\n subject_object1__relationshipinstance_relationships1__subject_object2=dn_object,\n \n # the relationship instance definition must be the predicted relationship def\n subject_object1__relationshipinstance_relationships1__definition=predicted_def) | \\\n Q( \n # take dn_subject as stable again, now in the subject_object1 position of the similarity\n subject_object1=dn_subject, \n \n # traverse from the other through relationship to dn_object\n subject_object2__relationshipinstance_relationships1__subject_object2=dn_object,\n \n # the definition again must be the predicted\n subject_object2__relationshipinstance_relationships1__definition=predicted_def)).distinct()\n \n # create combination elements\n for sim_rel in qs_sim2:\n \n # get the object that is similar to dn_object\n other_subj = sim_rel.get_related(dn_subject)\n\n other_subjs.append(other_subj)\n \n \n # get the predicted relationship to the other subject\n predicted_rel = RelationshipInstance.objects.get(\n definition=predicted_def, \n subject_object1=other_subj,\n subject_object2=dn_object)\n \n els.append(PredictedPlusSubjectSimilarityCombinationElement(\n predicted_rel=predicted_rel,\n similarity_aggregate=sim_rel))\n\n \n \n # predicted_relationship + object cluster memberships (pairs not covered by similarities) \n # \n \n # take the second membership from the object (first on the path from subject)\n # exclude the trivial cases\n qs_memberships_o = ClusterMember.objects.filter(\n cluster__cluster_set__recommender=recommender_model,\n cluster__clustermember__member=dn_object,\n member__relationshipinstance_relationships2__definition=predicted_def,\n 
member__relationshipinstance_relationships2__subject_object1=dn_subject)\\\n .exclude(member=dn_object)\\\n .distinct()\\\n .order_by('-confidence')\n \n # create the combination elements \n for cm in qs_memberships_o[:self.pair_depth]:\n \n # dont include the ones that are already there because of the similarity\n if cm.member in other_objs:\n continue\n \n # get the predicted relationship to the subject\n predicted_rel = RelationshipInstance.objects.get(\n definition=predicted_def, \n subject_object1=dn_subject,\n subject_object2=cm.member)\n \n # get the other membership\n second_membership = ClusterMember.objects.get(\n cluster=cm.cluster,\n member=dn_object)\n \n # create the combination element out of the memberships \n ce = ClusterSimilarityCombinationElement(\n cluster_members=(cm, second_membership)) \n \n els.append(PredictedPlusObjectClusterMemberCombinationElement(\n predicted_rel=predicted_rel,\n cluster_combination_element=ce)) \n \n \n # predicted_relationship + subject cluster memberships (pairs not covered by similarities) \n # \n \n # take the second membership from the subject\n # exclude the trivial cases\n qs_memberships_s = ClusterMember.objects.filter(\n cluster__cluster_set__recommender=recommender_model,\n cluster__clustermember__member=dn_subject,\n member__relationshipinstance_relationships1__definition=predicted_def,\n member__relationshipinstance_relationships1__subject_object2=dn_object)\\\n .exclude(member=dn_subject)\\\n .distinct()\\\n .order_by('-confidence')\n \n # create the combination elements \n for cm in qs_memberships_s[:self.pair_depth]:\n \n # dont include the ones that are already there because of the similarity\n if cm.member in other_subjs:\n continue\n \n # get the predicted relationship to the object\n predicted_rel = RelationshipInstance.objects.get(\n definition=predicted_def, \n subject_object1=cm.member,\n subject_object2=dn_object)\n \n # get the other membership\n first_membership = ClusterMember.objects.get(\n cluster=cm.cluster,\n member=dn_subject)\n \n # create the combination element out of the memberships \n ce = ClusterSimilarityCombinationElement(\n cluster_members=(first_membership, cm)) \n \n # append the element to the result \n els.append(PredictedPlusSubjectClusterMemberCombinationElement(\n predicted_rel=predicted_rel,\n cluster_combination_element=ce)) \n \n return els\n \n \n \n" }, { "alpha_fraction": 0.6250635385513306, "alphanum_fraction": 0.6255719661712646, "avg_line_length": 29.703125, "blob_id": "25bec476c43ad9c0f3a1bb9e4445a55935df12d9", "content_id": "b1a85e4a4706479fa3844af78cb7d5d6652427f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3934, "license_type": "no_license", "max_line_length": 80, "num_lines": 128, "path": "/UnresystCD/code/adapter/demo/models.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Demo models for the demo application\"\"\"\n\nfrom django.db import models\n\nfrom constants import *\n\nclass ShoePair(models.Model):\n \"\"\"A model for a pair of shoes.\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The shoe model name\"\"\"\n \n manufacturer = models.ForeignKey('Manufacturer', null=True)\n \"\"\"The manufacturer who made the shoes.\"\"\"\n \n for_winter = models.BooleanField(default=False)\n \"\"\"Are the shoes suitable for winter?\"\"\"\n \n keywords = models.ManyToManyField('Keyword')\n \"\"\"The keywords associated with the shoes.\"\"\"\n\n image_path = 
models.CharField(max_length=MAX_LENGTH_IMAGE_PATH)\n \"\"\"The path to the shoe image, relative to the MEDIA_ROOT\"\"\"\n \n category = models.ForeignKey('ShoeCategory', null=True)\n \"\"\"The category where the shoe pair belongs\"\"\"\n \n def get_avatar_path(self):\n return self.image_path\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass ShoeCategory(models.Model): \n \"\"\"A category of shoes, like casual, sport, ...\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The shoe category name\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name\n\n\nclass Keyword(models.Model):\n \"\"\"A model for a keyword associated with shoe pair.\"\"\"\n \n word = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The key word.\"\"\" \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.word\n\nclass User(models.Model):\n \"\"\"A model for the users of the demo system.\"\"\"\n\n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The user's name\"\"\"\n\n age = models.PositiveIntegerField(null=True, default=None)\n \"\"\"The age of the user.\"\"\"\n \n likes_shoes = models.ManyToManyField('ShoePair', related_name='likers')\n \"\"\"User's favorite shoes.\"\"\"\n \n viewed_shoes = models.ManyToManyField('ShoePair', related_name='viewers')\n \"\"\"Shoes the user has viewed.\"\"\"\n \n home_city = models.ForeignKey('City', null=True, default=None) \n \"\"\"The city where the user lives.\"\"\"\n \n words_searched = models.ManyToManyField('Keyword', related_name='searchers')\n \"\"\"The words searched by the user.\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name\n\nclass ShoeRating(models.Model):\n \"\"\"An explicit rating by user to a shoe pair\"\"\"\n \n user = models.ForeignKey('User')\n \"\"\"The rater\"\"\"\n \n shoe_pair = models.ForeignKey('ShoePair')\n \"\"\"The rated shoe pair\"\"\"\n \n stars = models.PositiveIntegerField()\n \"\"\"Number of stars from 0 to 5 given to the shoepair\"\"\"\n\n class Meta:\n unique_together = ('user', 'shoe_pair')\n \"\"\"There can be only one rating from a user to a shoe pair\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"%s: %s (%d)\" % (self.user, self.shoe_pair, self.stars) \n \n\nclass Manufacturer(models.Model):\n \"\"\"A model for a shoe manufacturer\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the manufacturer\"\"\"\n \n home_city = models.ForeignKey('City', null=True)\n \"\"\"The city where the manufacturer seats.\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass City(models.Model):\n \"\"\"A model for a city.\"\"\" \n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the city.\"\"\"\n \n in_south = models.BooleanField(default=False)\n \"\"\"Is the city in south?\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n" }, { "alpha_fraction": 0.7131147384643555, "alphanum_fraction": 0.7418032884597778, "avg_line_length": 26, "blob_id": "5459b44dd127d7846518d124979b6801463a65b7", "content_id": "0cce9ecc7bab47dcb095cc4beb97f35d24fb5761", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 244, "license_type": "no_license", "max_line_length": 85, "num_lines": 9, "path": "/code/adapter/modelviz.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#/bin/bash\n\n# 1 argument: the application name for which models visualization should be performed\n# there must exist the 'modelvizs' directory\n\npython modelviz.py $1 > $1.dot\ndot $1.dot -Tpng -o modelvizs/$1.png\nrm $1.dot\neog modelvizs/$1.png \n" }, { "alpha_fraction": 0.5691642761230469, "alphanum_fraction": 0.5788905024528503, "avg_line_length": 29.80555534362793, "blob_id": "e013e8a26d2b1c4a91c20d6a2c421f099f3254c3", "content_id": "bd40d365043ff62f86670e6a64dd0a739646a891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5552, "license_type": "no_license", "max_line_length": 104, "num_lines": 180, "path": "/UnresystCD/code/adapter/lastfm/recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The configuration for the last.fm recommender\"\"\"\nfrom django.db.models import Sum, Count\n\nfrom unresyst import *\n\nfrom models import *\nfrom constants import *\n\n\nAGE_DIFFERENCE = 38 - 17\n\nREGISTERED_DIFFERENCE = 733696 - 731857\n\nMAX_SCROBBLE_COUNT = 85\n\nSCROBBLE_DIFFERENCE = MAX_SCROBBLE_COUNT - 1\n\ndef _listens_artist_generator():\n \"\"\"The generator to the predicted relationship\"\"\"\n for u in User.objects.iterator():\n for a in Artist.objects.filter(track__scrobble__user=u).distinct().iterator():\n yield (u, a)\n\ndef _get_artist_tag_pairs(artist):\n \"\"\"Get list tags, confidence for all tags he/she has\"\"\" \n \n sumcount = artist.artisttag_set.aggregate(Sum('count'))['count__sum']\n \n # get only tags that are shared between at least two artists \n artist_tags = artist.artisttag_set.annotate(tagcount=Count('tag__artisttag')).filter(tagcount__gt=1)\n\n # confidence is counted as \n # the count of the tag / how many times the artist was tagged.\n return [(at.tag.name, float(at.count)/sumcount) \\\n for at in artist_tags]\n\ndef _tag_similarity_generator():\n \"\"\"Generate the pairs that share some tags\"\"\"\n \n # the artists that were tagged at least once\n qs_tagged_artists = Artist.objects.filter(artisttag__isnull=False).distinct()\n count = qs_tagged_artists.count()\n \n for a1, count in zip( qs_tagged_artists[1:].iterator(), \\\n range(1, count)):\n\n # obtain only first count entities\n for a2 in qs_tagged_artists[:count].iterator():\n if a1.artisttag_set.filter(\n tag__id__in=a2.artisttag_set.values_list('tag__id')\n ).distinct().count() > 45:\n yield (a1, a2) \n\ndef _gender_specific_generator():\n \"\"\"Generate pairs for the gender-specific tag rule\"\"\"\n \n qs_tagged_artists = Artist.objects.filter(artisttag__tag__gender_specific='m').distinct()\n \n for a in qs_tagged_artists:\n for u in User.objects.filter(gender='f'):\n yield (u, a)\n\nclass NovelArtistRecommender(Recommender):\n \"\"\"A recommender for discovering previously unheard artists\"\"\" \n\n name = \"Novel Artist Recommender\"\n \"\"\"The name\"\"\" \n \n subjects = User.objects\n \"\"\"The subjects to who the recommender will recommend.\"\"\"\n \n objects = Artist.objects\n \"\"\"The objects that will be recommended.\"\"\" \n\n predicted_relationship = PredictedRelationship( \n name=\"User listens to artist's tracks.\",\n condition=None, \n description=\"\"\"User %(subject)s listens to the %(object)s's tracks.\"\"\",\n generator=_listens_artist_generator\n )\n \"\"\"The 
relationship that will be predicted\"\"\"\n\n \"\"\"\n clusters:\n user:\n - country\n - gender\n \"\"\"\n \n relationships = ()\n \n rules = (\n # users of similar age are similar\n SubjectSimilarityRule(\n name=\"Users with similar age.\",\n \n # both users have given their age and the difference \n # is lower than five\n condition=lambda user1, user2: \n user1.age and user2.age and abs(user1.age - user2.age) <= 5,\n \n is_positive=True, \n \n weight=0.4,\n \n # a magic linear confidence function\n confidence=lambda user1, user2: \n 1 - float(abs(user1.age - user2.age))/AGE_DIFFERENCE,\n \n description=\"Users %(subject1)s and %(subject2)s are about \" + \n \"the same age.\"\n ), \n ) \n\n cluster_sets = (\n\n \n # user - gender\n SubjectClusterSet(\n \n name='User gender.',\n \n weight=0.5,\n \n # users that have a gender (filled)\n filter_entities=User.objects.exclude(gender=''),\n \n get_cluster_confidence_pairs=lambda user: ((user.gender, 1),),\n \n description=\"%(subject)s's gender is %(cluster)s.\"\n \n ),\n \n # user - country\n SubjectClusterSet(\n \n name='User country.',\n \n weight=0.1,\n \n # users that have country filled\n filter_entities=User.objects.filter(country__isnull=False),\n \n get_cluster_confidence_pairs=lambda user: ((user.country.name, 1),),\n \n description=\"%(subject)s is from %(cluster)s.\"\n ),\n )\n \n save_all_to_predictions = True\n\n random_recommendation_description = \"Recommending a random artist to the user.\"\n \n \n\nclass ArtistRecommender(NovelArtistRecommender):\n \"\"\"A recommender for recommending items no matter if they were heard or not.\"\"\"\n\n name = \"Artist Recommender\"\n \"\"\"The name\"\"\" \n \n remove_predicted_from_recommendations = False \n \"\"\"The already heard artists can appear in recommendations\"\"\"\n \n rules = NovelArtistRecommender.rules + ((SubjectObjectRule(\n name=\"User has listened to the artist.\",\n generator=_listens_artist_generator,\n weight=0.5,\n is_positive=True,\n description=\"%(subject)s listened to %(object)s.\",\n\n # the number of user's scrobbles on artist divided by the number of\n # user's scrobbles overall\n confidence=lambda s, o: \n float(Scrobble.objects.filter(user=s, track__artist=o).count())\\\n /Scrobble.objects.filter(user=s).count(),\n \n \n )),\n )\n \n" }, { "alpha_fraction": 0.5535823702812195, "alphanum_fraction": 0.5584813356399536, "avg_line_length": 28.472726821899414, "blob_id": "3bdb67cdeb002662d10c41d4a19ab745c5a01cd5", "content_id": "ba2543ea7825f6c68f2f9989d2aaad071fe2cebd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3266, "license_type": "no_license", "max_line_length": 99, "num_lines": 110, "path": "/code/adapter/flixster/models.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models for the flixster app\"\"\"\nfrom django.db import models\nfrom django.db.models import Min\n\nfrom unresyst.models import BaseEvaluationPair\nfrom constants import *\n\nclass User(models.Model):\n \"\"\"The user\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return 'user_%d' % self.id \n \n \nclass Movie(models.Model): \n \"\"\"The movie\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return 'movie_%d' % self.id \n\n \nclass Friend(models.Model):\n \"\"\"A user is a friend of another user. Symmetric. 
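(For illustration, with hypothetical ids: the friendship between user_7 and user_3 is stored once, as friend1=user_3, friend2=user_7, not as two mirrored rows.)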
Smaller id first.\"\"\"\n\n friend1 = models.ForeignKey('flixster.User', related_name='friends1')\n \"\"\"The first user in the friend relationship\"\"\" \n \n friend2 = models.ForeignKey('flixster.User', related_name='friends2')\n \"\"\"The second user in the friend relationship\"\"\" \n \n class Meta:\n unique_together = ('friend1', 'friend2') \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u'(%s, %s)' % (self.friend1, self.friend2)\n\n \nclass Rating(models.Model):\n \"\"\"A user rating a movie\"\"\"\n \n user = models.ForeignKey('flixster.User')\n \"\"\"The user\"\"\"\n \n movie = models.ForeignKey('flixster.Movie')\n \"\"\"The movie\"\"\"\n \n rating = models.DecimalField(max_digits=2, decimal_places=1)\n \"\"\"The rating\"\"\"\n\n class Meta:\n unique_together = ('user', 'movie')\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u'%s - %s: %f' % (self.user, self.movie, self.rating)\n\n\nclass MovieEvaluationPair(BaseEvaluationPair):\n \"\"\"Movie test pairs\"\"\"\n\n subj = models.ForeignKey('flixster.User')\n \"\"\"The subject\"\"\"\n \n obj = models.ForeignKey('flixster.Movie')\n \"\"\"The object\"\"\"\n \n test_ratio = 0.2\n \"\"\"The ratio of pairs to select to test pairs\"\"\"\n\n class Meta:\n app_label = 'flixster' \n \n @classmethod \n def select(cls, i=0):\n \"\"\"See the base class for the documentation.\"\"\"\n\n n = int(1 / cls.test_ratio) \n all_count = Rating.objects.count() \n \n it = 0 \n # take every n-th rating, remove it and put it to test data \n # save to test, remove from build\n for rating in Rating.objects.order_by('id').iterator():\n\n it += 1\n\n # if we aren't on the nth object go ahead\n if it % n != i:\n continue\n \n # save the test object and delete it from train\n cls.objects.create(\n subj=rating.user,\n obj=rating.movie,\n expected_expectancy=rating.rating/MAX_STARS)\n \n rating.delete()\n \n \n test_count = cls.objects.count()\n \n print \"%d test pairs selected from total %d pairs\" % (test_count, all_count) \n\n\n def get_success(self):\n \"\"\"See the base class for the documentation.\"\"\"\n return abs(self.obtained_expectancy - self.expected_expectancy) < MAX_TOLERANCE / MAX_STARS\n \n \n \n" }, { "alpha_fraction": 0.6610608100891113, "alphanum_fraction": 0.6610608100891113, "avg_line_length": 31.20833396911621, "blob_id": "3334f4d95e591e313845bdaff67bb82b060bdc3d", "content_id": "efc32801bbcc7d2e860c6653c2dacbea11ff5a43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 94, "num_lines": 24, "path": "/code/adapter/urls.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "from django.conf.urls.defaults import *\n\nfrom settings import SERVE_STATIC_FILES, MEDIA_ROOT\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n \n # the demo app \n (r'^', include('demo.urls', namespace='demo'))\n # Uncomment the admin/doc line below to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # (r'^admin/', include(admin.site.urls)),\n \n)\nif SERVE_STATIC_FILES:\n urlpatterns += patterns('',\n # for working with static files (for development only)\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT}) \n )\n" }, { 
"alpha_fraction": 0.5111602544784546, "alphanum_fraction": 0.5206112861633301, "avg_line_length": 35.74074172973633, "blob_id": "648a211ed35ae2ae4920e4491edf2db3d0765fc8", "content_id": "4ce47597afb1a0b95f3938f187880230f8333f0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4973, "license_type": "no_license", "max_line_length": 114, "num_lines": 135, "path": "/code/adapter/travel/recbckp.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": " rules = ( \n # click = sign of preference\n SubjectObjectRule(\n name='User has clicked on something on the tour profile.',\n weight=0.5,\n condition=None,\n is_positive=True,\n description='User %(subject)s has clicked on something on the %(object)s profile.',\n # pairs that user has clicked on the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in Click.objects.values_list('session__user', 'tour').distinct()),\n # the average is around 3, so take 1/6. so that 3 points to the middle.\n confidence=lambda u, t: min(float(Click.objects.filter(tour=t, session__user=u).count())/6, 1.0),\n ),\n \n # question .. also a sign of preference\n SubjectObjectRule(\n name='User has asked about the tour.',\n weight=0.5,\n condition=None,\n is_positive=True,\n description='User %(subject)s has asked about %(object)s.',\n # pairs that user has asked on the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in Question.objects.values_list('session__user', 'tour').distinct()),\n confidence=lambda u, t: min(float(Question.objects.filter(tour=t, session__user=u).count())/2, 1.0),\n ),\n \n # mouse move .. also a sign of preference\n SubjectObjectRule(\n name='User has moved the mouse on the tour profile.',\n weight=0.5,\n condition=None,\n is_positive=True,\n description='User %(subject)s has moved the mouse on %(object)s.',\n # pairs that user has moved on the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in MouseMove.objects.values_list('session__user', 'tour').distinct()),\n confidence=lambda u, t: min(float(MouseMove.objects.filter(tour=t, session__user=u).count())/18, 1.0),\n ),\n \n # view profile \n SubjectObjectRule(\n name='User has viewed the tour profile page.',\n weight=0.5,\n condition=None,\n is_positive=True,\n description='User %(subject)s has viewed %(object)s.',\n # pairs that user has viewed the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in ViewProfile.objects.values_list('session__user', 'tour').distinct()),\n # how many times * how long\n confidence=_viewed_profile_confidence\n ),\n\n \n )\n \n cluster_sets = (\n # cluster - tour types\n ObjectClusterSet(\n\n name=\"Tour type cluster set.\",\n\n weight=0.5,\n \n filter_entities=Tour.objects.all(),\n \n get_cluster_confidence_pairs=lambda tour: ((tour.tour_type.name, 1),),\n \n description=\"The tour %(object)s has type %(cluster)s.\",\n ),\n \n # cluster - countries\n ObjectClusterSet(\n\n name=\"Country cluster set.\",\n\n weight=0.5,\n \n filter_entities=Tour.objects.all(),\n \n get_cluster_confidence_pairs=lambda tour: ((tour.country.name, 1),),\n \n description=\"The tour %(object)s is to %(cluster)s country.\",\n ),\n\n )\n \n biases = (\n # multiply viewed tours\n ObjectBias(\n name=\"Most viewed tours.\",\n \n description=\"Tour %(object)s is much viewed\",\n \n weight=0.5,\n \n is_positive=True,\n \n generator=lambda: 
Tour.objects.annotate(hh=Count('viewprofile')).filter(hh__gt=2).distinct(),\n \n confidence=lambda t: min(float(t.viewprofile_set.count())/4, 1.0)\n ),\n \n # multiply clicked tours\n ObjectBias(\n name=\"Most clicked tours.\",\n \n description=\"Tour %(object)s is often clicked on.\",\n \n weight=0.5,\n \n is_positive=True,\n \n generator=lambda: Tour.objects.annotate(hh=Count('click')).filter(hh__gt=1).distinct(),\n \n confidence=lambda t: min(float(t.click_set.count())/2, 1.0)\n ),\n \n # multiply mouse moved tours\n ObjectBias(\n name=\"Most mouse moved tours.\",\n \n description=\"Tour %(object)s is often mouse moved.\",\n \n weight=0.5,\n \n is_positive=True,\n \n generator=lambda: Tour.objects.annotate(hh=Count('mousemove')).filter(hh__gt=6).distinct(),\n \n confidence=lambda t: min(float(t.mousemove_set.count())/12, 1.0)\n ),\n )\n\n\n\n \n\n" }, { "alpha_fraction": 0.8060836791992188, "alphanum_fraction": 0.8060836791992188, "avg_line_length": 28.22222137451172, "blob_id": "03439cbcfe150685e23ccce6e7ba2bcdefedf629", "content_id": "83dd87a6ac3bcf90c1e71fdb345da51af7d52ce5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 57, "num_lines": 9, "path": "/code/adapter/unresyst/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"A package holding the whole unresyst application.\n\nThe only class to be used outside is the BaseRecommender.\n\"\"\"\n\nfrom recommender.recommender import Recommender\nfrom recommender.rules import *\nfrom recommender.clusters import *\nfrom recommender.bias import *\n" }, { "alpha_fraction": 0.5788713097572327, "alphanum_fraction": 0.5795742869377136, "avg_line_length": 35.88328552246094, "blob_id": "85ee8cdf4107b400d05f293407aa78ee45437b76", "content_id": "0755784d1a25ffa1dfb225da4565b41a8ff03c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25605, "license_type": "no_license", "max_line_length": 120, "num_lines": 694, "path": "/code/adapter/unresyst/recommender/recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Module containing the definition of BaseRecommender: the base class \nfor user-defined recommenders.\n\nContents:\n - Recommender: the recommender for client subclassing\n - MetaRecommender: the metaclass for creating recommender classes\n\"\"\"\nimport math\nimport copy\nimport csv\n\nfrom base import BaseRecommender\nfrom predictions import RelationshipPrediction\nfrom unresyst.constants import *\nfrom unresyst.exceptions import ConfigurationError, InvalidParameterError, \\\n RecommenderNotBuiltError\nfrom unresyst.abstractor import BasicAbstractor \nfrom unresyst.aggregator import LinearAggregator, CombiningAggregator\nfrom unresyst.algorithm import SimpleAlgorithm, AggregatingAlgorithm, CompilingAlgorithm\nfrom unresyst.models.common import SubjectObject, Recommender as RecommenderModel\nfrom unresyst.compilator import GetFirstCompilator, CombiningCompilator\nfrom unresyst.combinator import AverageCombinator, TwistedAverageCombinator, ConfidenceFactorCombinator\nfrom unresyst.models.abstractor import RelationshipInstance, ExplicitRuleInstance\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\n\ndef _assign_recommender(list_rels, recommender):\n    \"\"\"Go through the list, if the items have the \"recommender\" attribute,\n    create a copy to the returning list, if not put it there directly\n \n @param list_rels: the list of rules/relationships\n \n @param recommender: the recommender they belong to \n \n @rtype: tuple of rules/relationships\n @return: the tuple of new or copied rules/relationships\n \"\"\"\n new_rels = []\n \n # go through the rels \n for rel in list_rels:\n \n # if they already have a recommender assigned, copy them\n if hasattr(rel, \"recommender\"):\n rel = copy.copy(rel)\n \n # assign the recommender and append it \n rel.recommender = recommender\n new_rels.append(rel)\n \n return tuple(new_rels)\n\nclass MetaRecommender(type):\n \"\"\"The meta-class adding a reference to the class for the contained\n rules and relationships.\n \"\"\"\n \n def __init__(cls, name, bases, dct): \n \"\"\"The class initializer.\n \n Adds the reference to the recommender class to all of the rules\n and relationships.\n \n Tries searching for the recommender model in the database.\n \"\"\"\n \n super(MetaRecommender, cls).__init__(name, bases, dct)\n \n # add the recommender class to the predicted relationship\n if cls.predicted_relationship: \n\n # if it already has a recommender assigned, create a copy.\n if hasattr(cls.predicted_relationship, 'recommender'):\n cls.predicted_relationship = copy.copy(cls.predicted_relationship)\n \n cls.predicted_relationship.recommender = cls\n \n # then to the rules\n if cls.rules:\n cls.rules = _assign_recommender(list_rels=cls.rules, recommender=cls) \n \n # finally to the relationships\n if cls.relationships:\n cls.relationships = _assign_recommender(\n list_rels=cls.relationships, \n recommender=cls)\n \n # moreover to the cluster sets\n if cls.cluster_sets:\n cls.cluster_sets = _assign_recommender(\n list_rels=cls.cluster_sets,\n recommender=cls) \n \n # and to the biases\n if cls.biases:\n cls.biases = _assign_recommender(\n list_rels=cls.biases,\n recommender=cls) \n\n\nclass Recommender(BaseRecommender):\n \"\"\"The base class for all user-defined recommenders.\n \n Implements the interface of the recommender. It doesn't hold any \n domain specific data.\n \n Defines default behaviour assigning the classes for the layers. \n \n The predicted relationship is ignored when building and updating the \n recommender. If it's needed, one should create a rule/relationship \n with the appropriate condition.\n \n The contained rules and relationships have to be included in only one\n recommender class. 
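A minimal subclass sketch (illustrative only; the MyUser/MyItem managers
and the PredictedRelationship arguments are assumptions, not taken from
this codebase):

    class MyRecommender(Recommender):
        name = "My recommender"
        subjects = MyUser.objects
        objects = MyItem.objects
        predicted_relationship = PredictedRelationship(
            name="User likes item.",
            condition=lambda u, i: u.liked_items.filter(id=i.id).exists(),
            description="User %(subject)s likes %(object)s.")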
There can't be a rule/relationship instance \n which is contained in two recommender classes.\n \"\"\"\n __metaclass__ = MetaRecommender\n \"\"\"A metaclass putting the reference to the recommender to all member\n rules and relationships.\n \"\"\" \n \n # Build phase:\n #\n \n @classmethod\n def build(cls):\n \"\"\"For documentation, see the base class\"\"\"\n \n \n # validate the subclass data\n # \n \n # Subjects, objects non-empty\n if not cls.subjects.all().exists():\n raise ConfigurationError(\n message=\"No subjects given\",\n recommender=cls,\n parameter_name=\"Recommender.subjects\",\n parameter_value=cls.subjects\n )\n \n if not cls.objects.all().exists():\n raise ConfigurationError(\n message=\"No objects given\",\n recommender=cls,\n parameter_name=\"Recommender.objects\",\n parameter_value=cls.objects\n )\n \n \n # predicted relationship given\n if not cls.predicted_relationship:\n raise ConfigurationError(\n message=\"No predicted relationship given\",\n recommender=cls,\n parameter_name=\"Recommender.predicted_relationship\",\n parameter_value=cls.predicted_relationship\n )\n \n # rules and relationships don't have to be given\n \n cls._print('Recommender validated, deleting old objects...')\n \n # if the recommender with the given name exists, delete it,\n RecommenderModel.objects.filter(class_name=cls.__name__).delete()\n \n # create a new recommender and save it, keep it in the class\n recommender_model = RecommenderModel(\n class_name=cls.__name__,\n name=cls.name,\n is_built=False,\n are_subjects_objects=(cls.subjects == cls.objects),\n random_recommendation_description=cls.random_recommendation_description,\n remove_predicted_from_recommendations=cls.remove_predicted_from_recommendations\n ) \n recommender_model.save() \n \n # build the recommender model\n #\n #\n cls._print(\"Old objects deleted. Creating universal subjectobjects...\")\n \n # Abstractor\n #\n \n # create the domain neutral representation for objects and subjects\n cls.abstractor.create_subjectobjects(\n recommender_model=recommender_model,\n subjects=cls.subjects, \n objects=cls.objects\n )\n \n cls._print(\"Universal subject and object representations created. Creating predicted_relationship instances...\")\n \n # create the relationship instances for the predicted relationship\n cls.abstractor.create_predicted_relationship_instances( \n predicted_relationship=cls.predicted_relationship \n )\n \n cls._print(\"Predicted relationship instances created. Creating relationship instances...\")\n \n # create relationship instances between subjects/objects \n cls.abstractor.create_relationship_instances(\n relationships=cls.relationships\n ) \n \n cls._print(\"Relationship instances created. Creating rule instances...\")\n \n # evaluate rules and make rule instances between the affected \n # subjects/objects\n cls.abstractor.create_rule_instances(rules=cls.rules)\n \n cls._print(\"Rule instances created. Creating clusters...\")\n \n # evaluate the clusters and their members\n cls.abstractor.create_clusters(cluster_sets=cls.cluster_sets)\n \n cls._print(\"Clusters created. Creating biases...\")\n \n # evaluate the biases\n cls.abstractor.create_biases(biases=cls.biases)\n \n cls._print(\"Biases created. 
Aggregating...\")\n\n \n \n # Algorithm\n # \n # build the algorithm model from the aggregated relationships\n cls.algorithm.build(recommender_model=recommender_model)\n \n cls._print(\"Algorithm built.\")\n \n # if it should be done and predicted should be removed, \n # save predicted_rel to predictions\n if cls.remove_predicted_from_recommendations and cls.save_all_to_predictions:\n \n cls._print(\"Saving explicit/predicted to predictions...\")\n \n # if explicit relationship is available, get its instances\n # if not, get the predicted_rel\n qs_predicted_rels = ExplicitRuleInstance.objects.filter(definition__recommender=recommender_model) \\\n if cls.explicit_rating_rule else \\\n RelationshipInstance.filter_predicted(recommender_model=recommender_model)\n \n for ri in qs_predicted_rels:\n \n # get the expectancy of the rating or the trivial\n expectancy = ri.expectancy if cls.explicit_rating_rule else TRIVIAL_EXPECTANCY \n \n rpi, created = RelationshipPredictionInstance.objects.get_or_create(\n subject_object1=ri.subject_object1,\n subject_object2=ri.subject_object2,\n recommender=recommender_model,\n defaults={\n 'expectancy': expectancy,\n 'is_trivial': True,\n 'description': ri.description,\n }\n )\n \n # if it was found update it to the predicted\n if not created:\n rpi.expectancy = expectancy\n rpi.is_trivial = True\n rpi.description = ri.description\n \n # save it if created or not \n rpi.save()\n \n # mark the recommender as built, save it and keep it in the class\n recommender_model.is_built = True\n recommender_model.save()\n \n cls._print('Done')\n\n\n # Recommend phase:\n # \n \n @classmethod \n def predict_relationship(cls, subject, object_, save_to_db=False):\n \"\"\"For documentation, see the base class\"\"\" \n \n recommender_model = cls._get_recommender_model()\n # if the recommender isn't built raise an error\n if not recommender_model or not recommender_model.is_built:\n raise RecommenderNotBuiltError(\n message=\"Build the recommender prior to performing the \" + \\\n \"predict_relationship action.\",\n recommender=cls\n )\n \n \n subject_ent_type = ENTITY_TYPE_SUBJECT \\\n if not recommender_model.are_subjects_objects \\\n else ENTITY_TYPE_SUBJECTOBJECT\n\n object_ent_type = ENTITY_TYPE_OBJECT \\\n if not recommender_model.are_subjects_objects \\\n else ENTITY_TYPE_SUBJECTOBJECT\n\n # get the domain neutral representations for the subject and object\n try:\n dn_subject = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=subject,\n entity_type=subject_ent_type,\n recommender=recommender_model\n )\n except SubjectObject.DoesNotExist, e:\n raise InvalidParameterError(\n message=\"The subject wasn't found in the recommender database.\" + \\\n \"Try rebuilding the recommender. Exception: %s\" % e,\n recommender=cls,\n parameter_name='subject', \n parameter_value=subject) \n\n try: \n dn_object = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=object_,\n entity_type=object_ent_type,\n recommender=recommender_model\n )\n except SubjectObject.DoesNotExist, e:\n raise InvalidParameterError(\n message=\"The object wasn't found in the recommender database.\" + \\\n \"Try rebuilding the recommender. 
Exception: %s\" % e,\n recommender=cls,\n parameter_name='object_', \n parameter_value=object_) \n \n # get the prediction from the algorithm\n prediction_model = cls.algorithm.get_relationship_prediction(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n dn_object=dn_object,\n remove_predicted=cls.remove_predicted_from_recommendations\n )\n \n # if it should be done and we know something about the pair\n if save_to_db and not prediction_model.is_uncertain:\n prediction_model.save()\n \n # create and return the outer-world object\n prediction = RelationshipPrediction(\n subject=subject,\n object_=object_,\n expectancy=prediction_model.expectancy,\n explanation=prediction_model.description,\n is_uncertain=prediction_model.is_uncertain\n ) \n return prediction\n\n\n @classmethod\n def get_recommendations(cls, subject, count=None): \n \"\"\"For documentation, see the base class\"\"\"\n \n recommender_model = cls._get_recommender_model()\n \n # if the recommender isn't built raise an error\n if not recommender_model or not recommender_model.is_built:\n raise RecommenderNotBuiltError(\n message=\"Build the recommender prior to performing the \" + \\\n \"get_recommendations action.\",\n recommender=cls\n )\n \n # if count wasn't given take the default one\n if not count:\n count = cls.default_recommendation_count\n \n subject_ent_type = ENTITY_TYPE_SUBJECT \\\n if not recommender_model.are_subjects_objects \\\n else ENTITY_TYPE_SUBJECTOBJECT\n \n # convert the the subject to domain neutral\n try:\n dn_subject = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=subject,\n entity_type=subject_ent_type,\n recommender=recommender_model\n )\n except SubjectObject.DoesNotExist, e:\n raise InvalidParameterError(\n message=\"The subject wasn't found in the recommender database.\" + \\\n \"Try rebuilding the recommender. 
Exception: %s\" % e,\n recommender=cls,\n parameter_name='subject', \n parameter_value=subject) \n\n limit = cls.recommendation_expectancy_limit \\\n if not cls.recommendation_expectancy_limit is None else 0\n\n # get the recommendations from the algorithm\n prediction_models = cls.algorithm.get_recommendations(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n count=count,\n expectancy_limit=limit,\n remove_predicted=cls.remove_predicted_from_recommendations\n )\n \n recommendations = []\n \n # go through the obtained predictions\n for pred_model in prediction_models:\n\n # obtain the object from the prediction\n dn_object = pred_model.get_related(dn_subject)\n \n # get its domain specific representation\n object_ = dn_object.get_domain_specific_entity(entity_manager=cls.objects)\n \n # create the outer-world object\n prediction = RelationshipPrediction(\n subject=subject,\n object_=object_,\n expectancy=pred_model.expectancy,\n explanation=pred_model.description,\n is_uncertain=pred_model.is_uncertain\n ) \n \n recommendations.append(prediction)\n\n return recommendations\n\n @classmethod\n def export_predictions(cls, filename):\n \"\"\"Export all predictions to a csv \n file of the given name.\n \n @type filename: str\n @param filename: the full path to the file\n \n @raise FileNotExists and other file open errors.\n \"\"\"\n recommender_model = cls._get_recommender_model()\n \n with open(filename, 'w') as f:\n \n i = 0\n \n # loop through pairs, export the prediction instances\n for rpi in RelationshipPredictionInstance.objects.filter(recommender=recommender_model):\n \n # create the common part\n linestr = \"%s,%s,%s\\n\" % (\n rpi.subject_object1.id_in_specific, \n rpi.subject_object2.id_in_specific,\n rpi.expectancy) \n \n # write it to the file\n f.write(linestr)\n \n i += 1\n \n print \" %d predictions exported\" % i\n\n @classmethod\n def update_predictions(cls, filename):\n \"\"\"Load predictions for pairs that are unknown in our recommender from \n the given csv file.\n \n The file has to be in format:\n <id subject>,<id object>,<prediction>\\n\n \n @type filename: str\n @param filename: the full path to the file\n\n @raise FileNotExists and other file open errors.\n \"\"\"\n recommender_model = cls._get_recommender_model() \n \n # open the csv reader\n reader = csv.reader(open(filename, \"rb\"), delimiter=',', quoting=csv.QUOTE_NONE)\n \n i = 0\n # parse the csv line by line\n for subj_id, obj_id, expectancy in reader:\n \n # parse the values to the right types\n subj_id = int(subj_id)\n obj_id = int(obj_id)\n expectancy = float(expectancy)\n \n dn_subject = SubjectObject.objects.get(\n entity_type=ENTITY_TYPE_SUBJECT, \n id_in_specific=subj_id,\n recommender=recommender_model)\n dn_object = SubjectObject.objects.get(\n entity_type=ENTITY_TYPE_OBJECT, \n id_in_specific=obj_id,\n recommender=recommender_model)\n \n # if it's already imported, go to the next pair\n if RelationshipPredictionInstance.objects.filter(\n subject_object1=dn_subject, \n subject_object2=dn_object, \n recommender=recommender_model).exists():\n continue\n \n # create a prediction\n RelationshipPredictionInstance.objects.create(\n subject_object1=dn_subject,\n subject_object2=dn_object,\n recommender=recommender_model,\n expectancy=expectancy\n )\n\n i += 1\n\n print \"%d new predictions imported\" % i\n \n \n \n \n \n # Update phase:\n # \n @classmethod\n def add_subject(cls, subject):\n \"\"\"For documentation, see the base class\"\"\"\n \n # add the object to abstractor rule/relationship 
instances, \n        # see what has changed\n        instance_changes = cls.Abstractor.add_subject(\n            recommender=cls.__name__,\n            subject=subject\n        )\n        \n        # let the Aggregator update the aggregates\n        aggregated_changes = cls.Aggregator.update(\n            recommender=cls.__name__,\n            instance_changes=instance_changes\n        )\n        \n        # let the Algorithm update its structures\n        cls.Algorithm.update(\n            recommender=cls.__name__,\n            aggregated_changes=aggregated_changes\n        )\n\n\n    @classmethod\n    def add_object(cls, object_):\n        \"\"\"For documentation, see the base class\"\"\"\n        \n        # add the object to abstractor rule/relationship instances\n        # see what has changed\n        instance_changes = cls.Abstractor.add_object(\n            recommender=cls.__name__,\n            object_=object_\n        )\n        # then proceed similarly - probably worth factoring into a helper (see the sketch further below). \n\n    @classmethod\n    def update_subject(cls, subject):\n        \"\"\"For documentation, see the base class\"\"\"\n\n        # update the subject in abstractor rule/relationship instances\n        # see what has changed\n        instance_changes = cls.Abstractor.update_subject(\n            recommender=cls.__name__,\n            subject=subject\n        )\n        # then proceed similarly - probably worth factoring into a helper.\n\n\n    @classmethod    \n    def update_object(cls, object_):\n        \"\"\"For documentation, see the base class\"\"\"\n\n        # update the object in abstractor rule/relationship instances\n        # see what has changed\n        instance_changes = cls.Abstractor.update_object(\n            recommender=cls.__name__,\n            object_=object_\n        )\n        # then proceed similarly - probably worth factoring into a helper.    \n    \n    @classmethod\n    def remove_subject(cls, subject):\n        \"\"\"For documentation, see the base class\"\"\"\n\n        # remove the subject from abstractor rule/relationship instances\n        # see what has changed\n        instance_changes = cls.Abstractor.remove_subject(\n            recommender=cls.__name__,\n            subject=subject\n        )\n        # then proceed similarly - probably worth factoring into a helper.\n\n\n    @classmethod    \n    def remove_object(cls, object_):\n        \"\"\"For documentation, see the base class\"\"\"\n\n        # remove the object from abstractor rule/relationship instances\n        # see what has changed\n        instance_changes = cls.Abstractor.remove_object(\n            recommender=cls.__name__,\n            object_=object_\n        )\n        # then proceed similarly - probably worth factoring into a helper.    \n    \n    \n    # Class configuration - the behaviour of the layers below the recommender\n    # Can be overridden in user-defined subclasses\n    \n    abstractor = BasicAbstractor()\n    \"\"\"The class that will be used for the abstractor level. Can be \n    overridden in subclasses\"\"\"    \n    \n    algorithm = AggregatingAlgorithm(\n        inner_algorithm=CompilingAlgorithm(\n            inner_algorithm=SimpleAlgorithm(\n                inner_algorithm=None\n            ),\n            compilator=CombiningCompilator(combinator=TwistedAverageCombinator(), breadth=0) #\n        ),\n        aggregator=CombiningAggregator(combinator=TwistedAverageCombinator())\n    )\n    \"\"\"The default algorithm setup.\n    The class that will be used for the algorithm level.
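# NOTE: a minimal sketch of how the stubbed update methods above (add_object,
# update_subject, update_object, remove_subject, remove_object) could finish
# their work, mirroring the three-step Abstractor -> Aggregator -> Algorithm
# chain of add_subject; the update signatures are copied from add_subject, and
# everything else here is an assumption, not code from the original project:
#
#     aggregated_changes = cls.Aggregator.update(
#         recommender=cls.__name__,
#         instance_changes=instance_changes
#     )
#     cls.Algorithm.update(
#         recommender=cls.__name__,
#         aggregated_changes=aggregated_changes
#     )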
Can be \n overriden in subclasses\"\"\" \n \n # TwistedAverageCombinator\n # AverageCombinator\n # ConfidenceFactorCombinator\n \n default_recommendation_count = DEFAULT_RECOMMENDATION_COUNT\n \"\"\"The defaul count of the obtained recommended objects\"\"\"\n \n remove_predicted_from_recommendations = True\n \"\"\"The entity pairs that already have the predicted_relationship\n between them, will be removed from recommendation list.\n \"\"\"\n \n random_recommendation_description = \"A random object\"\n \"\"\"The description for random recommendations Can be overriden in\n subclass\"\"\"\n \n verbose_build = True\n \"\"\"Should messages be printed during the build?\"\"\"\n \n save_all_to_predictions = True\n \n # Auxiliary methods - not to be used from outside the application\n # \n @classmethod\n def _get_recommender_model(cls):\n \"\"\"Get the recommender model belonging to the class\"\"\"\n \n # can't be caching 'cause database can die out without notifying the \n # if it's already saved, return it\n #if cls._recommender_model:\n # return cls._recommender_model\n \n # otherwise try finding it in database \n models = RecommenderModel.objects.filter(class_name=cls.__name__)\n \n # if the recommender was found, assign it to the class and return it\n if models: \n assert len(models) == 1 \n return models[0]\n \n # if not return None\n return None \n \n @classmethod\n def _get_entity_manager(cls, entity_type):\n \"\"\"Get the manager from the recommender for the given entity type.\n \n @type entity_type: str\n @param entity_type: the type of the entity 'S'/'O'/'SO'\n \n @rtype: django.db.models.manager.Manager\n @return: the manager over the domain specific entities.\n \n @raise KeyError: when the entity_type is invalid \n \"\"\"\n \n manager_dict = {\n ENTITY_TYPE_SUBJECT: cls.subjects,\n ENTITY_TYPE_OBJECT: cls.objects, \n ENTITY_TYPE_SUBJECTOBJECT: cls.objects # or sujbects if you care\n }\n \n return manager_dict[entity_type]\n \n @classmethod\n def _print(cls, msg):\n if cls.verbose_build:\n print msg \n" }, { "alpha_fraction": 0.715423583984375, "alphanum_fraction": 0.7286990284919739, "avg_line_length": 27.57241439819336, "blob_id": "a6e4e96e9efadd85c404c0083ff7cea0be5b242a", "content_id": "eb3b0ec2bf38722d752add83f7bf080bf7a09b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4143, "license_type": "no_license", "max_line_length": 87, "num_lines": 145, "path": "/UnresystCD/code/adapter/unresyst/constants.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The constants used in Unresyst\"\"\"\n\n_ = lambda x: x\n\nMAX_LENGTH_NAME = 80\n\"\"\"The maximum length of the name in the universal representation.\"\"\"\n\nMAX_LENGTH_CLASS_NAME = 40\n\"\"\"The maximum length of the class name.\"\"\"\n\nMAX_LENGTH_ENTITY_TYPE = 2\n\"\"\"The maximum length of the entity type string\"\"\"\n\nMAX_LENGTH_RELATIONSHIP_TYPE = 5\n\"\"\"The maximum length of the relationship type string\"\"\"\n\nMAX_LENGTH_ID = 50\n\"\"\"The maximum length of the id in parent system\"\"\"\n\nENTITY_TYPE_SUBJECT = 'S'\n\"\"\"The subject entity type\"\"\"\n\nENTITY_TYPE_OBJECT = 'O'\n\"\"\"The object entity type\"\"\"\n\nENTITY_TYPE_SUBJECTOBJECT = 'SO'\n\"\"\"The subject object entity type\"\"\"\n\nENTITY_TYPE_CHOICES = (\n # a subject:\n (ENTITY_TYPE_SUBJECT, _('Subject')),\n \n # an object:\n (ENTITY_TYPE_OBJECT, _('Object')),\n \n # when subject domain is the same as object domain\n (ENTITY_TYPE_SUBJECTOBJECT, _('Subject == 
Object')),\n)\n\"\"\"Choices for the entity_type field\"\"\"\n\nRELATIONSHIP_TYPE_SEPARATOR = '-'\n\nRELATIONSHIP_TYPE_SUBJECT_OBJECT = \\\n    ENTITY_TYPE_SUBJECT + RELATIONSHIP_TYPE_SEPARATOR + ENTITY_TYPE_OBJECT\n\"\"\"Subject-object relationship type\"\"\"\n\nRELATIONSHIP_TYPE_SUBJECT_SUBJECT = \\\n    ENTITY_TYPE_SUBJECT + RELATIONSHIP_TYPE_SEPARATOR + ENTITY_TYPE_SUBJECT \n\"\"\"Subject-subject relationship type\"\"\"\n\nRELATIONSHIP_TYPE_OBJECT_OBJECT = \\\n    ENTITY_TYPE_OBJECT + RELATIONSHIP_TYPE_SEPARATOR + ENTITY_TYPE_OBJECT\n\"\"\"Object-object relationship type\"\"\"\n\nRELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT = \\\n    ENTITY_TYPE_SUBJECTOBJECT + RELATIONSHIP_TYPE_SEPARATOR + ENTITY_TYPE_SUBJECTOBJECT\n\"\"\"Subjectobject-subjectobject relationship type\"\"\"\n\nRELATIONSHIP_TYPE_CHOICES = (\n    # a subject-object relationship\n    (RELATIONSHIP_TYPE_SUBJECT_OBJECT, _('Subject-Object')),\n    \n    # a subject-subject relationship\n    (RELATIONSHIP_TYPE_SUBJECT_SUBJECT, _('Subject-Subject')),\n    \n    # an object-object relationship\n    (RELATIONSHIP_TYPE_OBJECT_OBJECT, _('Object-Object')),\n    \n    # a relationship for recommender where subject domain equals object domain\n    (RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT, _('SubjectObject-SubjectObject')),\n)\n\"\"\"Choices for the relationship_type field\"\"\"\n\nFORMAT_STR_SUBJECT = \"subject\"\nFORMAT_STR_OBJECT = \"object\"\nFORMAT_STR_SUBJECT1 = \"subject1\"\nFORMAT_STR_SUBJECT2 = \"subject2\"\nFORMAT_STR_OBJECT1 = \"object1\"\nFORMAT_STR_OBJECT2 = \"object2\"\nFORMAT_STR_SUBJECTOBJECT = 'subjectobject'\nFORMAT_STR_SUBJECTOBJECT1 = \"subjectobject1\"\nFORMAT_STR_SUBJECTOBJECT2 = \"subjectobject2\"\nFORMAT_STR_CLUSTER = \"cluster\"\n\"\"\"Format strings used in the explanation field.\"\"\"\n\nCOMPILATOR_DEPTH_ONE_UNSURE = 1\n\"\"\"One unsure relationship is used [+ predicted_relationship]\"\"\"\n\nCOMPILATOR_DEPTH_COMING_SOON = 2\n\"\"\"We'll see\"\"\"\n\nDEFAULT_RECOMMENDATION_COUNT = 10\n\"\"\"The default count of the obtained recommended objects\"\"\"\n\nDEFAULT_COMPILATOR_BREADTH = 10\n\"\"\"The default neighbourhood size for the compilator\"\"\"\n\nDEFAULT_COMPILATOR_PAIR_DEPTH = 10\n\"\"\"The default compilation element count taken for a pair for a group\"\"\"\n\nDEFAULT_COMPILATOR_DEPTH = COMPILATOR_DEPTH_ONE_UNSURE\n\"\"\"Take only one unsure relationship\"\"\"\n\nUNCERTAIN_PREDICTION_VALUE = 0.5\n\"\"\"The value that is returned when the prediction for the pair isn't \navailable\"\"\"\n\nALREADY_IN_REL_PREDICTION_VALUE = 1.0\n\"\"\"The value that is returned when the pair already is in the \npredicted_relationship\"\"\"\n\nMIN_WEIGHT = 0.0\nMAX_WEIGHT = 1.0\n\"\"\"Weight limits\"\"\"\n\nMIN_CONFIDENCE = 0.0\nMAX_CONFIDENCE = 1.0\n\"\"\"Confidence limits\"\"\"\n\nMIN_EXPECTANCY = 0.0\nMAX_EXPECTANCY = 1.0\n\"\"\"Expectancy limits\"\"\"\n\nTRIVIAL_EXPECTANCY = 1.0\n\"\"\"The expectancy that is given for the trivial cases - the subject already \nlistened to the artist.\"\"\"\n\nCONFIDENCE_KWARG_NAME = 'confidence'\nEXPECTANCY_KWARG_NAME = 'expectancy'\n\nREASON_STR = \"Reason %d\"\n\nLOG_RECOMMENDATIONS_FILENAME = 'recommendations.txt'\nLOG_PREDICTIONS_FILENAME = 'predictions.txt'\nLOG_HITS_FILENAME = 'hits.txt'\n\nMAX_REASONS_DESCRIPTION = 5\n\nMORE_REASONS_STR = \"... 
and %d other reasons.\"\n\nPROMISING_RATE = 1.2\n\"\"\"The rate by which the compiler breadth is multiplied to get the number\nof promising objects to be inspected\"\"\"\n\nEXP_PRECISION = 0.00001\n" }, { "alpha_fraction": 0.6530148983001709, "alphanum_fraction": 0.6531229615211487, "avg_line_length": 32.28776931762695, "blob_id": "18f51e08a7555f5c74b5e0faa02ff606f12983dc", "content_id": "fd528e0cb2670f8ca1e5c3a9e6767d53e99cefe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9254, "license_type": "no_license", "max_line_length": 126, "num_lines": 278, "path": "/code/adapter/unresyst/combinator/combination_element.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The elements that are given to combinators to combine them. \nInstancies of the classes represent knowledge that we have about a pair\nto count similarity/preference.\"\"\"\n\nfrom unresyst.constants import *\n\ndef _get_expectancy_positiveness(expectancy):\n return expectancy > UNCERTAIN_PREDICTION_VALUE\n\nclass BaseCombinationElement(object):\n \"\"\"The base for all elements that are being combined in combinator\"\"\"\n \n def __init__(self):\n \"\"\"Initialize the members\"\"\"\n self._positiveness = None\n self._expectancy = None\n self._description = None\n \n def get_positiveness(self):\n \"\"\"Is the element positive/negative to the combination?\"\"\"\n if self._positiveness is None:\n self._positiveness = self._get_positiveness()\n \n return self._positiveness\n \n def get_expectancy(self):\n \"\"\"The expectancy of the element\"\"\"\n \n if self._expectancy is None:\n self._expectancy = self._get_expectancy()\n \n return self._expectancy\n \n def get_description(self):\n \"\"\"The description of the element\"\"\" \n \n if self._description is None:\n self._description = self._get_description()\n \n return self._description\n\n def _get_positiveness(self):\n pass\n\n def _get_expectancy(self):\n pass\n \n def _get_description(self):\n pass\n \n def __repr__(self):\n return \"<%f, %s, %s>\" % (self.get_expectancy(), self.get_positiveness(), self.get_description()) \n\nclass SubjectObjectRelCombinationElement(BaseCombinationElement):\n \"\"\"The relationship between a subject and an object meaning the preference\n in means of the predicted_relationship. \n \n For compilator. 
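# NOTE: a hypothetical usage sketch (not from the original project) showing the
# memoization in BaseCombinationElement above: each _get_* hook runs at most
# once, on the first call of the matching public accessor, and the result is
# cached in _expectancy / _positiveness / _description:
#
#     class ConstantElement(BaseCombinationElement):
#         def _get_positiveness(self):
#             return True
#         def _get_expectancy(self):
#             return 0.75          # illustrative fixed value
#         def _get_description(self):
#             return "a constant element"
#
#     e = ConstantElement()
#     e.get_expectancy()  # computes 0.75 via _get_expectancy() and caches it
#     e.get_expectancy()  # served from the cache, _get_expectancy() is not called again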
\n    \"\"\"\n    \n    def __init__(self, rel_instance):\n        \"\"\"The initializer\"\"\"\n        \n        super(SubjectObjectRelCombinationElement, self).__init__()\n        \n        self.rel_instance = rel_instance\n\n    def _get_positiveness(self):\n        return self.rel_instance.definition.as_leaf_class().is_positive\n\n    def _get_expectancy(self):\n        return self.rel_instance.get_expectancy() \n    \n    def _get_description(self):\n        return self.rel_instance.description \n    \n    \nclass _SimilarityCombinationElement(BaseCombinationElement):\n    \"\"\"The element of a similarity combination\"\"\"\n    pass\n    \nclass RelSimilarityCombinationElement(_SimilarityCombinationElement):\n    \"\"\"The similarity coming out of a rule/relationship\n    \n    For Aggregator.\n    \"\"\"\n    \n    def __init__(self, rel_instance):\n        \"\"\"The initializer\"\"\"\n        \n        super(RelSimilarityCombinationElement, self).__init__()\n        \n        self.rel_instance = rel_instance\n        \"\"\"The rule/relationship instance the similarity is obtained from\n        @type rel_instance: RelationshipInstance\n        \"\"\"\n        \n    def _get_positiveness(self):\n        return self.rel_instance.definition.as_leaf_class().is_positive\n\n    def _get_expectancy(self):\n        return self.rel_instance.get_expectancy() \n    \n    def _get_description(self):\n        return self.rel_instance.description \n\n\nclass ClusterSimilarityCombinationElement(_SimilarityCombinationElement):\n    \"\"\"The similarity coming out of a common cluster membership\n\n    For combinator and aggregator.\n    \"\"\"\n\n    def __init__(self, cluster_members):\n        \"\"\"The initializer\"\"\"\n        \n        super(ClusterSimilarityCombinationElement, self).__init__()\n        \n        self.cluster_members = cluster_members\n        \"\"\"The pair of cluster members that caused the similarity\n        @type cluster_members: pair ClusterMember, ClusterMember\n        \"\"\"\n        \n    def _get_positiveness(self):\n        \"\"\"Cluster membership is always positive\"\"\"\n        return True\n    \n    def _get_expectancy(self):\n        \"\"\"Return the product of the reasoning members\"\"\"\n        \n        return self.cluster_members[0].get_pair_expectancy(cluster_member_pair=self.cluster_members)\n    \n    def _get_description(self):\n        \"\"\"Return the concatenated member descriptions\"\"\"\n        return \" \".join(cm.description for cm in self.cluster_members)\n    \nclass BiasCombinationElement(BaseCombinationElement):\n    \"\"\"The bias of a subject/object coming into the combination\n    For aggregator.\n    \"\"\"\n    \n    def __init__(self, bias_instance):\n        \"\"\"The initializer\"\"\"\n        \n        super(BiasCombinationElement, self).__init__()\n        \n        self.bias_instance = bias_instance\n        \"\"\"Bias instance. \n        @type bias_instance: BiasInstance\n        \"\"\" \n\n\n    def _get_positiveness(self):\n        return self.bias_instance.definition.is_positive\n    \n    def _get_expectancy(self):\n        return self.bias_instance.get_expectancy()\n\n    def _get_description(self):\n        return self.bias_instance.description\n\nclass BiasAggregateCombinationElement(BaseCombinationElement):\n    \"\"\"The aggregated bias of a subject/object.
\n For compilator.\n \"\"\"\n \n def __init__(self, bias_aggregate):\n \"\"\"The initializer\"\"\"\n \n super(BiasAggregateCombinationElement, self).__init__()\n \n self.bias_aggregate = bias_aggregate\n \"\"\"The aggregated bias.\n @type bias_aggregate: AggregatedBiasInstance\n \"\"\"\n \n def _get_positiveness(self): \n exp = self.get_expectancy()\n return _get_expectancy_positiveness(exp)\n \n def _get_expectancy(self):\n return self.bias_aggregate.expectancy\n \n def _get_description(self):\n return self.bias_aggregate.description\n\n\nclass _PredictedPlusSimilarityCombinationElement(BaseCombinationElement):\n \"\"\"A sign of preference made of known predicted relationship plus similarity.\n For compilator.\n \"\"\"\n \n def __init__(self, predicted_rel, similarity_aggregate):\n \"\"\"The initializer\"\"\"\n \n super(_PredictedPlusSimilarityCombinationElement, self).__init__()\n \n self.predicted_rel = predicted_rel\n \"\"\"The predicted relationship to/from some other object/subject\n @type predicted_rel: RelationshipInstance binded to PredictedRelationshipDefinition\n \"\"\"\n \n self.similarity_aggregate = similarity_aggregate\n \"\"\"The aggregated similarity between the subjects/objects\n @type similarity_aggregate: AggregatedRelationshipInstance\n \"\"\"\n\n def _get_positiveness(self): \n exp = self.get_expectancy()\n return _get_expectancy_positiveness(exp)\n \n def _get_expectancy(self):\n return self.similarity_aggregate.expectancy\n \n\n\nclass PredictedPlusObjectSimilarityCombinationElement(_PredictedPlusSimilarityCombinationElement):\n \"\"\"Predicted relationship plus similarity of objects.\n For compilator.\n \"\"\"\n def _get_description(self):\n return \"%s And similarity: %s\" % (self.predicted_rel.description, self.similarity_aggregate.description)\n\nclass PredictedPlusSubjectSimilarityCombinationElement(_PredictedPlusSimilarityCombinationElement):\n \"\"\"Predicted relationship plus similarity of subjects.\n For compilator.\n \"\"\"\n def _get_description(self):\n return \"Similarity: %s And: %s\" % (self.similarity_aggregate.description, self.predicted_rel.description)\n\n\nclass _PredictedPlusClusterMemberCombinationElement(BaseCombinationElement):\n \"\"\"A sign of preference made of known predicted relationship plus similarity\n coming out of two cluster memberships.\n\n For compilator.\n \"\"\"\n\n def __init__(self, predicted_rel, cluster_combination_element):\n \"\"\"The initializer\"\"\"\n \n super(_PredictedPlusClusterMemberCombinationElement, self).__init__()\n \n self.predicted_rel = predicted_rel\n \"\"\"The predicted relationship to/from some other object/subject\n @type predicted_rel: RelationshipInstance binded to PredictedRelationshipDefinition\n \"\"\"\n \n self.cluster_combination_element = cluster_combination_element\n \"\"\"The pair of cluster memeberships meaning the similarity. 
In the order as if coming\n from the subject to the object.\n @type cluster_membership_pair: ClusterSimilarityCombinationElement\n \"\"\" \n \n \n def _get_positiveness(self): \n return True\n \n def _get_expectancy(self):\n return self.cluster_combination_element.get_expectancy()\n \n \n\nclass PredictedPlusObjectClusterMemberCombinationElement(_PredictedPlusClusterMemberCombinationElement):\n \"\"\"Predicted relationship plus cluster membership of objects.\n\n For compilator.\n \"\"\"\n def _get_description(self):\n return \"%s And similarity: %s\" % (self.predicted_rel.description, self.cluster_combination_element.get_description())\n\nclass PredictedPlusSubjectClusterMemberCombinationElement(_PredictedPlusClusterMemberCombinationElement):\n \"\"\"Predicted relationship plus cluster membership of subjects.\n\n For compilator.\n \"\"\"\n def _get_description(self):\n return \"Similarity: %s And: %s\" % (self.cluster_combination_element.get_description(), self.predicted_rel.description)\n" }, { "alpha_fraction": 0.6421499252319336, "alphanum_fraction": 0.6584158539772034, "avg_line_length": 23.379310607910156, "blob_id": "41a1d8c02a6c91077ddaa1edff6bc895d5cd9457", "content_id": "56b5bc0bc1ec5156785f4f3dbcdcdd87f27ddaca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 63, "num_lines": 58, "path": "/UnresystCD/code/adapter/lastfm/evaluation.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The evaluators for the last.fm recommenders\"\"\"\n\nfrom unresyst.recommender.evaluation import BaseEvaluator\nfrom unresyst.recommender.rank_evaluation import RankEvaluator\nfrom models import ArtistEvalPair, NovelArtistEvalPair\nfrom unresyst.recommender.metrics import rmse, precision_recall\n\nclass ArtistRecommenderEvaluator(BaseEvaluator):\n \"\"\"The evaluator of the artist recommender\"\"\"\n \n EvaluationPairModel = ArtistEvalPair\n \"\"\"The model - pairs\"\"\"\n \n prediction_metric = rmse\n \"\"\"The metric\"\"\"\n \n recommendation_metric = precision_recall\n \"\"\"The other metric\"\"\"\n \nclass ArtistRankEvaluator(RankEvaluator):\n \"\"\"Evaluating the rank metric\"\"\"\n \n EvaluationPairModel = ArtistEvalPair\n \"\"\"The model - pairs\"\"\"\n \n SUBJ_IDS = [\n 6L,\n 11L,\n 14L,\n 36L,\n 38L,\n 53L,\n 55L,\n 59L,\n 61L,\n 79L,\n 81L,\n 90L,\n ]\n\n \nclass NovelArtistRecommenderEvaluator(BaseEvaluator):\n \"\"\"The evaluator of the novel artist recommender\"\"\"\n \n EvaluationPairModel = NovelArtistEvalPair\n \n prediction_metric = rmse\n \n recommendation_metric = precision_recall \n\n \nclass NovelArtistRankEvaluator(RankEvaluator):\n \"\"\"Evaluation of the rank metric\"\"\"\n \n EvaluationPairModel = NovelArtistEvalPair\n \"\"\"The model - pairs\"\"\"\n \n SUBJ_IDS = ArtistRankEvaluator.SUBJ_IDS\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.680672287940979, "avg_line_length": 33, "blob_id": "00c30c4627da8a8eb34e37fc4c92ac1e3c5d7d6e", "content_id": "9f3810ae018f50fc898187da60be831f6161be69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 92, "num_lines": 14, "path": "/code/adapter/unresyst/combinator/function_combinator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Combinator using the magic function.\"\"\"\n\nfrom base import BaseCombinator\n\nclass 
FunctionCombinator(BaseCombinator):\n    \"\"\"A combinator using a special function to combine.\n    \"\"\"\n    \n    def _combine(self, combination_elements, ResultClass):\n        \"\"\"See the base class for documentation\"\"\"\n        \n        # feed it into the magic function and we're done (compute the average and the number of positive/negative elements)\n        res = ResultClass(expectancy=1, description='')\n        return res\n" }, { "alpha_fraction": 0.48442137241363525, "alphanum_fraction": 0.48961424827575684, "avg_line_length": 33.89610290527344, "blob_id": "ba5938caabc8a4048c33761bc89052173ba4d996", "content_id": "f3491924fcd8f3edade3395e3f275f7523756234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 142, "num_lines": 77, "path": "/UnresystCD/code/adapter/flixster/recbckp.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "    relationships = (\n        # users in friendship\n        SubjectSimilarityRelationship(\n            name=\"Users are friends.\",\n            \n            generator=lambda: [(f.friend1, f.friend2) for f in Friend.objects.all()], \n            \n            is_positive=True, \n            \n            weight=0.5, \n            \n            description=\"Users %(subject1)s and %(subject2)s are friends.\",\n        ),\n    )\n    \"\"\"The relationships\"\"\"\n    \n    biases = (\n        # people giving high ratings\n        SubjectBias(\n            name=\"Users giving high ratings.\",\n            \n            description=\"User %(subject)s gives high ratings.\",\n            \n            weight=0.5, \n            \n            is_positive=True,\n            \n            generator=lambda: User.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__gt=str(MIN_HIGH_RATING)), \n            \n            confidence=lambda user: user.rating_set.aggregate(Avg('rating'))['rating__avg'] - MIN_HIGH_RATING\n        ),\n        \n        # highly rated movies\n        ObjectBias(\n            name=\"High-rated movies.\",\n            \n            description=\"Movie %(object)s is high-rated\",\n            \n            weight=0.5,\n            \n            is_positive=True,\n            \n            generator=lambda: Movie.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__gt=str(MIN_HIGH_RATING)),\n            \n            confidence=lambda movie: movie.rating_set.aggregate(Avg('rating'))['rating__avg'] - MIN_HIGH_RATING\n        ),\n        \n        # people giving low ratings\n        SubjectBias(\n            name=\"Users giving low ratings.\",\n            \n            description=\"User %(subject)s gives low ratings.\",\n            \n            weight=0.5, \n            \n            is_positive=False,\n            \n            generator=lambda: User.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__lt=str(MAX_LOW_RATING)), \n            \n            confidence=lambda user: MAX_LOW_RATING - user.rating_set.aggregate(Avg('rating'))['rating__avg'] \n        ),\n        \n        # low-rated movies\n        ObjectBias(\n            name=\"Low-rated movies.\",\n            \n            description=\"Movie %(object)s is low-rated\",\n            \n            weight=0.5,\n            \n            is_positive=False,\n            \n            generator=lambda: Movie.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__lt=str(MAX_LOW_RATING)),\n            \n            confidence=lambda movie: MAX_LOW_RATING - movie.rating_set.aggregate(Avg('rating'))['rating__avg']\n        ), \n    )\n    \n" }, { "alpha_fraction": 0.737500011920929, "alphanum_fraction": 0.737500011920929, "avg_line_length": 25.66666603088379, "blob_id": "c3752c1c077960e0f5b803ab01e63cf04f6a1dae", "content_id": "9faa7d4d0922076951d8647a2b6aad672ceadc57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/UnresystCD/code/adapter/lastfm/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Application holding the data and a recommender for \nthe Last.fm dataset.\n\"\"\"\n" }, { 
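# NOTE: a hypothetical filling-in of FunctionCombinator._combine above (the
# original returns a constant placeholder). It follows that file's own comment -
# average the expectancies and count the positive/negative elements. The 0.5
# fallback mirrors UNCERTAIN_PREDICTION_VALUE from the constants module; none
# of this is the project's actual implementation:
#
#     exps = [el.get_expectancy() for el in combination_elements]
#     n_positive = sum(1 for el in combination_elements if el.get_positiveness())
#     n_negative = len(exps) - n_positive
#     expectancy = sum(exps) / len(exps) if exps else 0.5
#     return ResultClass(
#         expectancy=expectancy,
#         description="%d positive, %d negative elements" % (n_positive, n_negative))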
"alpha_fraction": 0.5477624535560608, "alphanum_fraction": 0.5494836568832397, "avg_line_length": 34.72093200683594, "blob_id": "3edd44a8d58f005f8feca242a252d6b64be52355", "content_id": "5e27c0fec155d93316bdf2840109384444a73d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4648, "license_type": "no_license", "max_line_length": 91, "num_lines": 129, "path": "/code/adapter/unresyst/recommender/bias.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Classes to represent bias of subjects and objects\"\"\"\n\nfrom unresyst.constants import *\n\nfrom unresyst.models.abstractor import BiasDefinition, BiasInstance\nfrom unresyst.models.common import SubjectObject\n\nclass _BaseBias(object):\n \"\"\"The base class for all bias clases\"\"\"\n \n entity_type = None\n \"\"\"The entity type the bias is for s/o/so\"\"\"\n \n format_string = None\n \"\"\"The format string appearing in the description\"\"\"\n \n def __init__(self, name, generator, is_positive, confidence, weight, description=None):\n \"\"\"The constructor.\"\"\"\n \n self.name = name\n \"\"\"The name of the bias.\"\"\" \n \n self.description = description\n \"\"\"A string describing the rule. It can contain placeholders for entities: \n \n - %(subject)s for subject bias\n - %(object)s for object bias\n - %(subjectobject)s for subjectobject bias\n \"\"\"\n \n self.is_positive = is_positive\n \"\"\"Is the bias positive to the predicted relationship?\"\"\"\n \n self.generator = generator\n \"\"\"A generator returning subjects/objects that are affected by the bias.\n \"\"\" \n \n self.weight = weight\n \"\"\"A float number from [0, 1] representing the *static* weight of the bias\n It doesn't depend on the entity.\n \"\"\"\n \n self.confidence = confidence\n \"\"\"A float function giving values from [0, 1] representing the \n the confidence of the bias of the entity. \n It's dynamic, depends on the entity.\n \"\"\" \n \n def evaluate(self):\n \"\"\"Crate bias definitions and the instances in the database.\n \"\"\"\n \n if not (MIN_WEIGHT <= self.weight <= MAX_WEIGHT):\n raise ConfigurationError(\n message=(\"The bias '%s' provides weight %f,\" + \n \" should be between 0 and 1. .\"\n ) % (self.name, self.weight),\n recommender=self.recommender,\n parameter_name=\"Recommender.biases\",\n parameter_value=(self.recommender.biases)\n )\n \n recommender_model = self.recommender._get_recommender_model()\n\n # create the definition in the database\n definition = BiasDefinition.objects.create(\n name=self.name,\n recommender=recommender_model,\n entity_type=self.entity_type,\n weight=self.weight,\n is_positive=self.is_positive\n ) \n \n # go through the affected entities create bias instances\n # \n for ds_entity in self.generator():\n \n # convert the entity to universal\n dn_entity = SubjectObject.get_domain_neutral_entity(\n domain_specific_entity=ds_entity, \n entity_type=self.entity_type, \n recommender=recommender_model)\n \n # count the confidence by the provided function \n confidence = self.confidence(ds_entity) \n \n # if confidence invalid through an error\n if not (MIN_CONFIDENCE <= confidence <= MAX_CONFIDENCE):\n raise ConfigurationError(\n message=(\"The bias '%s' provides confidence %f,\" + \n \" should be between 0 and 1. 
\"\n ) % (self.name, confidence),\n recommender=self.recommender,\n parameter_name=\"Recommender.biases\",\n parameter_value=(self.recommender.biases)\n )\n \n # fill the description\n description = self.description % {self.format_string: dn_entity.name}\n \n # create the instance\n BiasInstance.objects.create(\n subject_object=dn_entity,\n confidence=confidence,\n definition=definition,\n description=description\n )\n \n print \" %d bias instances for bias %s created.\" % \\\n (BiasInstance.objects.filter(definition=definition).count(), self.name)\n \nclass SubjectBias(_BaseBias):\n\n entity_type = ENTITY_TYPE_SUBJECT\n \n format_string = FORMAT_STR_SUBJECT\n\n \nclass ObjectBias(_BaseBias):\n\n entity_type = ENTITY_TYPE_OBJECT\n \n format_string = FORMAT_STR_OBJECT\n\nclass SubjectObjectBias(_BaseBias):\n\n entity_type = ENTITY_TYPE_SUBJECTOBJECT\n \n format_string = FORMAT_STR_SUBJECTOBJECT\n \n \n \n \n \n \n \n \n" }, { "alpha_fraction": 0.7523809671401978, "alphanum_fraction": 0.7523809671401978, "avg_line_length": 34, "blob_id": "a6e704907d6452c58a8ebf2456f216f50160faf4", "content_id": "52b390f506337b8b7fef12287b254a20f114033b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 65, "num_lines": 3, "path": "/code/adapter/demo/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The demo application showing the possibilities of Unresyst on \na simple example of a shoe e-shop.\n\"\"\"\n" }, { "alpha_fraction": 0.5544632077217102, "alphanum_fraction": 0.5560361742973328, "avg_line_length": 30.33333396911621, "blob_id": "5ebfcc473dcb3e61526b2323283908dc81c385ad", "content_id": "e9e5c5a3660dc082cb5cf53b0b46bc019c456c48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2543, "license_type": "no_license", "max_line_length": 91, "num_lines": 81, "path": "/code/adapter/unresyst/models/evaluation.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models related to recommender evaluation\"\"\"\n\nfrom django.db import models\n\nclass BaseEvaluationPair(models.Model):\n \"\"\"An abstract base class for a test set pair. 
To be used for\n testing the correctness of the predicted expectancy.\n \n The subclass has to have attributes:\n - subj - a foreign key to subject\n - obj - a foreign key to object \n \"\"\" \n \n obtained_expectancy = models.FloatField(null=True)\n \"\"\"The predicted expectancy obtained by the predict_relationship recommender\n method.\n \"\"\"\n \n expected_expectancy = models.FloatField()\n \"\"\"The expected expectancy value for the pair\"\"\"\n \n is_successful = models.NullBooleanField(null=True)\n \"\"\"A sign whether the pair prediction was successful\"\"\"\n \n class Meta:\n abstract = True\n app_label = 'unresyst'\n\n def __unicode__(self):\n \"\"\"Return a textual representation.\"\"\" \n \n return u\"%s - %s: exp: %f, got: %f\" % \\\n (self.subj, self.obj, self.expected_expectancy, self.obtained_expectancy or -1)\n \n @classmethod\n def export(cls, f):\n \"\"\"Export evaluation pairs to a csv file of the given name.\n \n @type f: file\n @param f: open file to write to \n \"\"\" \n i = 0\n f.write(\"# userId, itemId\\n\")\n \n # loop through the pairs, \n for subj_id, obj_id in cls.objects.values_list('subj__pk', 'obj__pk'):\n \n # create the line\n linestr = \"%s,%s\\n\" % (subj_id, obj_id) \n \n # write it to the file\n f.write(linestr)\n \n i += 1\n \n print \" %d evaluation pairs exported\" % i\n \n # to be implemented by subclasses\n # \n \n @classmethod\n def select(cls, i=0):\n \"\"\"Select the pairs for a validation, save them to the database and\n remove them from the system data. The method is called in every \n iteration.\n \n To be implemented by the subclass.\n\n @type i: int\n @param i: the number of iteration of the validation (useful when\n cross-validation is done)\n \"\"\"\n raise NotImplementedError() \n\n def get_prediction_success(self):\n \"\"\"Count whether the pair was successful. The obtained expectancy has\n to be filled first.\n \n To be implemented by the subclass.\n \"\"\" \n raise NotImplementedError()\n \n" }, { "alpha_fraction": 0.5600860714912415, "alphanum_fraction": 0.5629558563232422, "avg_line_length": 39.66829299926758, "blob_id": "3444088d69f1d0c78a17e8b5561cba93a6144850", "content_id": "a1a2fb041241abef63a71ce3ab40ccce1c4753de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8363, "license_type": "no_license", "max_line_length": 96, "num_lines": 205, "path": "/code/adapter/unresyst/aggregator/linear_aggregator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The classes of the aggregator package:\n - LinearAggregator: everything is combined linearly\n - CombiningAggregator: uses the Combinator class for aggregating\n\"\"\"\n\nfrom django.db.models import Count\nfrom django.db.models import Q\n\n\nfrom base import BaseAggregator\nfrom unresyst.models.abstractor import RelationshipInstance, \\\n PredictedRelationshipDefinition\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance, \\\n AggregatedBiasInstance\nfrom unresyst.models.common import SubjectObject\nfrom unresyst.exceptions import InvalidParameterError\n\nclass LinearAggregator(BaseAggregator):\n \"\"\"The class aggregating rule/relationship instances to one for each pair\n of entities. 
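# NOTE: a small worked example of the averaging this class performs (values
# are illustrative): three instances between the same subject-object pair with
# expectancies 0.8, 0.6 and 0.1 aggregate to (0.8 + 0.6 + 0.1) / 3 = 0.5, and
# their descriptions are joined highest expectancy first: 0.8, then 0.6, then 0.1.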
\n    \n    It ignores the predicted_relationship instances.\n    \n    A better aggregator could add the positive and subtract the negative \n    expectancies somehow.\n    \"\"\"\n\n    def aggregate_rules_relationships(self, recommender_model):\n        \"\"\"For documentation see the base class.\n        \n        Linearly combines the rule and relationship instances.\n        \n        The descriptions of the aggregates are made by joining the descriptions\n        of the aggregated instances. The descriptions are ordered by the expectancy\n        of their owners, the highest expectancy comes first.\n        \"\"\"\n        \n        # if there's something in the database for the recommender\n        # throw an error\n        if AggregatedRelationshipInstance.objects\\\n            .filter(recommender=recommender_model).exists():\n            \n            raise InvalidParameterError(\n                message=\"There are unexpected aggregated instances for the recommender.\", \n                recommender=recommender_model,\n                parameter_name=\"recommender_model\", \n                parameter_value=recommender_model)\n\n        # aggregate it\n        # \n        \n        # take all rule/relationship instances, that don't belong \n        # to the predicted_relationship\n        # order them by the first and the second\n        predicted_def = PredictedRelationshipDefinition.objects.get(\n            recommender=recommender_model)\n        instance_qs = RelationshipInstance.objects\\\n            .exclude(definition=predicted_def)\\\n            .filter(definition__recommender=recommender_model)\\\n            .order_by('subject_object1__id', 'subject_object2__id')\n        \n        # if there's nothing to aggregate, stop\n        if not instance_qs:\n            return\n        \n        first_inst = instance_qs[0]\n        \n        # continuously built aggregated instance\n        # initialize it with the first instance\n        cont_inst = AggregatedRelationshipInstance(\n            subject_object1=first_inst.subject_object1,\n            subject_object2=first_inst.subject_object2, \n            relationship_type=first_inst.definition.as_leaf_class().relationship_type,\n            recommender=recommender_model)\n\n        exp = first_inst.get_expectancy()\n        exp_sum = exp\n        count = 1 \n        # a list of pairs (expectancy, description) \n        desc_list = [(exp, first_inst.description), ]\n        \n        # go through the rel instances \n        for instance in instance_qs.exclude(pk=first_inst.pk).iterator(): \n            \n            # if the pair has changed from the last pair, save what we've got and \n            # start anew\n            if cont_inst.subject_object1 != instance.subject_object1 \\\n                or cont_inst.subject_object2 != instance.subject_object2:\n                \n                # compute the average expectancy\n                cont_inst.expectancy = float(exp_sum) / count\n                \n                # sort the description list by expectancy and join it\n                desc_list.sort(key=lambda pair: pair[0], reverse=True) \n                cont_inst.description = ' '.join([desc for x, desc in desc_list])\n\n                # save the current instance\n                cont_inst.save()\n                \n                # start a new continuously aggregated instance\n                cont_inst = AggregatedRelationshipInstance(\n                    subject_object1=instance.subject_object1,\n                    subject_object2=instance.subject_object2, \n                    relationship_type=instance.definition.as_leaf_class().relationship_type,\n                    recommender=recommender_model)\n                \n                exp = instance.get_expectancy()\n                exp_sum = exp\n                count = 1 \n                \n                # a list of pairs (expectancy, description) \n                desc_list = [(exp, instance.description), ]\n                \n            # otherwise aggregate \n            else: \n                exp = instance.get_expectancy() \n                exp_sum += exp\n                count += 1\n                \n                desc_list.append((exp, instance.description))\n            \n            \n        # compute and save the last one we have\n        # \n        \n        # compute the average expectancy\n        cont_inst.expectancy = float(exp_sum) / count\n        \n        # sort the description list by expectancy and join it\n        desc_list.sort(key=lambda pair: pair[0], reverse=True) \n        cont_inst.description = ' 
'.join([desc for x, desc in desc_list])\n\n # save the last instance\n cont_inst.save()\n \n print \" %d rule/relationship aggregates created\" % \\\n AggregatedRelationshipInstance.objects.filter(recommender=recommender_model).count()\n \n\n def aggregate_biases(self, recommender_model):\n \"\"\"For documentation see the base class.\n \n Linearly combines the bias instances.\n \n The descriptions of the aggregates are made by joining the descriptions\n of the aggregated instances. The descriptions are ordered by the expectancy\n of their owners, the highest expectancy comes first.\n \"\"\"\n \n # if there's something in the database for the recommender\n # throw an error\n if AggregatedBiasInstance.objects\\\n .filter(recommender=recommender_model).exists():\n \n raise InvalidParameterError(\n message=\"There're unexpected aggregated instances for the recommender.\", \n recommender=recommender_model,\n parameter_name=\"recommender_model\", \n parameter_value=recommender_model)\n\n # aggregate it\n # \n \n # take all subjectobjects that have some biases\n qs_biased_so = SubjectObject.objects\\\n .filter(recommender=recommender_model)\\\n .annotate(num_bias=Count('biasinstance'))\\\n .filter(num_bias__gt=0)\n \n count = 0\n \n # go through the biased subjectobjects\n for so in qs_biased_so.iterator(): \n \n # a list of pairs (expectancy, description) \n desc_list = [] \n \n exp_sum = 0\n # go through their biases\n for bias in so.biasinstance_set.all():\n\n # count the expectancy for the bias\n exp = bias.get_expectancy()\n \n exp_sum += exp\n \n desc_list.append((exp, bias.description))\n\n # count the average expectancy\n avg_exp = float(exp_sum) / so.biasinstance_set.count()\n \n # sort the description list by expectancy and join it\n desc_list.sort(key=lambda pair: pair[0], reverse=True) \n desc = ' '.join([d for x, d in desc_list])\n \n # create and save the model\n AggregatedBiasInstance.objects.create(\n expectancy=avg_exp,\n subject_object=so,\n recommender=recommender_model,\n description=desc\n )\n \n print \" %d bias aggregates created\" % \\\n AggregatedBiasInstance.objects.filter(recommender=recommender_model).count()\n \n\n" }, { "alpha_fraction": 0.5359686017036438, "alphanum_fraction": 0.5438196063041687, "avg_line_length": 32.28571319580078, "blob_id": "f28cfdf2ed5c979ea0bbe9bd739c6a0fb1f2cf27", "content_id": "04fb8d402910bbd2313c1f781bc9bdc4d8f0beb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10954, "license_type": "no_license", "max_line_length": 133, "num_lines": 329, "path": "/UnresystCD/code/adapter/demo/recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The recommender used in the parent system \n(the unresyst.BaseRecommender subclass)\n\"\"\"\nfrom django.db.models import Count\n\nfrom unresyst import *\nfrom models import * \nfrom unresyst.algorithm import *\nfrom unresyst.compilator import *\nfrom unresyst.aggregator import *\nfrom unresyst.combinator import *\n\n# helper functions: \n#\n \ndef _get_keyword_set(o):\n \"\"\"Get set of keywords for the given shoepair\n \"\"\"\n return set([kw.word for kw in o.keywords.all()]) \n\ndef _keyword_set_similarity(o1, o2):\n \"\"\"A function measuring similarity of keywords for shoepairs o1 and o2.\n \n Counted as the size of the intersection divided by the size of the \n smaller set of keywords.\n \"\"\"\n # make sets of the keywords\n keyword_set1 = _get_keyword_set(o1)\n keyword_set2 = _get_keyword_set(o2)\n \n # the 
intersection of the keywords\n keyword_intersection = keyword_set1.intersection(keyword_set2)\n \n # size of the smaller set\n min_len = min(len(keyword_set1), len(keyword_set2))\n \n # they have some keywords in common, so min_len is never 0\n \n #the final measure\n return float(len(keyword_intersection))/min_len\n\ndef _likes_shoes_generator():\n \"\"\"A generator for the user likes shoes relationship\"\"\" \n for u in User.objects.iterator():\n for s in u.likes_shoes.iterator():\n yield (u, s)\n\ndef _shoelikers():\n \"\"\"A generator returning users liking at least one shoepair\"\"\"\n for u in User.objects.annotate(num_shoes=Count('likes_shoes')).filter(num_shoes__gte=1):\n yield u\n\n\ndef _popular_shoes():\n \"\"\"A generator returning shoes that are liked by at least one user\"\"\"\n for s in ShoePair.objects.annotate(num_likers=Count('likers')).filter(num_likers__gte=1):\n yield s\n\ndef _viewed_shoes_generator():\n \"\"\"A generator for the user has viewed shoes relationship\"\"\" \n for u in User.objects.iterator():\n for s in u.viewed_shoes.iterator():\n yield (u, s)\n\ndef _south_generator():\n \"\"\"A generator for the rule: if user is from south don't recommend \n him/her winter shoes.\n \"\"\"\n for u in User.objects.filter(home_city__in_south=True).iterator():\n for s in ShoePair.objects.filter(for_winter=True).iterator():\n yield (u, s)\n\n# the recommender:\n# \n\nclass ShoeRecommender(Recommender):\n \"\"\"A BaseRecommender subclass holding all domain-specific data\"\"\"\n\n name = \"Shoe Recommender\"\n \"\"\"The name\"\"\" \n \n subjects = User.objects\n \"\"\"The objects to who the recommender will recommend.\"\"\"\n \n objects = ShoePair.objects\n \"\"\"The objects that will be recommended.\"\"\" \n\n predicted_relationship = PredictedRelationship( \n name=\"User likes shoes.\",\n condition=lambda s, o: \n o in s.likes_shoes.all(), \n description=\"\"\"User %(subject)s likes shoes %(object)s.\"\"\",\n generator=_likes_shoes_generator\n )\n \"\"\"The relationship that will be predicted\"\"\"\n \n relationships = (\n \n # if the user has viewed the shoes it's a sign of preference\n SubjectObjectRelationship(\n name=\"User has viewed shoes.\",\n \n condition=lambda s, o:\n o in s.viewed_shoes.all(), \n\n is_positive=True, \n \n weight=0.4, \n \n description=\"User %(subject)s has viewed %(object)s.\",\n \n generator=_viewed_shoes_generator\n ),\n \n # if the user is from the same city as the shoe manufacturer, he might like it\n SubjectObjectRelationship(\n name='User lives in the same city as the shoe manufacturer.',\n \n condition=lambda s, o:\n o.manufacturer and s.home_city and o.manufacturer.home_city == s.home_city,\n\n is_positive=True, \n \n weight=0.1, \n \n description=\"User %(subject)s is from the same city as the manufacturer of %(object)s.\"\n ),\n \n # if users live in the same city, they are considered similar\n SubjectSimilarityRelationship(\n name=\"Users live in the same city.\",\n \n condition=lambda s1, s2:\n s1.home_city and s1.home_city == s2.home_city, \n \n is_positive=True, \n \n weight=0.3, \n \n description=\"Users %(subject1)s and %(subject2)s live in the same city.\"\n ),\n \n # if shoes were made by the same manufacturer, they are considered \n # similar\n ObjectSimilarityRelationship(\n name=\"Shoes were made by the same manufacturer.\",\n \n condition=lambda o1, o2:\n o1.manufacturer and o1.manufacturer == o2.manufacturer,\n \n is_positive=True, \n \n weight=0.1,\n \n description=\"Shoes %(object1)s and %(object2)s were made by\" + 
\\\n \" the same manufacturer.\"\n ) \n )\n \"\"\"Relationships among the subjects and objects in the domain\"\"\"\n \n \n rules = (\n # if user lives on south, don't recommend him winter shoes \n SubjectObjectRule( \n name=\"Don't recommend winter shoes for southern users.\",\n # is the user from a southern city and shoes for winter?\n condition=lambda s, o: \n s.home_city.in_south and o.for_winter, \n \n is_positive=False,\n \n weight=0.85, \n \n confidence=lambda s, o: 1, \n \n description=\"%(subject)s is from south, so %(object)s can't \" + \n \"be recommended to him/her.\",\n\n generator=_south_generator\n ),\n \n # if users are the same age +- year they are similar\n SubjectSimilarityRule(\n name=\"Users with similar age.\",\n \n # both users have given their age\n condition=lambda s1, s2: \n s1.age and s2.age and s1.age -1 <= s2.age <= s2.age + 1,\n \n is_positive=True, \n \n weight=0.2,\n \n # a magic linear confidence function\n confidence=lambda s1, s2: \n 1 - 0.25 * abs(s1.age - s2.age),\n \n description=\"Users %(subject1)s and %(subject2)s are about \" + \n \"the same age.\"\n ),\n \n # if shoes have common keywords, they are similar.\n ObjectSimilarityRule(\n name=\"Shoes with common keywords.\",\n \n # shoes have some common keywords, if both empty, it's false\n condition=lambda o1, o2: \n bool(_get_keyword_set(o1).intersection(_get_keyword_set(o2))),\n \n is_positive=True,\n \n weight=0.4,\n \n # the size of the intersection / the size of the smaller set\n confidence=_keyword_set_similarity,\n \n description=\"The shoe pairs %(object1)s and %(object2)s \" + \n \"share some keywords.\"\n ),\n \n # explicit rating\n ExplicitSubjectObjectRule(\n name=\"Shoe rating.\",\n \n condition=None,\n \n description=\"User %(subject)s has rated %(object)s.\",\n \n # all pairs user, rated shoes\n generator=lambda: [(r.user, r.shoe_pair) for r in ShoeRating.objects.all()],\n \n # the number of stars divided by five\n expectancy=lambda s, o:float(ShoeRating.objects.get(user=s, shoe_pair=o).stars) / 5,\n ),\n )\n \"\"\"Rules that can be applied to the domain\"\"\"\n \n cluster_sets = (\n # shoe category cluster\n ObjectClusterSet(\n\n name=\"Shoe category cluster set.\",\n\n weight=0.3,\n \n filter_entities=ShoePair.objects.filter(category__isnull=False),\n \n get_cluster_confidence_pairs=lambda shoe: ((shoe.category.name, 1),),\n \n description=\"%(object)s belong to the %(cluster)s category.\",\n ),\n \n # searched keywords cluster\n SubjectClusterSet(\n name=\"Keyword search cluster set.\",\n \n weight=0.4,\n \n filter_entities=User.objects.filter(words_searched__isnull=False).distinct(),\n \n # clusters are words, confidence is 1/number of searched words for user\n get_cluster_confidence_pairs=lambda user: [(w.word, 1.0/user.words_searched.count()) for w in user.words_searched.all()],\n \n description=\"%(subject)s has searched for the word %(cluster)s.\",\n ),\n )\n \n biases = (\n # people liking many shoepairs are more likely to like some more\n SubjectBias(\n name=\"Users liking many shoes.\",\n \n description=\"User %(subject)s likes many shoe pairs.\",\n \n weight=0.4, \n \n is_positive=True,\n \n generator=_shoelikers, \n \n confidence=lambda user: float(user.likes_shoes.count())/3\n ),\n \n # multiply liked shoes are more likely to be liked\n ObjectBias(\n name=\"Popular shoes\",\n \n description=\"Shoe pair %(object)s is popular\",\n \n weight=0.8,\n \n is_positive=True,\n \n generator=_popular_shoes,\n \n confidence=lambda shoe: float(shoe.likers.count())/3\n ),\n )\n\n 
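    # NOTE: a hypothetical clamped variant of the bias confidences above -
    # float(user.likes_shoes.count())/3 exceeds 1.0 for users liking more than
    # three pairs, which the bias evaluation rejects as a confidence outside
    # [0, 1]. The min(..., 1.0) pattern below is borrowed from the travel
    # recommender later in this dump and is not part of the original file:
    #
    #     confidence=lambda user: min(float(user.likes_shoes.count())/3, 1.0)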
random_recommendation_description = \"Recommending a random shoe pair to the user.\"\n \n algorithm = AggregatingAlgorithm(\n inner_algorithm=CompilingAlgorithm(\n inner_algorithm=SimpleAlgorithm(\n inner_algorithm=None\n ),\n compilator=GetFirstCompilator()\n ),\n aggregator=LinearAggregator()\n )\n \"\"\"The most basic algorithm is used\"\"\"\n\nShoeRecommender.explicit_rating_rule = ShoeRecommender.rules[3]\n\nclass AverageRecommender(ShoeRecommender):\n \n name = \"Advanced shoe recommender\"\n #remove_predicted_from_recommendations = False\n \n algorithm = AggregatingAlgorithm(\n inner_algorithm=CompilingAlgorithm(\n inner_algorithm=SimpleAlgorithm(\n inner_algorithm=None\n ),\n compilator=CombiningCompilator(combinator=AverageCombinator())\n ),\n aggregator=CombiningAggregator(combinator=AverageCombinator())\n )\n \"\"\"The normal algorithm is used\"\"\"\n\n\n\n" }, { "alpha_fraction": 0.735551655292511, "alphanum_fraction": 0.735551655292511, "avg_line_length": 30.66666603088379, "blob_id": "c3566ae29c10998605ef4d74844566ae225b99e5", "content_id": "c9c8dd13df97c1175d2cf4da2fcb1d6afa953e0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/UnresystCD/code/adapter/travel/mahout_recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The representation of the Mahout recommender in Unresyst.\"\"\"\n\nfrom unresyst.recommender.external_recommender import ExternalRecommender\nfrom recommender import OrderTourRecommender\n\nfrom models import *\n\nclass MahoutOrderTourRecommender(ExternalRecommender, OrderTourRecommender):\n \"\"\"An external order tour recommender\"\"\"\n \n name = \"Mahout order tour recommender\"\n \"\"\"The name\"\"\"\n \n subjects = User.objects\n \"\"\"The objects to who the recommender will recommend.\"\"\"\n \n objects = Tour.objects\n \"\"\"The objects that will be recommended.\"\"\" \n" }, { "alpha_fraction": 0.6030217409133911, "alphanum_fraction": 0.6056657433509827, "avg_line_length": 31.300613403320312, "blob_id": "66a0c43d7c3b33d128d124f8c9c4d591f58f0069", "content_id": "503f619f12d00e51283694f39285e255e14f54a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5295, "license_type": "no_license", "max_line_length": 93, "num_lines": 163, "path": "/code/adapter/unresyst/abstractor/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The module defines base class for the abstractor package.\"\"\"\n\nclass BaseAbstractor(object):\n \"\"\"The base (abstract) class for all abstractors. 
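    # NOTE: an equivalent named-generator form of the predicted_relationship
    # lambda above, restated only for readability (the helper name is
    # illustrative, not from the original file). values_list(...).distinct()
    # yields distinct primary-key pairs, which are re-fetched as model instances:
    #
    #     def _order_pairs():
    #         for uid, tid in Order.objects.values_list(
    #                 'session__user', 'tour').distinct():
    #             yield (User.objects.get(pk=uid), Tour.objects.get(pk=tid))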
Defines the interface.\"\"\"\n \n # Build phase:\n #\n \n \n def create_subjectobjects(self, recommender_model, subjects, objects):\n \"\"\"Create abstract representation of subjects and objects.\n \n @type recommender_model: models.Recommender\n @param recommender_model: the recommender model instance\n \n @type subjects: django.db.models.manager.Manager\n @param subjects: the manager above the subject model\n \n @type objects: django.db.models.manager.Manager\n @param objects: the manager above the object model\n \n \"\"\"\n pass\n \n \n \n def create_predicted_relationship_instances(self, predicted_relationship):\n \"\"\"Create the instances of the predicted relationship.\n \n Create bindings on subject-object pairs for pairs between which there's\n the predicted relationship.\n \n @type predicted_relationship: rules.Relationship\n @param predicted_relationship: the definition of relationship to be predicted \n \"\"\"\n pass\n\n \n \n def create_relationship_instances(self, relationships):\n \"\"\"Create the instances of relationships relevant for recommendation.\n \n Create bindings on pairs of subject/objects, for pairs between where \n the relationship is. \n \n @type relationships: list of rules._WeightedRelationship subclass instances\n @param relatioships: recommender relationships to be instantiated \n\n @raise ConfigurationError: if the weight of some relatioship is\n outside [0, 1] \n \"\"\"\n pass\n\n \n \n def create_rule_instances(self, rules):\n \"\"\"Create the instances of rules.\n \n Create bindings on pairs of subject/objects, for pairs between where \n the rule applies.\n\n @type rules: list of rules._BaseRule subclass instances\n @param rules: recommender rules to be instantiated\n \n @raise ConfigurationError: if the weight of some rule is outside [0, 1]\n or some confidence function returns a value outside [0, 1].\n \"\"\"\n pass\n\n \n \n \n def create_clusters(self, cluster_sets):\n \"\"\"Create clusters defined in the given sets\n \n Crates the cluster sets in the database, their clusters, bindings \n of subjectobjects to the clusters.\n \n @type cluster_sets: a list of clusters.BaseClusterSet subclass instances\n @param cluster_sets: recommender cluster sets to be evaluated\n \n @raise ConfigurationError: if the weight of some cluster set is outside [0, 1]\n or some cluster member confidence is outside [0, 1].\n \"\"\"\n pass\n \n \n def create_biases(self, biases):\n \"\"\"Create biases defined by the user.\n \n Creates bias definitions and instances in the database.\n \n @type biases: a list of bias._BaseBias instances\n @param biases: biases to be evaluated\n \n @raise ConfigurationError: if the weight of some cluster set is outside [0, 1]\n or some cluster member confidence is outside [0, 1]. 
\n \"\"\"\n pass\n\n \n # Update phase:\n # \n \n \n def add_subject(self, recommender, subject):\n \"\"\"Add the subject to the abstract subjects, to the relationship and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass\n \n \n def add_object(self, recommender, object_):\n \"\"\"Add the object to the abstract objects, to the relationship and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass\n \n \n def update_subject(self, recommender, subject):\n \"\"\"Update the subject in the abstract subjects, in the relationship and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass\n \n \n def update_object(self, recommender, object_):\n \"\"\"Update the object in the abstract objects, in the relationship and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass \n \n \n def remove_subject(self, recommender, subject):\n \"\"\"Remove the subject from the abstract subjects, its relationships and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass\n \n \n def remove_object(self, recommender, object_):\n \"\"\"Update the object from the abstract objects, its relationship and\n rule instances.\n \n @rtype: uvidime\n @return: the changes performed on the rule and relationship instances.\n \"\"\"\n pass \n \n \n" }, { "alpha_fraction": 0.5553778409957886, "alphanum_fraction": 0.5639743804931641, "avg_line_length": 35.445255279541016, "blob_id": "16ac98ed4ffaeb4faf46f8db3777add92cba6db0", "content_id": "e4b8c7014c26aa3d937d4bff79aba92a06a557bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5002, "license_type": "no_license", "max_line_length": 167, "num_lines": 137, "path": "/code/adapter/travel/recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The configuration for the travel agency recommender\"\"\"\nfrom django.db.models import Sum, Count, Avg\n\nfrom unresyst import *\n\nfrom models import *\nfrom constants import *\n\ndef _viewed_profile_confidence(u, t):\n viewed_profs = ViewProfile.objects.filter(tour=t, session__user=u)\n view_count = viewed_profs.count()\n avg_duration = viewed_profs.aggregate(Avg('duration'))['duration__avg']\n \n return min((float(view_count)/6) * avg_duration/160, 1.0)\n\n# predicted bude order\n# remove_predicted_from_recommendations = True\n# podpurny budou ty vtipy\n\nclass OrderTourRecommender(Recommender):\n \"\"\"A recommender for suggesting what tour the user should order.\"\"\" \n\n name = \"Order Tour Recommender\"\n \"\"\"The name\"\"\" \n \n subjects = User.objects\n \"\"\"The objects to who the recommender will recommend.\"\"\"\n \n objects = Tour.objects\n \"\"\"The objects that will be recommended.\"\"\" \n\n random_recommendation_description = \"Recommending a random tour to the user.\"\n\n predicted_relationship = PredictedRelationship( \n name=\"User has ordered the tour.\",\n condition=None, \n description=\"\"\"User %(subject)s has ordered %(object)s.\"\"\",\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) for uid, tid in Order.objects.values_list('session__user', 'tour').distinct()), \n )\n \"\"\"The relationship that will be predicted\"\"\"\n\n \n 
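    # Illustrative sketch, not part of the original recommender: every rule
    # below follows the same shape as predicted_relationship above, i.e. a
    # lazy generator yielding (subject, object) pairs plus a confidence
    # function returning a value in [0, 1]. The attribute name _example_rule
    # and the count/2 normalisation are hypothetical; the rule is not listed
    # in the rules tuple that follows, so it has no effect on the recommender.
    _example_rule = SubjectObjectRule(
        name='User has ordered the tour before (illustration only).',
        weight=0.2,
        condition=None,
        is_positive=True,
        description='User %(subject)s has ordered %(object)s before.',
        # pairs where the user has ordered the tour, reusing the Order model
        # exactly as predicted_relationship does above
        generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid))
            for uid, tid in Order.objects.values_list('session__user', 'tour').distinct()),
        # cap at 1.0, mirroring the confidence functions used in this file
        confidence=lambda u, t: min(float(Order.objects.filter(tour=t, session__user=u).count()) / 2, 1.0),
    )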
relationships = ()\n \n rules = ( \n # click = sign of preference\n SubjectObjectRule(\n name='User has clicked on something on the tour profile.',\n weight=0.05,\n condition=None,\n is_positive=True,\n description='User %(subject)s has clicked on something on the %(object)s profile.',\n # pairs that user has clicked on the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in Click.objects.values_list('session__user', 'tour').distinct()),\n # the average is around 3, so take 1/6. so that 3 points to the middle.\n confidence=lambda u, t: min(float(Click.objects.filter(tour=t, session__user=u).count())/6, 1.0),\n ), \n \n # mouse move .. also a sign of preference\n SubjectObjectRule(\n name='User has moved the mouse on the tour profile.',\n weight=0.1,\n condition=None,\n is_positive=True,\n description='User %(subject)s has moved the mouse on %(object)s.',\n # pairs that user has moved on the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in MouseMove.objects.values_list('session__user', 'tour').distinct()),\n confidence=lambda u, t: min(float(MouseMove.objects.filter(tour=t, session__user=u).count())/18, 1.0),\n ),\n \n # view profile \n SubjectObjectRule(\n name='User has viewed the tour profile page.',\n weight=0.1,\n condition=None,\n is_positive=True,\n description='User %(subject)s has viewed %(object)s.',\n # pairs that user has viewed the tour\n generator=lambda: ((User.objects.get(pk=uid), Tour.objects.get(pk=tid)) \\\n for uid, tid in ViewProfile.objects.values_list('session__user', 'tour').distinct()),\n # how many times * how long\n confidence=_viewed_profile_confidence\n ),\n\n \n )\n \n\n biases = (\n # multiply viewed tours\n ObjectBias(\n name=\"Most viewed tours.\",\n \n description=\"Tour %(object)s is much viewed\",\n \n weight=0.3,\n \n is_positive=True,\n \n generator=lambda: Tour.objects.annotate(hh=Count('viewprofile')).filter(hh__gt=2).distinct(),\n \n confidence=lambda t: min(float(t.viewprofile_set.count())/4, 1.0)\n ),\n \n \n # multiply mouse moved tours\n ObjectBias(\n name=\"Most mouse moved tours.\",\n \n description=\"Tour %(object)s is often mouse moved.\",\n \n weight=0.2,\n \n is_positive=True,\n \n generator=lambda: Tour.objects.annotate(hh=Count('mousemove')).filter(hh__gt=6).distinct(),\n \n confidence=lambda t: min(float(t.mousemove_set.count())/12, 1.0)\n ),\n # multiply clicked tours\n ObjectBias(\n name=\"Most clicked tours.\",\n \n description=\"Tour %(object)s is often clicked on.\",\n \n weight=0.5,\n \n is_positive=True,\n \n generator=lambda: Tour.objects.annotate(hh=Count('click')).filter(hh__gt=1).distinct(),\n \n confidence=lambda t: min(float(t.click_set.count())/2, 1.0)\n ),\n \n )\n\n\n\n \n\n" }, { "alpha_fraction": 0.5668229460716248, "alphanum_fraction": 0.580405056476593, "avg_line_length": 45.32362747192383, "blob_id": "6c9ac1aa037d4d4fa059f3f62df8d209180e2318", "content_id": "3a4ab5042f1421a2c4c0f84ee73ebf9d9139d5a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41378, "license_type": "no_license", "max_line_length": 197, "num_lines": 893, "path": "/UnresystCD/code/adapter/unresyst/tests/test_build.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Tests for the building phase.\n\nEach layer has a separate test:\n - recommender\n - abstractor\n - aggregator\n - algorithm \n\nThe tests are always started by running the recommender.build 
method.\n\"\"\"\n\nfrom nose.tools import eq_, assert_raises, assert_almost_equal\nfrom django.db.models import Q\n\nfrom unresyst import Recommender\nfrom unresyst.models.common import SubjectObject, Recommender as RecommenderModel\nfrom unresyst.models.abstractor import PredictedRelationshipDefinition, \\\n RelationshipInstance, RuleInstance, RuleRelationshipDefinition, ClusterSet, \\\n BiasDefinition, ExplicitRuleDefinition, ExplicitRuleInstance\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance, AggregatedBiasInstance\nfrom unresyst.models.algorithm import RelationshipPredictionInstance \nfrom test_base import TestBuild, TestEntities, DBTestCase, TestBuildAverage\nfrom unresyst.exceptions import ConfigurationError, DescriptionKeyError\nfrom unresyst.recommender.rules import ExplicitSubjectObjectRule\n\nfrom demo.recommender import ShoeRecommender\nfrom demo.models import User, ShoePair\n\nPLACES = 4\n\"\"\"How many places are counted for expectancy accuracy\"\"\"\n\nclass TestRecommender(TestBuild):\n \"\"\"Test case for the recommender class\"\"\"\n \n def test_recommender_created(self):\n \"\"\"Test that the recommender was created during the build\"\"\" \n \n # get it from db\n rec = RecommenderModel.objects.filter(\n class_name=ShoeRecommender.__name__)\n \n # assert there's one \n eq_(rec.count(), 1, \"There should be right one recommender %s.\" \\\n % ShoeRecommender.name)\n \n rec = rec[0]\n \n # assert it has the right name\n eq_(rec.name, ShoeRecommender.name)\n \n # assert the subjects != objects\n eq_(rec.are_subjects_objects, \\\n ShoeRecommender.subjects == ShoeRecommender.objects)\n \n # assert the model is saved in the recommender\n eq_(ShoeRecommender._get_recommender_model(), rec) \n\n def test_cascade_delete(self):\n \"\"\"Test that the rebuild deletes all that should be deleted\"\"\"\n \n # get and delete alice\n a = User.objects.get(name='Alice') \n a.delete()\n \n # build the recommender again\n ShoeRecommender.build()\n \n # assert there's nothing:\n \n # in SubjectObject\n eq_(SubjectObject.objects.filter(name='Alice').count(), 0)\n \n # in RelationshipInstance\n eq_(RelationshipInstance.objects.filter(subject_object1__name='Alice').count(), 0)\n eq_(RelationshipInstance.objects.filter(subject_object2__name='Alice').count(), 0) \n \n # in RuleInstance\n eq_(RuleInstance.objects.filter(subject_object1__name='Alice').count(), 0)\n eq_(RuleInstance.objects.filter(subject_object2__name='Alice').count(), 0)\n \n # in AggregatedRelationshipInstance\n eq_(AggregatedRelationshipInstance.objects.filter(subject_object1__name='Alice').count(), 0)\n eq_(AggregatedRelationshipInstance.objects.filter(subject_object2__name='Alice').count(), 0)\n \n # in RelationshipPredictionInstance\n eq_(RelationshipPredictionInstance.objects.filter(subject_object1__name='Alice').count(), 0)\n eq_(RelationshipPredictionInstance.objects.filter(subject_object2__name='Alice').count(), 0) \n\n\nclass TestAbstractor(TestEntities):\n \"\"\"Testing the abstractor in the build phase\"\"\" \n \n # creating subjectobjects:\n #\n \n def test_create_subjectobjects(self):\n \"\"\"Test creating universal subjects, objects\"\"\"\n \n # get the specific entities\n subjects = User.objects.all()\n objects = ShoePair.objects.all()\n \n universal = SubjectObject.objects.all()\n \n # assert the counts fit\n eq_(subjects.count() + objects.count(), universal.count())\n \n # assert all subjects and objects are there\n for entities, entity_type in ((subjects, 'S'), (objects, 'O')):\n for en in 
entities:\n\n # the universal representation of entity\n en_un = universal.filter(id_in_specific=en.id, entity_type=entity_type)\n \n # assert there's one \n eq_(en_un.count(), 1, \"There should be exactly one representation \" + \\\n \"of %s, there are %d\" % (en, en_un.count()))\n \n en_un = en_un[0] \n \n # assert it has the right properties \n\n # name\n eq_(en_un.name, en.__unicode__())\n \n # recommender\n eq_(en_un.recommender.class_name, ShoeRecommender.__name__)\n\n \n # predicted relationship\n # \n \n def test_create_predicted_relationship_definition(self):\n \"\"\"Test creating the definition of the predicted relationship\"\"\"\n \n # get definitions related to the shoe recommender\n qs = PredictedRelationshipDefinition.remove_subclass_objects().filter(\n recommender=ShoeRecommender._get_recommender_model())\n \n # get how many definitions there are\n # should be one\n eq_(qs.count(), 1)\n \n definition = qs[0]\n \n # test it has the right name\n eq_(definition.name, \"User likes shoes.\")\n\n \n EXPECTED_PREDICTED_RELATIONSHIP_INSTANCES = (\n ('Alice', 'Sneakers', \"User Alice likes shoes Sneakers.\"),\n ('Bob', 'Sneakers', \"User Bob likes shoes Sneakers.\"),\n ('Edgar', 'Rubber Shoes', \"User Edgar likes shoes Rubber Shoes.\"), \n ('Edgar', 'RS 130', \"User Edgar likes shoes RS 130.\"), \n ) \n\n \n def test_create_predicted_relationship_instances(self):\n \"\"\"Test creating instances of the predicted relationship.\"\"\"\n \n # get the relationship definition\n definition = PredictedRelationshipDefinition.remove_subclass_objects().get( \n recommender=ShoeRecommender._get_recommender_model())\n \n # get instances of the predicted relationship\n instances = RelationshipInstance.remove_subclass_objects().filter(definition=definition)\n \n eq_(len(self.EXPECTED_PREDICTED_RELATIONSHIP_INSTANCES), instances.count(),\n \"Expected: %d, obtained: %d. 
Obtained: %s\" % \\\n (len(self.EXPECTED_PREDICTED_RELATIONSHIP_INSTANCES), instances.count(), instances))\n \n for expected_data in self.EXPECTED_PREDICTED_RELATIONSHIP_INSTANCES:\n\n # instances that have either one or another order of the subjobjects\n rel_instance = instances.filter(\n subject_object1=self.universal_entities[expected_data[0]], \n subject_object2=self.universal_entities[expected_data[1]]\n )\n \n # there should be one\n eq_(rel_instance.count(), 1)\n \n instance = rel_instance[0]\n \n # test it has the right description\n eq_(instance.description, expected_data[2]) \n\n\n # relationships:\n #\n\n def test_create_relationship_definitions(self):\n \"\"\"Test creating the definitions of the relationships\"\"\"\n \n self._test_definitions(ShoeRecommender.relationships)\n\n \n EXPECTED_RELATIONSHIP_DICT = {\n # relationship name, list of instances - (entity1, entity2, description)\n # for symmetric relationships the description is a pair - both orderings\n \"User has viewed shoes.\": (\n ('Alice', 'Rubber Shoes', \"User Alice has viewed Rubber Shoes.\"),\n ('Bob', 'Sneakers', \"User Bob has viewed Sneakers.\"),\n ('Cindy', 'Rubber Shoes', \"User Cindy has viewed Rubber Shoes.\"),\n ),\n \"Users live in the same city.\": (\n ('Alice', 'Bob', (\"Users Alice and Bob live in the same city.\",\n \"Users Bob and Alice live in the same city.\")),\n ('Cindy', 'Daisy', (\"Users Cindy and Daisy live in the same city.\",\n \"Users Daisy and Cindy live in the same city.\")),\n ),\n \"Shoes were made by the same manufacturer.\": (\n ('Sneakers', 'Rubber Shoes', \n (\"Shoes Sneakers and Rubber Shoes were made by the same manufacturer.\",\n \"Shoes Rubber Shoes and Sneakers were made by the same manufacturer.\")),\n ),\n 'User lives in the same city as the shoe manufacturer.': (\n ('Alice', 'Rubber Shoes', \"User Alice is from the same city as the manufacturer of Rubber Shoes.\"),\n ('Bob', 'Rubber Shoes', \"User Bob is from the same city as the manufacturer of Rubber Shoes.\"),\n ('Alice', 'Sneakers', \"User Alice is from the same city as the manufacturer of Sneakers.\"),\n ('Bob', 'Sneakers', \"User Bob is from the same city as the manufacturer of Sneakers.\"),\n ('Cindy', 'RS 130', \"User Cindy is from the same city as the manufacturer of RS 130.\"),\n ('Daisy', 'RS 130', \"User Daisy is from the same city as the manufacturer of RS 130.\")\n ),\n }\n \n \n def test_create_relationship_instances(self):\n \"\"\"Test creating instances of all the defined relationships.\"\"\"\n \n self._test_instances(\n def_list=ShoeRecommender.relationships,\n expected_dict=self.EXPECTED_RELATIONSHIP_DICT,\n instance_manager=RelationshipInstance.objects) \n \n # rules:\n #\n \n def test_create_rule_definitions(self):\n \"\"\"Test creating the definitions of the rules\"\"\"\n \n # explicit rules are tested separately\n rules = filter(lambda r: r.__class__ != ExplicitSubjectObjectRule, ShoeRecommender.rules)\n self._test_definitions(rules)\n\n\n EXPECTED_RULE_DICT = {\n # rule name, list of instances - (entity1, entity2, description, expectancy)\n # for symmetric rules the description is a pair - both orderings\n \"Don't recommend winter shoes for southern users.\": (\n ('Alice', 'RS 130', \"Alice is from south, so RS 130 can't be recommended to him/her.\", 1),\n ('Bob', 'RS 130', \"Bob is from south, so RS 130 can't be recommended to him/her.\", 1),\n ),\n \"Users with similar age.\": (\n ('Alice', 'Bob', (\"Users Alice and Bob are about the same age.\", \n \"Users Bob and Alice are about the same age.\"), 
0.75),\n ('Edgar', 'Fionna', (\"Users Edgar and Fionna are about the same age.\", \n \"Fionna and Edgar are about the same age.\"), 1),\n ),\n \"Shoes with common keywords.\": (\n ('Rubber Shoes', 'Sneakers', (\"The shoe pairs Rubber Shoes and Sneakers share some keywords.\",\n \"The shoe pairs Sneakers and Rubber Shoes share some keywords.\"), 1),\n ('Sneakers', 'Design Shoes', (\"The shoe pairs Sneakers and Design Shoes share some keywords.\",\n \"The shoe pairs Design Shoes and Sneakers share some keywords.\"), 0.5), \n ),\n } \n\n def test_create_rule_instances(self):\n \"\"\"Test creating instances of all the defined rules.\"\"\"\n\n # explicit rules are tested separately\n rules = filter(lambda r: r.__class__ != ExplicitSubjectObjectRule, ShoeRecommender.rules)\n \n self._test_instances(\n def_list=rules,\n expected_dict=self.EXPECTED_RULE_DICT,\n instance_manager=RuleInstance.objects)\n \n # auxiliary functions:\n # \n \n def _test_instances(self, def_list, expected_dict, instance_manager):\n \"\"\"Test creating instances.\n \n @type def_list: list of Rule/Relationship definitions \n @param def_list: a list of definitions to check\n \n @type expected_dict: dict str: tuple\n @param expected_dict: dictionary of expected instance attributes\n \n @type instance_manager: django.db.models.manager.Manager\n @param instance_manager: the manager above rule/relationship instances\n \"\"\"\n # get definitions related to the shoe recommender\n qs = RuleRelationshipDefinition.objects.filter(\n recommender=ShoeRecommender._get_recommender_model()) \n \n # for each defined type there should be a model definition in db\n for r in def_list:\n\n # get the expected data\n expected_relationships = expected_dict[r.name] \n\n # get the definition model by name (shouldn't throw an error)\n definition = qs.get(name=r.name)\n \n instances = instance_manager.filter(definition=definition)\n \n # expect there are as many relationship instances as expected\n eq_(instances.count(), len(expected_relationships))\n \n for expected_data in expected_relationships:\n \n # instances that have either one or another order of the subjobjects\n rel_instance = instances.filter(\n Q(subject_object1=self.universal_entities[expected_data[0]], \n subject_object2=self.universal_entities[expected_data[1]]) | \\\n Q(subject_object1=self.universal_entities[expected_data[1]], \n subject_object2=self.universal_entities[expected_data[0]])\n ) \n \n # there should be one\n eq_(rel_instance.count(), 1)\n \n instance = rel_instance[0]\n \n # assert the right order in the relationship\n if instance.definition.as_leaf_class().relationship_type == 'S-O': \n eq_(instance.subject_object1.entity_type, 'S')\n eq_(instance.subject_object2.entity_type, 'O')\n else:\n assert instance.subject_object1.id < instance.subject_object2.id\n \n # test it has the right description\n # the diferentiation for symmetric relationships - the order \n # of the entities in the description can be arbitrary\n if r.is_symmetric:\n desc_tuple = expected_data[2]\n assert instance.description == desc_tuple[0] or \\\n instance.description == desc_tuple[1], \\\n \"The description '%s' is wrong. 
Should be '%s' or '%s'\" % \\\n (instance.description, desc_tuple[0], desc_tuple[1])\n else:\n eq_(instance.description, expected_data[2]) \n \n # for rules \n if len(expected_data) == 4:\n eq_(instance.confidence, expected_data[3], \n \"Got confidence: %f, expected: %f for instance: %s\" % (instance.confidence, expected_data[3], instance))\n \n\n def _test_definitions(self, def_list):\n \"\"\"Test the definitions.\n \n @type def_list: an iterable of RuleRelationshipDefinition model\n @param def_list: list of definitions to check.\n \"\"\"\n # get definitions related to the shoe recommender\n qs = RuleRelationshipDefinition.objects.filter(\n recommender=ShoeRecommender._get_recommender_model()) \n \n # explicit rules are tested separately\n rules = filter(lambda r: r.__class__ != ExplicitSubjectObjectRule, ShoeRecommender.rules)\n \n # assert the number of definitions is right\n eq_(qs.count(), len(ShoeRecommender.relationships) + len(rules))\n \n # for each defined type there should be a model definition in db\n for r in def_list:\n\n # get the definition model by name (shouldn't throw an error)\n rmodel = qs.get(name=r.name)\n \n # assert the positiveness, weight and relationship type are right\n eq_(rmodel.is_positive, r.is_positive)\n eq_(rmodel.weight, r.weight) \n eq_(rmodel.relationship_type, r.relationship_type) \n\n\n EXP_CLUSTER_SETS = {\n \"Shoe category cluster set.\": \n (\n {'For Sports': (('RS 130', 1, \"RS 130 belong to the For Sports category.\"), \n (\"Octane SL\", 1, \"Octane SL belong to the For Sports category.\")),\n 'Casual': ((\"Design Shoes\", 1, \"Design Shoes belong to the Casual category.\"), \n (\"Sneakers\", 1, \"Sneakers belong to the Casual category.\")),},\n 'O', 0.3),\n \"Keyword search cluster set.\":\n (\n {'Sporty': (\n ('Bob', 0.5, \"Bob has searched for the word Sporty.\"),\n ),\n 'Comfortable': (\n ('Bob', 0.5, \"Bob has searched for the word Comfortable.\"),\n ('Cindy', 0.5, \"Cindy has searched for the word Comfortable.\"),\n ),\n 'Cool':(\n ('Cindy', 0.5, \"Cindy has searched for the word Cool.\"),\n ),\n },\n 'S', 0.4),\n }\n \"\"\"A dictionary: cluster set name: contained clusters, entity type, weight,\n the members: object name, confidence, description.\"\"\"\n \n def test_clusters(self):\n \"\"\"Test that the clusters are created as expected\"\"\"\n \n # get cluster sets from db\n cluster_sets = ClusterSet.objects.filter(\n recommender=ShoeRecommender._get_recommender_model())\n \n # assert there are as many as expected\n eq_(len(self.EXP_CLUSTER_SETS), cluster_sets.count())\n \n for cs in cluster_sets:\n\n # get the expected contents\n clusters, ent_type, weight = self.EXP_CLUSTER_SETS[cs.name]\n \n eq_(ent_type, cs.entity_type)\n eq_(weight, cs.weight)\n eq_(len(clusters), cs.cluster_set.count())\n \n # go through the clusters\n for c in cs.cluster_set.all():\n\n cluster_members = clusters[c.name]\n \n eq_(len(cluster_members), c.clustermember_set.count())\n \n for cm in c.clustermember_set.all():\n # expect the combination of entity and confidence is \n # in the expected\n assert (cm.member.name, cm.confidence, cm.description) in cluster_members, \\\n \"The member '%s' with confidence '%f' and description '%s' isn't one of the expected %s\" % (\n cm.member, cm.confidence, cm.description, cluster_members)\n\n EXP_BIASES = {\n \"Users liking many shoes.\": \n {\n 'Alice': (0.333333, \"User Alice likes many shoe pairs.\"),\n 'Bob': (0.333333, \"User Bob likes many shoe pairs.\"),\n 'Edgar': (0.66666, \"User Edgar likes many shoe pairs.\"),\n 
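            # (Explanatory note, not part of the original fixture:) the
            # thirds apparently reflect the liked-pair counts above: Alice
            # and Bob each like one shoe pair and Edgar likes two (see
            # EXPECTED_PREDICTED_RELATIONSHIP_INSTANCES), which suggests a
            # count/3 confidence in the demo recommender's bias definition.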
},\n \"Popular shoes\":\n {\n 'Sneakers': (0.6666666, \"Shoe pair Sneakers is popular\"),\n 'Rubber Shoes': (0.333333, \"Shoe pair Rubber Shoes is popular\"),\n 'RS 130': (0.333333, \"Shoe pair RS 130 is popular\"),\n },\n }\n \"\"\"A dictionary: cluster set name: contained clusters, entity type, weight\"\"\" \n \n def test_bias(self):\n \"\"\"Test that biases were created as expected\"\"\"\n \n # get bias definitions from db\n bias_defs = BiasDefinition.objects.filter(\n recommender=ShoeRecommender._get_recommender_model())\n \n # assert there are as many as expected\n eq_(len(self.EXP_BIASES), bias_defs.count())\n \n for bias_def in bias_defs:\n \n # assert it's in the expected\n assert self.EXP_BIASES.has_key(bias_def.name)\n\n # get the expected contents \n exp_biases = self.EXP_BIASES[bias_def.name]\n \n eq_(len(exp_biases), bias_def.biasinstance_set.count(), \n \"Wrong count for definition '%s' should be %s, is %s\" % \\\n (bias_def.name, exp_biases, bias_def.biasinstance_set.all()))\n \n # go through the instances connected to definition\n for bias in bias_def.biasinstance_set.all():\n \n # assert it's in the expected biases\n assert exp_biases.has_key(bias.subject_object.name)\n \n # assert the properties are as expected\n exp_conf, exp_desc = exp_biases[bias.subject_object.name]\n \n assert_almost_equal(exp_conf, bias.confidence, PLACES)\n eq_(exp_desc, bias.description) \n \n EXP_EXPLICIT_RULES = {\n \"Shoe rating.\": \n {\n ('Alice', 'Design Shoes'): (0.8, \"User Alice has rated Design Shoes.\"),\n ('Bob', 'Design Shoes'): (0.2, \"User Bob has rated Design Shoes.\"),\n },\n }\n \n def test_explicit_rules(self):\n \"\"\"Test the explicit rules and their definitions \n were created as expected\"\"\"\n \n rule_defs = ExplicitRuleDefinition.objects.all()\n \n # expect the definition count fits\n eq_(len(self.EXP_EXPLICIT_RULES), rule_defs.count())\n \n for rule_def in rule_defs:\n \n # assert the def is there\n assert self.EXP_EXPLICIT_RULES.has_key(rule_def.name)\n \n # get its expected rules\n exp_rules = self.EXP_EXPLICIT_RULES[rule_def.name]\n \n rules = ExplicitRuleInstance.objects.filter(definition=rule_def)\n \n for rule in rules:\n \n # assert the pair is expected\n assert exp_rules.has_key((rule.subject_object1.name, rule.subject_object2.name)), \\\n \"The pair %s, %s isn't expected\" % (rule.subject_object1.name, rule.subject_object2.name)\n\n exp_rule = exp_rules[(rule.subject_object1.name, rule.subject_object2.name)]\n \n assert_almost_equal(exp_rule[0], rule.expectancy, PLACES)\n eq_(exp_rule[1], rule.description)\n \n \nclass TestAbstractorRecommenderErrors(DBTestCase):\n \"\"\"Test various errors thrown by Abstractor and/or Recommender and/or Algorithm\"\"\"\n\n def test_invalid_relationship_weight(self):\n \"\"\"Test if the exception is raised for a relatioship with invalid weight\"\"\"\n \n # set some invalid weight, assert it throws the error \n w = ShoeRecommender.relationships[1].weight \n ShoeRecommender.relationships[1].weight = 1.5\n\n assert_raises(ConfigurationError, ShoeRecommender.build)\n\n # restore the original value\n ShoeRecommender.relationships[1].weight = w\n\n \n def test_invalid_rule_weight(self):\n \"\"\"Test if the exception is raised for a rule with invalid weight\"\"\"\n \n # set some invalid weight, assert it throws the error \n w = ShoeRecommender.rules[2].weight \n ShoeRecommender.rules[2].weight = 1.5\n\n assert_raises(ConfigurationError, ShoeRecommender.build)\n\n # restore the original value\n ShoeRecommender.rules[2].weight = w \n\n\n 
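    # Hypothetical addition, not in the original suite: assuming
    # ShoeRecommender.biases mirrors the structure of its rules and
    # relationships, the same save/corrupt/restore pattern should also
    # catch an out-of-range bias weight during build().
    def test_invalid_bias_weight(self):
        \"\"\"Test if the exception is raised for a bias with invalid weight\"\"\"

        # set some invalid weight, assert it throws the error
        w = ShoeRecommender.biases[0].weight
        ShoeRecommender.biases[0].weight = 1.5

        assert_raises(ConfigurationError, ShoeRecommender.build)

        # restore the original value
        ShoeRecommender.biases[0].weight = w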
def test_invalid_confidence(self):\n \"\"\"Test if the exception is raised for a rule with invalid confidence\"\"\"\n \n # set some invalid weight, assert it throws the error \n c = ShoeRecommender.rules[0].confidence\n ShoeRecommender.rules[0].confidence = lambda a, b: 1.3\n\n assert_raises(ConfigurationError, ShoeRecommender.build)\n\n # restore the original value\n ShoeRecommender.rules[0].confidence = c\n\n def test_empty_predicted_relationship(self):\n \"\"\"Test building a recommender with emtpy predicted relationship\"\"\"\n \n # save and delete the predicted_relationship\n pr = ShoeRecommender.predicted_relationship\n ShoeRecommender.predicted_relationship = None\n \n # assert it raises an error\n assert_raises(ConfigurationError, ShoeRecommender.build)\n \n # restore the original predicted_relationship\n ShoeRecommender.predicted_relationship = pr\n \n def test_empty_subjects(self):\n \"\"\"Test building a recommender with empty subjects\"\"\"\n \n # delete all users\n User.objects.all().delete()\n \n # assert it raises an error\n assert_raises(ConfigurationError, ShoeRecommender.build) \n\n def test_empty_objects(self):\n \"\"\"Test building a recommender with empty objects\"\"\"\n \n # delete all shoes\n ShoePair.objects.all().delete()\n \n # assert it raises an error\n assert_raises(ConfigurationError, ShoeRecommender.build) \n \n def test_rule_description_error(self):\n \"\"\"Test building a recommender with strange rule description\"\"\"\n \n # save and mess up the description\n d = ShoeRecommender.rules[1].description \n ShoeRecommender.rules[1].description = \"\"\"Some messed %(crap)s description\"\"\"\n \n # assert it raises an error\n assert_raises(DescriptionKeyError, ShoeRecommender.build) \n \n # restore the original \n ShoeRecommender.rules[1].description = d\n\n def test_relationship_description_error(self):\n \"\"\"Test building a recommender with strange relationship description\"\"\"\n \n # save and mess up the description\n d = ShoeRecommender.relationships[0].description \n ShoeRecommender.relationships[0].description = \"\"\"Some other messed %(other_crap)s description\"\"\"\n \n # assert it raises an error\n assert_raises(DescriptionKeyError, ShoeRecommender.build) \n \n # restore the original \n ShoeRecommender.relationships[0].description = d\n\n def test_no_key_description_error(self):\n \"\"\"Test building a recommender with a description without keys\"\"\"\n \n # save and mess up the description\n d = ShoeRecommender.rules[2].description \n ShoeRecommender.rules[2].description = \"\"\"No-key description.\"\"\"\n \n # assert it doesn't raise any error\n ShoeRecommender.build()\n \n # restore the original \n ShoeRecommender.rules[2].description = d\n \n def test_empty_rules_rels(self):\n \"\"\"Test building the recommender with empty rules and relationships\"\"\"\n \n # save and delete rules and relationships\n rules = ShoeRecommender.rules\n relationships = ShoeRecommender.relationships\n \n ShoeRecommender.rules = ()\n ShoeRecommender.relationships = ()\n \n # build it, test it doesn't freeze\n ShoeRecommender.build() \n \n # restore\n ShoeRecommender.rules = rules\n ShoeRecommender.relationships = relationships\n \ndef _count_exp(conf):\n return 0.5 + conf/2\n \ndef _count_neg_exp(conf): \n return 0.5 - conf/2\n \nclass TestAggregator(TestEntities):\n \"\"\"Testing the building phase of the Linear Aggregator\"\"\"\n \n EXPECTED_AGGREGATES = {\n # S-O\n ('Alice', 'RS 130'): (_count_neg_exp(0.85), 'S-O', \n (\"Alice is from south, so RS 130 can't be 
recommended to him/her.\",)), # 0.075\n ('Alice', 'Rubber Shoes'): ((_count_exp(0.4) + _count_exp(0.1))/2, 'S-O',\n ('User Alice has viewed Rubber Shoes. User Alice is from the same city as the manufacturer of Rubber Shoes.',)), # 0.625\n ('Alice', 'Sneakers'): (_count_exp(0.1), 'S-O', \n (\"User Alice is from the same city as the manufacturer of Sneakers.\",)), # 0.55\n \n ('Bob', 'RS 130'): (_count_neg_exp(0.85), 'S-O', \n (\"Bob is from south, so RS 130 can't be recommended to him/her.\",)), # 0.075\n ('Bob', 'Rubber Shoes'): (_count_exp(0.1), 'S-O',\n (\"User Bob is from the same city as the manufacturer of Rubber Shoes.\",)), # 0.55\n ('Bob', 'Sneakers'): ((_count_exp(0.4) + _count_exp(0.1))/2, 'S-O',\n (\"User Bob has viewed Sneakers. User Bob is from the same city as the manufacturer of Sneakers.\",)), # 0.625\n\n ('Cindy', 'RS 130'): (_count_exp(0.1), 'S-O',\n (\"User Cindy is from the same city as the manufacturer of RS 130.\",)), # 0.55\n ('Cindy', 'Rubber Shoes'): (_count_exp(0.4), 'S-O',\n (\"User Cindy has viewed Rubber Shoes.\",)), # 0.7\n \n ('Daisy', 'RS 130'): (_count_exp(0.1), 'S-O',\n (\"User Daisy is from the same city as the manufacturer of RS 130.\",)), # 0.55\n \n # S-S\n ('Alice', 'Bob'): ((_count_exp(0.75 * 0.2) + _count_exp(0.3))/2, 'S-S', (\n \"Users Alice and Bob live in the same city. Users Alice and Bob are about the same age.\",\n \"Users Alice and Bob live in the same city. Users Bob and Alice are about the same age.\",\n \"Users Alice and Bob live in the same city. Users Bob and Alice are about the same age.\",\n \"Users Bob and Alice live in the same city. Users Bob and Alice are about the same age.\")), # 0.6125\n ('Cindy', 'Daisy'): (_count_exp(0.3), 'S-S', (\n \"Users Cindy and Daisy live in the same city.\",\n \"Users Daisy and Cindy live in the same city.\")), #0.65\n ('Edgar', 'Fionna'): (_count_exp(1 * 0.2), 'S-S', (\n \"Users Edgar and Fionna are about the same age.\",\n \"Users Fionna and Edgar are about the same age.\")),\n \n # O-O\n ('Rubber Shoes', 'Sneakers'): ((_count_exp(0.4) + _count_exp(0.1))/2, 'O-O', (\n \"The shoe pairs Rubber Shoes and Sneakers share some keywords. Shoes Sneakers and Rubber Shoes were made by the same manufacturer.\",\n \"The shoe pairs Rubber Shoes and Sneakers share some keywords. Shoes Rubber Shoes and Sneakers were made by the same manufacturer.\",\n \"The shoe pairs Sneakers and Rubber Shoes share some keywords. Shoes Sneakers and Rubber Shoes were made by the same manufacturer.\",\n \"The shoe pairs Sneakers and Rubber Shoes share some keywords. 
Shoes Rubber Shoes and Sneakers were made by the same manufacturer.\")), # 0.625\n ('Sneakers', 'Design Shoes'): (_count_exp(0.2), 'O-O',(\n \"The shoe pairs Sneakers and Design Shoes share some keywords.\",\n \"The shoe pairs Design Shoes and Sneakers share some keywords.\",)), # 0.6 \n }\n \"\"\"A dictionary: pair of entities : expectancy, entity_type, description\"\"\" \n\n def test_aggregates_created(self):\n \"\"\"Test that the aggregates were created as expected\"\"\"\n \n # filter aggregated instances for my recommender\n aggr_instances = AggregatedRelationshipInstance.objects.filter(\n recommender=self.recommender._get_recommender_model())\n \n # assert it has the expected length \n eq_(aggr_instances.count(), len(self.EXPECTED_AGGREGATES))\n \n for aggr_inst in aggr_instances.iterator():\n pair1 = (aggr_inst.subject_object1.name, aggr_inst.subject_object2.name)\n pair2 = (aggr_inst.subject_object2.name, aggr_inst.subject_object1.name) \n \n # try getting the instance from expected in both directions\n if self.EXPECTED_AGGREGATES.has_key(pair1):\n expected_expectancy, expected_rel_type, expected_descs = self.EXPECTED_AGGREGATES[pair1]\n else:\n if self.EXPECTED_AGGREGATES.has_key(pair2):\n expected_expectancy,expected_rel_type, expected_descs = self.EXPECTED_AGGREGATES[pair2]\n else:\n # if not found it's unexpected.\n assert False, \\\n \"Unexpected aggregate between '%s' and '%s' expectancy: %f\" % \\\n (pair1 + (aggr_inst.expectancy, ))\n \n # assert the order in the aggregate is right\n if aggr_inst.relationship_type == 'S-O':\n assert aggr_inst.subject_object1.entity_type == 'S' and aggr_inst.subject_object2.entity_type == 'O'\n else:\n assert aggr_inst.subject_object1.id < aggr_inst.subject_object2.id\n \n # assert the expectancy is as expected \n assert_almost_equal(aggr_inst.expectancy, expected_expectancy, PLACES,\n \"Expectancy is '%f' should be '%f' for the pair %s, %s, Obtained description: %s\" % \\\n ((aggr_inst.expectancy, expected_expectancy) + pair1 + (aggr_inst.description,))) \n \n # assert the relationship type is as expected \n eq_(aggr_inst.relationship_type, expected_rel_type,\n \"Relationship type is '%s' should be '%s' for the pair %s, %s\" % \\\n ((aggr_inst.relationship_type, expected_rel_type) + pair1)) \n \n # assert the description is as expected \n assert aggr_inst.description in expected_descs, \\\n \"Description is '%s' should be one of '%s' for the pair %s, %s\" % \\\n ((aggr_inst.description, expected_descs) + pair1) \n\n EXP_AGGR_BIASES = {\n 'Alice': (_count_exp(0.4 * 0.333333), \"User Alice likes many shoe pairs.\"),\n 'Bob': (_count_exp(0.4 * 0.333333), \"User Bob likes many shoe pairs.\"),\n 'Edgar': (_count_exp(0.4 * 0.66666), \"User Edgar likes many shoe pairs.\"),\n 'Sneakers': (_count_exp(0.8 * 0.6666666), \"Shoe pair Sneakers is popular\"),\n 'Rubber Shoes': (_count_exp(0.8 * 0.333333), \"Shoe pair Rubber Shoes is popular\"),\n 'RS 130': (_count_exp(0.8 * 0.333333), \"Shoe pair RS 130 is popular\"),\n }\n \n def test_aggregated_biases_created(self):\n \"\"\"Test that the bias aggregates were created as expected\"\"\"\n \n # get the aggregates, check count\n # \n aggregates = AggregatedBiasInstance.objects.filter(recommender=self.recommender._get_recommender_model()) \n eq_(aggregates.count(), len(self.EXP_AGGR_BIASES))\n \n # go through created aggregates, assert its in expected and \n # with expected attrs\n for aggr in aggregates:\n \n assert self.EXP_AGGR_BIASES.has_key(aggr.subject_object.name)\n exp_expectancy, exp_desc = 
self.EXP_AGGR_BIASES[aggr.subject_object.name]\n \n assert_almost_equal(aggr.expectancy, exp_expectancy, PLACES,\n \"Expected expectancy %f, got %f for %s\" % \\\n (exp_expectancy, aggr.expectancy, aggr.subject_object.name) )\n \n eq_(aggr.description, exp_desc) \n \nclass TestAggregatorAverage(TestBuildAverage): \n \"\"\"Testing the aggregator of the average build\"\"\"\n\n def test_aggregated_biases_created(self):\n \"\"\"AverageRecommender: Test that the bias aggregates were created as expected\"\"\" \n # call the same method on the normal aggregator\n ta = TestAggregator(\"test_aggregated_biases_created\")\n ta.recommender = self.recommender\n ta.test_aggregated_biases_created()\n EXPECTED_AGGREGATES = {\n # S-S\n ('Alice', 'Bob'): ((_count_exp(0.75 * 0.2) + _count_exp(0.3))/2, 'S-S', (\n \"Reason 1: Users Alice and Bob live in the same city. Reason 2: Users Alice and Bob are about the same age.\",\n \"Reason 1: Users Alice and Bob live in the same city. Reason 2: Users Bob and Alice are about the same age.\",\n \"Reason 1: Users Alice and Bob live in the same city. Reason 2: Users Bob and Alice are about the same age.\",\n \"Reason 1: Users Bob and Alice live in the same city. Reason 2: Users Bob and Alice are about the same age.\")), # 0.6125\n ('Cindy', 'Daisy'): (_count_exp(0.3), 'S-S', (\n \"Users Cindy and Daisy live in the same city.\",\n \"Users Daisy and Cindy live in the same city.\")), #0.65\n ('Edgar', 'Fionna'): (_count_exp(1 * 0.2), 'S-S', (\n \"Users Edgar and Fionna are about the same age.\",\n \"Users Fionna and Edgar are about the same age.\")),\n \n # O-O\n ('Rubber Shoes', 'Sneakers'): ((_count_exp(0.4) + _count_exp(0.1))/2, 'O-O', (\n \"Reason 1: The shoe pairs Rubber Shoes and Sneakers share some keywords. Reason 2: Shoes Sneakers and Rubber Shoes were made by the same manufacturer.\",\n \"Reason 1: The shoe pairs Rubber Shoes and Sneakers share some keywords. Reason 2: Shoes Rubber Shoes and Sneakers were made by the same manufacturer.\",\n \"Reason 1: The shoe pairs Sneakers and Rubber Shoes share some keywords. Reason 2: Shoes Sneakers and Rubber Shoes were made by the same manufacturer.\",\n \"Reason 1: The shoe pairs Sneakers and Rubber Shoes share some keywords. Reason 2: Shoes Rubber Shoes and Sneakers were made by the same manufacturer.\")), # 0.625\n ('Sneakers', 'Design Shoes'): (0.625, 'O-O',(\n \"Reason 1: Sneakers belong to the Casual category. Design Shoes belong to the Casual category. Reason 2: The shoe pairs Sneakers and Design Shoes share some keywords.\",\n \"Reason 1: Sneakers belong to the Casual category. Design Shoes belong to the Casual category. Reason 2: The shoe pairs Design Shoes and Sneakers share some keywords.\",\n \"Reason 1: Design Shoes belong to the Casual category. Sneakers belong to the Casual category. Reason 2: The shoe pairs Sneakers and Design Shoes share some keywords.\",\n \"Reason 1: Design Shoes belong to the Casual category. Sneakers belong to the Casual category. 
Reason 2: The shoe pairs Design Shoes and Sneakers share some keywords.\",)), # 0.6 \n }\n\n def test_aggregates_created(self):\n \"\"\"AverageRecommender: Test that the aggregates were created as expected\"\"\"\n \n # instantiate the common aggregator\n ta = TestAggregator(\"test_aggregates_created\")\n ta.recommender = self.recommender\n\n # leave in the aggregates only similarity relationship \n ta.EXPECTED_AGGREGATES = self.EXPECTED_AGGREGATES\n\n \n # call the same method on the normal aggregator\n ta.test_aggregates_created()\n \n \nclass DTestAlgorithm(TestEntities):\n \"\"\"Testing the building phase of the SimpleAlgorithm\"\"\" \n \n \n EXPECTED_PREDICTIONS = {\n # S-O\n ('Alice', 'RS 130'): _count_neg_exp(0.85), # 0.075\n ('Alice', 'Rubber Shoes'): (_count_exp(0.4) + _count_exp(0.1))/2, # 0.625\n ('Alice', 'Sneakers'): _count_exp(0.1), # 0.55\n ('Alice', 'Design Shoes'): _count_exp(0.2), # 0.6\n \n ('Bob', 'RS 130'): _count_neg_exp(0.85), # 0.075\n ('Bob', 'Rubber Shoes'): _count_exp(0.1), # 0.55\n ('Bob', 'Sneakers'): (_count_exp(0.4) + _count_exp(0.1))/2, # 0.625\n ('Bob', 'Design Shoes'): _count_exp(0.2), # 0.6\n\n ('Cindy', 'RS 130'): _count_exp(0.1), # 0.55\n ('Cindy', 'Rubber Shoes'): _count_exp(0.4), # 0.7 \n \n ('Daisy', 'RS 130'): _count_exp(0.1), # 0.55\n \n ('Edgar', 'Sneakers'): (_count_exp(0.1) + _count_exp(0.4))/2, # 0.625 \n \n ('Fionna', 'Rubber Shoes'): _count_exp(0.2),\n ('Fionna', 'RS 130'): _count_exp(0.2), # 0.6 \n } \n \n def dtest_predictions_created(self):\n \"\"\"Test that the predictions were created as expected\"\"\"\n \n # filter aggregated instances for my recommender\n pred_instances = RelationshipPredictionInstance.objects.filter(\n recommender=ShoeRecommender._get_recommender_model())\n \n # assert it has the expected length \n eq_(pred_instances.count(), len(self.EXPECTED_PREDICTIONS), \n \"expected: %d, obtained: %d, obtained instances: %s\" % \\\n (len(self.EXPECTED_PREDICTIONS), pred_instances.count(), pred_instances))\n \n for pred_inst in pred_instances.iterator():\n pair1 = (pred_inst.subject_object1.name, pred_inst.subject_object2.name) \n \n # try getting the instance from expected in both directions\n assert self.EXPECTED_PREDICTIONS.has_key(pair1), \\\n \"Unexpected prediction between '%s' and '%s' expectancy: %f\" % \\\n (pair1 + (pred_inst.expectancy, ))\n expected_prediction = self.EXPECTED_PREDICTIONS[pair1]\n\n # assert the expectancy is as expected \n assert_almost_equal(pred_inst.expectancy, expected_prediction, PLACES, \n \"Prediction is '%f' should be '%f' for the pair %s, %s\" % \\\n ((pred_inst.expectancy, expected_prediction) + pair1)) \n \n\n" }, { "alpha_fraction": 0.5023733973503113, "alphanum_fraction": 0.5142405033111572, "avg_line_length": 30.600000381469727, "blob_id": "100bdb93f6cbb8886489104c5cf6d157b1ba74eb", "content_id": "22af5dba242c3bd59bcfac3c2710b10a4c089a63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/UnresystCD/code/adapter/unresyst/combinator/confidence_factor.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Combinator using confidence factor calculus.\"\"\"\n\nfrom base import BaseCombinator\n\n\n \n\nclass ConfidenceFactorCombinator(BaseCombinator):\n \"\"\"A combinator using weighted average\n \"\"\"\n \n def _combine(self, combination_elements, ResultClass):\n \"\"\"See the base class for documentation\"\"\"\n \n res_exp = 
combination_elements[0].get_expectancy()\n \n # go through the combination elements\n for ce in combination_elements[1:]:\n \n # convert to confidence factors\n res_cf = 2 * res_exp - 1\n ce_cf = 2 * ce.get_expectancy() -1\n \n # count the confidence factor combination\n if res_cf > 0 and ce_cf > 0: \n comb_cf = res_cf + ce_cf * (1 - res_cf)\n \n elif res_cf < 0 and ce_cf < 0:\n comb_cf = res_cf + ce_cf * (1 + res_cf)\n \n else:\n comb_cf = (res_cf * ce_cf) / (1 - min(abs(res_cf), abs(ce_cf)))\n \n # and back to expectancy\n res_exp = (comb_cf + 1) / 2\n \n\n desc = self._concat_descriptions(combination_elements)\n \n return ResultClass(expectancy=res_exp, description=desc)\n" }, { "alpha_fraction": 0.5262203812599182, "alphanum_fraction": 0.5283953547477722, "avg_line_length": 32.290321350097656, "blob_id": "ef402b712baa9bec74f7b91d52c575dc73ac448b", "content_id": "fef96f14982817083f7791d2f051bc5be587bcdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8276, "license_type": "no_license", "max_line_length": 98, "num_lines": 248, "path": "/code/adapter/unresyst/recommender/evaluation.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The classes related to recommender evaluation\"\"\"\nimport os.path\nimport codecs\n\nfrom unresyst.exceptions import EmptyTestSetError\nfrom unresyst.constants import *\n\nfrom settings import LOG_DIRECTORY\n\nclass BaseEvaluator(object):\n \"\"\"The base class for all evaluators\"\"\"\n \n EvaluationPairModel = None\n \"\"\"The model of the class containing testing pairs. To be overriden.\n A EvaluationPair subclass.\n \"\"\"\n \n prediction_metric = None\n \"\"\"A function taking the test pair model that is called on the evaluated pairs\n to obtain a number \n \"\"\"\n \n recommendation_metric = None\n \"\"\"A function taking the test pair model and number of desired recommendations,\n that is called on the evaluated pair model to obtain a number \n \"\"\"\n \n # methods to be called before the build:\n # \n \n @classmethod\n def select_evaluation_pairs(cls):\n \"\"\"Select the pairs for recommender evaluation.\n \n The selected pairs will be added to the EvaluationPairModel and\n removed from the models where they are now\n \"\"\"\n cls.EvaluationPairModel.select()\n \n \n @classmethod\n def export_evaluation_pairs(cls, filename):\n \"\"\"Export the evaluation pair ids to a csv \n file of the given name.\n \n @type filename: str\n @param filename: the full path to the file\n \n @raise FileNotExists and other file open errors.\n \"\"\"\n \n with open(filename, 'w') as f:\n \n # export the relationships to the given file\n cls.EvaluationPairModel.export(f)\n\n \n # methods to be called after the build:\n # \n \n @classmethod\n def _get_cleared_pairs(cls):\n \"\"\"Get the test pairs, check whether they aren't empty and\n clear the results.\n \n @rtype: QuerySet\n @return: the test pairs\n \n @raise EmptyTestSetError: if the set is empty\n \"\"\"\n # get the dataset and test if not empty\n qs_pairs = cls.EvaluationPairModel.objects.all() \n \n # remove all previous success and obtained expectancies\n qs_pairs.update(obtained_expectancy=None, is_successful=None) \n \n if not qs_pairs:\n raise EmptyTestSetError(\"Call the select_validation_pairs()\"+ \\\n \" method first\") \n \n return qs_pairs\n\n @staticmethod\n def _open_logfile(directory, filename):\n \"\"\"Open a log file\n \n @rtype: file\n \"\"\"\n if directory and filename:\n full_name = os.path.join(directory, 
filename)\n return codecs.open(full_name, mode='w', encoding='utf-8')\n\n return None \n\n @classmethod\n def evaluate_predictions(cls, recommender, save_predictions=False):\n \"\"\"Evaluate each evaluation pair by calling the predict_relationship\n method.\n \n The results are written to the pair model\n \n @type recommender: Recommender\n @param recommender: the built recommender on which the predictions\n should be evaluated \n \n @type save_predictions: bool\n @param save_predictions: should the generated predictions be saved to db?\n \n @rtype: float\n @return: the result of the metric \n\n @raise EmptyTestSetError: if the test set is empty \n \"\"\"\n # if the settings are given, open a log file and log\n fpreds = cls._open_logfile(LOG_DIRECTORY, LOG_PREDICTIONS_FILENAME)\n fhits = cls._open_logfile(LOG_DIRECTORY, LOG_HITS_FILENAME)\n \n # get the pairs\n qs_pairs = cls._get_cleared_pairs()\n all_count = qs_pairs.count() \n\n # initialize \n i = 0\n succ_count = 0\n non_triv_count = 0\n \n print \"Processing %d pairs...\" % all_count\n \n # go through the pairs\n for pair in qs_pairs.iterator():\n \n # evaluate\n prediction = recommender.predict_relationship(pair.subj, pair.obj, save_predictions)\n \n # log\n if fpreds:\n fpreds.write(u\"%s\\n\" % prediction.__unicode__())\n \n pair.obtained_expectancy = prediction.expectancy\n pair.is_successful = pair.get_success()\n pair.save() \n \n i += 1\n \n if i % 100 == 0:\n print \"%d pairs processed\" % i\n \n if pair.is_successful:\n succ_count += 1\n \n if fhits:\n fhits.write(u'%s\\n' % prediction.__unicode__())\n \n if pair.obtained_expectancy < TRIVIAL_EXPECTANCY:\n non_triv_count += 1\n \n # this isn't very best practice-following but we don't care for file corruption\n if fhits:\n fhits.close()\n \n if fpreds:\n fpreds.close()\n \n # count and print the success rate \n cls.success_rate = float(succ_count)/ all_count\n\n print \"Success rate: %f (%d/%d)\" % (cls.success_rate, succ_count, all_count)\n print \"%d of %d successful predictions were non-trivial.\" % (non_triv_count, succ_count)\n \n # count the metric \n return cls.prediction_metric()\n \n @classmethod\n def evaluate_recommendations(cls, recommender, count):\n \"\"\"Evaluate recommendations obtained for the subjects in the test\n pairs.\n \n @type recommender: Recommender\n @param recommender: the built recommender on which the predictions\n should be evaluated\n \n @type count: int\n @param count: the number of recommendations to get \n \n @rtype: float\n @return: the result of the metric \n \"\"\"\n # if the settings are given, open a log file and log \n fhits = cls._open_logfile(LOG_DIRECTORY, LOG_HITS_FILENAME)\n frecs = cls._open_logfile(LOG_DIRECTORY, LOG_RECOMMENDATIONS_FILENAME)\n \n # get the cleared pair dataset \n qs_pairs = cls._get_cleared_pairs()\n \n # take ids of subjects from the test pairs \n subj_id_list = qs_pairs.values_list('subj__pk',flat=True).distinct()\n \n # take the whole subjects\n qs_subjects = recommender.subjects.filter(pk__in=subj_id_list) \n\n i = 0 \n hit_count = 0 \n for subj in qs_subjects: \n \n i += 1\n \n if i % 10 == 0:\n print \"%d subjects processed\" % i\n \n # for each subject in the test pair get its recommendations\n recommendations = recommender.get_recommendations(subj, count) \n \n \n # mark each recommended object that is in the test set as successful\n #\n for rec in recommendations:\n\n # if it should be logged, log it\n if frecs:\n frecs.write(u'%s\\n' % rec.__unicode__())\n \n obj = rec.object_\n \n # filter the test 
pair\n qs_pair = cls.EvaluationPairModel.objects.filter(subj=subj, obj=obj)\n \n # if it's in the test pair mark it as successful\n for pair in qs_pair:\n \n hit_count += 1\n \n pair.is_successful = True\n pair.save()\n \n if fhits:\n fhits.write(u'%s\\n' % rec.__unicode__())\n\n # this isn't very best practice-following but we don't care for file corruption\n if fhits:\n fhits.close()\n \n if frecs:\n frecs.close()\n \n print \"%d hits recorded, counting metric...\" % hit_count\n \n # count the metric \n return cls.recommendation_metric(count)\n \n \n \n\n" }, { "alpha_fraction": 0.7510204315185547, "alphanum_fraction": 0.7530612349510193, "avg_line_length": 36.69230651855469, "blob_id": "dd47d8c9649e3b251edb94912656715064a4e277", "content_id": "6a020e161580ee21d4d2238f749bfa815261ef8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 75, "num_lines": 13, "path": "/UnresystCD/code/adapter/flixster/mahout_recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The representation of the Mahout recommender in Unresyst.\"\"\"\n\nfrom unresyst.recommender.external_recommender import ExternalRecommender\nfrom recommender import MovieRecommender\n\nclass MahoutMovieRecommender(ExternalRecommender, MovieRecommender):\n \"\"\"An external artist recommender - both novel and non-novel artists\"\"\"\n \n name = \"Mahout movie recommender\"\n \"\"\"The name\"\"\"\n \n explicit_rating_rule = MovieRecommender.rules[0]\n \"\"\"The first one is the explicit one\"\"\"\n" }, { "alpha_fraction": 0.6203219890594482, "alphanum_fraction": 0.6234704852104187, "avg_line_length": 32.91990280151367, "blob_id": "042d921424fee5827dca328ed2ba70d90411e1bd", "content_id": "a1b0dbb96e6b153bf61ff07f114a55cc9171088c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13975, "license_type": "no_license", "max_line_length": 100, "num_lines": 412, "path": "/UnresystCD/code/adapter/unresyst/models/abstractor.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models which instances are created by the abstractor package.\n\nA representation of each defined role and relationship.\n\"\"\"\n\nfrom django.db import models\n\nfrom unresyst.constants import *\nfrom base import BaseRelationshipInstance, ContentTypeModel, \\\n BaseRelationshipDefinition\nfrom unresyst.exceptions import InvalidParameterError\n\ndef _count_expectancy(is_positive, weight, confidence=1):\n \"\"\"Count the expectancy for the instance as 1/2 +- weighted_confidence/2 \n .. 
depending on is_positive.\n \n @type is_positive: bool\n @param is_positive: is the rule/relationship/bias positive for the \n predicted_relationship?\n \n @type weight: float\n @param weight: the static weight of the rule/relationship/bias\n\n @type confidence: float\n @param confidence: the confidence of the rule/relationship/bias depending\n on the entity\n \n @rtype: float\n @return: the probability of the predicted_relationship appearing between\n the entities in the pair.\n \"\"\"\n \n # get factor \n factor = 1 if is_positive else -1 \n \n return 0.5 + factor * ((weight * confidence) / 2)\n\n\n# Definitions:\n# \n\nclass PredictedRelationshipDefinition(BaseRelationshipDefinition):\n \"\"\"A definition of a the predicted relationship\"\"\"\n\n class Meta:\n app_label = 'unresyst' \n \nclass ExplicitRuleDefinition(BaseRelationshipDefinition):\n \"\"\"A definition of an explicit feedback rule\"\"\"\n \n class Meta:\n app_label = 'unresyst' \n\n\nclass RuleRelationshipDefinition(BaseRelationshipDefinition):\n \"\"\"A definition of a rule/relationship. Represents the rule/relationship\n given by the domain expert in the database.\"\"\" \n \n weight = models.FloatField()\n \"\"\"The weight of the rule/relationship. A number from [0, 1].\"\"\"\n \n relationship_type = models.CharField(max_length=MAX_LENGTH_RELATIONSHIP_TYPE, \n choices=RELATIONSHIP_TYPE_CHOICES)\n \"\"\"A string indicating whether it's a subject, object or both.s/o/so\"\"\"\n \n is_positive = models.BooleanField()\n \"\"\"Is the rule/relationship positive for the predicted_relationship?\"\"\"\n\n class Meta:\n app_label = 'unresyst' \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\" \n ret = super(RuleRelationshipDefinition, self).__unicode__()\n\n return ret + \"%f, positive: %s\" % (self.weight, self.is_positive)\n \n# instances:\n#\n\nclass RelationshipInstance(BaseRelationshipInstance, ContentTypeModel):\n \"\"\"The relationship between two subject/objects.\n \n All relationships are \"symetrical\" in sense that if there is \n a relationship in the given direction there can't be one in the opposite.\n \n Weight of the relationship is in the definition.\n \n The model also contains the predicted relationship instances.\n \"\"\"\n \n definition = models.ForeignKey('unresyst.BaseRelationshipDefinition')\n \"\"\"The definition of the relationship. 
Pointing to the base, because\n it has to be also for the predicted relationship instances.\n \"\"\"\n \n additional_unique = ('definition', )\n \"\"\"There can be multiple pairs for one recommender\"\"\"\n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('subject_object1', 'subject_object2', 'definition')\n \"\"\"For each definition there can be only one subject-object pair.\"\"\"\n\n def get_expectancy(self, _redirect_to_leaf=True):\n \"\"\"Get the instance expectancy counted \n \n @type redirect_to_leaf: bool\n @param redirect_to_leaf: if True, the call is redirected to the leaf class.\n When called from outside _redirect_to_leaf should always be True, unless\n it's sure it's called on the leaf class instance.\n \n @rtype: float from [0, 1]\n @return: the aggregated expectancy.\n \"\"\"\n \n # redirect to leaf or not\n if _redirect_to_leaf:\n return self.as_leaf_class().get_expectancy(_redirect_to_leaf=False)\n \n # get the whole object for definition\n leaf_definition = self.definition.as_leaf_class() \n \n return _count_expectancy(\n is_positive=leaf_definition.is_positive,\n weight=leaf_definition.weight)\n\n @classmethod\n def filter_predicted(cls, recommender_model):\n \"\"\"Get queryset of the instances of the predicted relationship.\n \n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. \n\n @rtype: QuerySet\n @returns: instances of the predicted relationship for the given\n recommender\n \"\"\"\n # get the predicted relationship definition\n pred_rel_def = PredictedRelationshipDefinition.objects.get(\n recommender=recommender_model)\n \n return cls.objects.filter(definition=pred_rel_def) \n\n\nclass ExplicitRuleInstance(BaseRelationshipInstance):\n \"\"\"The explicit preference of a subject to an object.\n \n The relationship isn't weighted. \n \"\"\"\n \n definition = models.ForeignKey('unresyst.ExplicitRuleDefinition')\n \"\"\"The definition of the relationship. \n \"\"\"\n\n expectancy = models.FloatField()\n \"\"\"The normalized explicit preference of the given subject to the given \n object.\n A number from [0, 1].\n \"\"\"\n \n additional_unique = ('definition', )\n \"\"\"There can be multiple pairs for one recommender\"\"\"\n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('subject_object1', 'subject_object2', 'definition')\n \"\"\"For each definition there can be only one subject-object pair.\"\"\"\n\n def get_expectancy(self, _redirect_to_leaf=True):\n \"\"\"Get the instance expectancy counted \n \n @type redirect_to_leaf: bool\n @param redirect_to_leaf: to stay compatible with other classes\n \n @rtype: float from [0, 1]\n @return: the expectancy.\n \"\"\" \n return self.expectancy \n\n\nclass RuleInstance(RelationshipInstance):\n \"\"\"The rule applied to a pair of subjects/objects.\"\"\" \n \n confidence = models.FloatField()\n \"\"\"The confidence of the given rule being positive/negative (depending on\n definition.is_positive) in means of the predicted_relationship\n A number from [0, 1].\n \"\"\"\n \n def get_expectancy(self, _redirect_to_leaf=True):\n \"\"\"Get the instance expectancy counted as 1/2 +- weight*confidence/2 .. depending on whether\n the relationship is positive.\n\n @type redirect_to_leaf: bool\n @param redirect_to_leaf: if True, the call is redirected to the leaf class.\n When called from outside _redirect_to_leaf should always be True, unless\n it's sure it's called on the leaf class instance. 
\n \n @rtype: float from [0, 1]\n @return: the aggregated expectancy.\n \"\"\"\n # get the whole object for definition\n leaf_definition = self.definition.as_leaf_class()\n \n return _count_expectancy(\n is_positive=leaf_definition.is_positive, \n weight=leaf_definition.weight,\n confidence=self.confidence) \n \n class Meta:\n app_label = 'unresyst'\n \n# clusters:\n# \n\nclass ClusterSet(models.Model):\n \"\"\"A set of clusters defined by the user. E.g. 'Gender'.\"\"\"\n\n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"A textual characterization of the cluster set.\"\"\" \n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender to which the cluster set belongs.\"\"\" \n\n entity_type = models.CharField(max_length=MAX_LENGTH_ENTITY_TYPE, \\\n choices=ENTITY_TYPE_CHOICES)\n \"\"\"A string indicating whether the contained clusters are for subjects,\n objects or both: s/o/so\"\"\"\n\n weight = models.FloatField()\n \"\"\"The weight of the similarity inferred from two subject/objects \n belonging to one cluster. A number from [0, 1].\n \"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('name', 'recommender')\n \"\"\"There can be only one cluster set with the given name for \n a recommender.\n \"\"\"\n\n\nclass Cluster(models.Model):\n \"\"\"A cluster (class of subjects/objects) defined by the user.\n E.g. 'Female'\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"A textual characterization of the cluster\"\"\"\n \n cluster_set = models.ForeignKey('unresyst.ClusterSet')\n \"\"\"The set the cluster belongs to\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"%s - %s\" % (self.cluster_set, self.name)\n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('name', 'cluster_set')\n \"\"\"There can be only one cluster of the given name in the \n cluster set.\n \"\"\"\n\nclass ClusterMember(models.Model):\n \"\"\"A membership of a subject/object in the cluster.\"\"\"\n \n cluster = models.ForeignKey('unresyst.Cluster')\n \"\"\"The cluster\"\"\"\n \n member = models.ForeignKey('unresyst.SubjectObject')\n \"\"\"The cluster member\"\"\"\n \n description = models.TextField(default='', blank=True)\n \"\"\"The description of the membership.\"\"\" \n \n confidence = models.FloatField()\n \"\"\"The confidence of the member belonging to the cluster.\n A number from [0, 1].\n \"\"\"\n \n @classmethod\n def get_pair_expectancy(cls, cluster_member_pair):\n \"\"\"Return the expectancy that will be used for similarity counting \n for cluster members\n \n @type cluster_member_pair: a tuple (pair) of ClusterMember\n @param cluster_member_pair: cluster members for counting the similarity\n of the members.\n \n @rtype: float\n @return: the expectancy that the members are similar\n \"\"\"\n cm1, cm2 = cluster_member_pair\n \n # if they aren't from the same cluster raise an error\n if cm1.cluster != cm2.cluster:\n raise InvalidParameterError(\n message=\"The members to combine don't belong to the same cluster.\",\n recommender=cm1.cluster.cluster_set.recommender, \n parameter_name='cluster_member_pair', \n parameter_value=cluster_member_pair)\n \n conf = cm1.confidence * cm2.confidence\n \n return _count_expectancy(\n is_positive=True,\n weight=cm1.cluster.cluster_set.weight,\n confidence=conf)\n \n def __unicode__(self):\n \"\"\"Return a 
printable representation of the instance\"\"\"\n return u\"(%s, %s): %f\" % (self.cluster, self.member, self.confidence)\n \n \n class Meta:\n app_label = 'unresyst' \n \n unique_together = ('cluster', 'member')\n \"\"\"There can be only one membership for a subjectobject in the given \n cluster.\n \"\"\" \n\n# Bias:\n#\n \nclass BiasDefinition(models.Model):\n \"\"\"The definition of a bias\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the bias\"\"\" \n \n recommender = models.ForeignKey('unresyst.Recommender')\n \"\"\"The recommender to which the definition belongs. \n \"\"\"\n \n weight = models.FloatField()\n \"\"\"The weight of the bias. A number from [0, 1].\n \"\"\"\n \n entity_type = models.CharField(max_length=MAX_LENGTH_ENTITY_TYPE, \\\n choices=ENTITY_TYPE_CHOICES)\n \"\"\"A string indicating whether the contained bias is for subjects,\n objects or both: s/o/so\"\"\"\n \n is_positive = models.BooleanField()\n \"\"\"Is the bias positive (adding a probability) for the predicted_relationship?\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance.\"\"\"\n return self.name\n \n class Meta:\n app_label = 'unresyst' \n unique_together = ('name', 'recommender')\n \"\"\"There can be only one bias definition with the given name for \n a recommender.\n \"\"\"\n \n \nclass BiasInstance(models.Model):\n \"\"\"A bias for a particular subject/object.\"\"\" \n \n confidence = models.FloatField()\n \"\"\"The confidence of the given bias being positive/negative (depending on\n definition.is_positive) in terms of the predicted_relationship.\n A number from [0, 1].\n \"\"\" \n \n subject_object = models.ForeignKey('unresyst.SubjectObject')\n \"\"\"The biased subject/object.\"\"\"\n \n definition = models.ForeignKey('unresyst.BiasDefinition')\n \"\"\"The definition of the bias.\n \"\"\"\n \n description = models.TextField(default='', blank=True)\n \"\"\"The filled description of the bias.\"\"\" \n\n \n class Meta:\n app_label = 'unresyst' \n unique_together = ('definition', 'subject_object')\n \"\"\"There can be only one bias for each definition on one subjectobject\n for a recommender.\n \"\"\" \n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance.\"\"\"\n return u\"%s: %s\" % (self.definition, self.subject_object)\n\n def get_expectancy(self):\n \"\"\"Get the expectancy (probability that the subjectobject will be \n in the predicted_relationship, according to the bias).\n \n @rtype: float\n @return: expectancy\n \"\"\"\n return _count_expectancy(\n is_positive=self.definition.is_positive,\n weight=self.definition.weight,\n confidence=self.confidence)\n
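\n# A worked example of the _count_expectancy formula used throughout this\n# module: a positive rule or bias with weight 0.5 and confidence 0.8 gives\n# 0.5 + (0.5 * 0.8) / 2 = 0.7, while the same entry marked negative gives\n# 0.5 - (0.5 * 0.8) / 2 = 0.3; with zero weight or confidence the\n# expectancy stays at the neutral 0.5.\n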
" }, { "alpha_fraction": 0.6214788556098938, "alphanum_fraction": 0.6214788556098938, "avg_line_length": 31.75, "blob_id": "85e0020087a2ab13f553e98f84f39e95e03b0b20", "content_id": "8f12c0e536194f11e08ce5be4657ed527d83d9e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1704, "license_type": "no_license", "max_line_length": 83, "num_lines": 52, "path": "/code/adapter/unresyst/aggregator/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The module defines the base class for the aggregator package.\"\"\"\n\nclass BaseAggregator(object):\n \"\"\"The base (abstract) class for all aggregators. Defines the interface.\"\"\" \n \n # Build phase:\n #\n\n def __init__(self, combinator=None):\n \"\"\"The initializer\"\"\"\n \n self.combinator = combinator\n \"\"\"The combinator that should be used during aggregating\"\"\"\n\n \n def aggregate_rules_relationships(self, recommender_model):\n \"\"\"Aggregate the rule and relationship instances.\n \n Aggregates instances of the relationships and rules from \n the recommender_model to create AggregatedRelationshipInstance\n instances. Instances are saved to db.\n \n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. \n \"\"\"\n pass\n \n def aggregate_biases(self, recommender_model):\n \"\"\"Aggregate bias instances.\n \n Aggregates instances of the biases from the recommender_model \n to create AggregatedBiasInstance instances. Instances are saved to db.\n \n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. \n \"\"\"\n pass\n\n\n # Update phase:\n # \n \n @classmethod\n def update(cls, recommender, instance_changes):\n \"\"\"Update the aggregated relationships according to the instance \n changes.\n \n @rtype: to be decided\n @return: the changes performed on the aggregated relationships. \n \"\"\"\n\n
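# A minimal sketch of what a concrete subclass is expected to provide\n# (a hypothetical example; the class name below is illustrative only):\n#\n#     class LinearAggregator(BaseAggregator):\n#         def aggregate_rules_relationships(self, recommender_model):\n#             # combine the rule/relationship instances of the recommender\n#             # into AggregatedRelationshipInstance rows and save them\n#             ...\n#\n#         def aggregate_biases(self, recommender_model):\n#             # combine the bias instances into AggregatedBiasInstance rows\n#             ...\n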
" }, { "alpha_fraction": 0.5711066722869873, "alphanum_fraction": 0.6090385317802429, "avg_line_length": 31.73770523071289, "blob_id": "0ce8beca8e8c9058007d21a35e17f232ffdfa132", "content_id": "cc21b4cc085d14fb0118532c1aa738c285744b73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7988, "license_type": "no_license", "max_line_length": 145, "num_lines": 244, "path": "/code/adapter/lastfm/models.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The models for the last.fm datasets.\"\"\"\n\n\n\"\"\"\nuser_000001	2009-05-04T23:08:57Z	f1b1cf71-bd35-4e99-8624-24a6e15f133a	Deep Dish		Fuck Me Im Famous (Pacha Ibiza)-09-28-2007\nuser_000001	2009-04-01T14:24:52Z	6c639b1f-5390-4ec9-b3f8-7afe1562e107	Lady Alma		Running For Nothing [(Instrumental)]/(Instrumental)\nuser_000001	2009-02-05T22:58:19Z	440cb8ef-cee3-4711-8408-b1bd6e93f390	Towa Tei	c605c224-245b-4107-b9cd-ab1b2c6adb0a	Mind Wall (Feat. Miho Hatori)\n\n#id	gender	age	country	registered\nuser_000001	m		Japan	Aug 13, 2006\nuser_000002	f		Peru	Feb 24, 2006\nuser_000003	m	22	United States	Oct 30, 2005\nuser_000004	f			Apr 26, 2006\nuser_000005	m		Bulgaria	Jun 29, 2006\nuser_000006		24	Russian Federation	May 18, 2006\nuser_000007	f		United States	Jan 22, 2006\nuser_000008	m	23	Slovakia	Sep 28, 2006\nuser_000009	f	19	United States	Jan 13, 2007\nuser_000010	m	19	Poland	May 4, 2006\nuser_000011	m	21	Finland	Sep 8, 2005\n\"\"\"\n\nfrom django.db import models\nfrom django.db.models import Min\n\nfrom unresyst.models import BaseEvaluationPair\n\n\n\nfrom constants import *\n\nclass User(models.Model):\n \"\"\"The user\"\"\"\n \n gender = models.CharField(max_length=1, default='')\n \"\"\"The gender of the user ('m'/'f'); an empty string when unknown.\"\"\"\n \n age = models.PositiveIntegerField(null=True, default=None)\n \"\"\"The age of the user.\"\"\"\n \n registered = models.DateTimeField(null=True, default=None)\n \"\"\"Date when the user was registered\"\"\"\n\n country = models.ForeignKey('Country', null=True)\n \"\"\"The country the user is from\"\"\" \n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return 'user_%d' % self.id \n \n \nclass Country(models.Model):\n \"\"\"A model for a country.\"\"\" \n \n name = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The name of the country.\"\"\" \n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n \n \nclass Track(models.Model):\n \"\"\"A track (song)\"\"\"\n \n guid = models.CharField(unique=True, max_length=40)\n \"\"\"The id of the track in the musicbrainz database\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the track.\"\"\"\n\n artist = models.ForeignKey('Artist')\n \"\"\"The author of the track\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"%s: %s\" % (self.artist, self.name)\n\n\nclass Artist(models.Model):\n \"\"\"An artist (band or singer)\"\"\"\n \n guid = models.CharField(unique=True, max_length=40)\n \"\"\"The id of the artist in the musicbrainz database\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME)\n \"\"\"The name of the artist.\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass Scrobble(models.Model):\n \"\"\"A representation of the fact that a user has played a track\"\"\"\n \n user = models.ForeignKey('User')\n \"\"\"The user\"\"\"\n \n track = models.ForeignKey('Track')\n \"\"\"The played track\"\"\"\n \n timestamp = models.DateTimeField()\n \"\"\"The date and time the track was played\"\"\"\n \n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"(%s, %s)\" % (self.user, self.track)\n\n\nclass ArtistTag(models.Model):\n \"\"\"A representation of an artist being tagged by the tag\"\"\"\n \n artist = models.ForeignKey('Artist')\n \"\"\"The artist\"\"\"\n \n tag = models.ForeignKey('Tag')\n \"\"\"The tag\"\"\" \n \n count = models.PositiveIntegerField()\n \"\"\"How many times the artist was tagged by the tag\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return u\"(%s, %s: %d)\" % (self.artist, self.tag, self.count) \n \n\n \nclass Tag(models.Model):\n \"\"\"A representation of a tag (label) in the system\"\"\"\n \n name = models.CharField(max_length=MAX_LENGTH_NAME, unique=True)\n \"\"\"The label.\"\"\"\n \n gender_specific = models.CharField(max_length=1, default='')\n \"\"\"If the tag is preferred by a gender, the gender is here\"\"\"\n\n def __unicode__(self):\n \"\"\"Return a printable representation of the instance\"\"\"\n return self.name \n\n\nclass BaseArtistEvaluationPair(BaseEvaluationPair):\n \"\"\"An abstract class for the artist evaluators\"\"\"\n\n subj = models.ForeignKey('lastfm.User')\n \"\"\"The subject\"\"\"\n \n obj = models.ForeignKey('lastfm.Artist')\n \"\"\"The object\"\"\"\n \n test_ratio = 0.2\n \"\"\"The ratio of pairs to select as test pairs\"\"\"\n\n class Meta:\n abstract = True \n \n @classmethod \n def select(cls, i=0):\n \"\"\"See the base class for the documentation.\"\"\"\n 
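# The split below is time-based rather than random: the most recent\n # test_ratio of the scrobbles becomes the test set, everything older\n # stays in the train set, and test pairs missing from the train set\n # are additionally stored as novel pairs; the selected test scrobbles\n # are removed from the data afterwards.\n 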
all_count = Scrobble.objects.count() \n test_count = int(cls.test_ratio * all_count ) \n train_count = all_count - test_count\n \n test_pairs = Scrobble.objects.order_by('-timestamp')[:test_count]\n\n min_stamp_test = test_pairs.aggregate(Min('timestamp'))['timestamp__min']\n train_pairs = Scrobble.objects.filter(timestamp__lt=min_stamp_test)\n \n # take 1/5 of the scrobbles, remove them and put them to test data \n # save to test, remove from build\n for scrobble in test_pairs.iterator():\n\n # create the test pair for the radio recommender\n val_pair = ArtistEvalPair(\n subj=scrobble.user,\n obj=scrobble.track.artist,\n expected_expectancy=EXPECTED_EXPECTANCY_LISTENED,\n )\n \n # if not in train pairs \n if not train_pairs.filter(user=val_pair.subj, track__artist=val_pair.obj).exists():\n\n # and not already in novel test_pairs\n if not NovelArtistEvalPair.objects.filter(subj=val_pair.subj, obj=val_pair.obj).exists():\n \n # add it also to novel test pairs\n n_val_pair = NovelArtistEvalPair(\n subj=val_pair.subj,\n obj=val_pair.obj,\n expected_expectancy=val_pair.expected_expectancy\n )\n n_val_pair.save()\n\n val_pair.save()\n \n # delete the scrobble in any case\n scrobble.delete()\n \n nontrivial_count = NovelArtistEvalPair.objects.count()\n \n print \"Test pairs selected from total %d pairs, %d of %d are non-trivial\" % (all_count, nontrivial_count, test_count) \n\n def get_success(self):\n \"\"\"See the base class for the documentation.\"\"\"\n return self.obtained_expectancy > SUCCESS_LIMIT\n \n\n \n \nclass ArtistEvalPair(BaseArtistEvaluationPair):\n \"\"\"An artist - user pair for validation purposes\n \n All test pairs, no matter if contained in training data or not. \n \"\"\" \n\n @classmethod\n def select_random(cls, i=0):\n \"\"\"UNUSED\"\"\"\n\n scrobble_count = Scrobble.objects.all().count()\n \n # get a queryset containing every n-th scrobble\n id_list = range(1 + i, scrobble_count, CROSS_VALIDATION_COUNT)\n qs_validation_scrobbles = Scrobble.objects.filter(id__in=id_list)\n \n # remove the scrobbles and add them to the class\n for scrobble in qs_validation_scrobbles.iterator():\n \n # create and save the validation pair\n val_pair = cls(\n subj=scrobble.user,\n obj=scrobble.track.artist,\n expected_expectancy=EXPECTED_EXPECTANCY_LISTENED) \n val_pair.save()\n \n # remove the scrobble\n scrobble.delete()\n\nclass NovelArtistEvalPair(BaseArtistEvaluationPair):\n \"\"\"Only test pairs that aren't included in the train set\"\"\"\n class Meta:\n unique_together = ('subj', 'obj')\n" }, { "alpha_fraction": 0.6770642399787903, "alphanum_fraction": 0.6880733966827393, "avg_line_length": 25.34883689880371, "blob_id": "133fb2b44e39a49aa2bcbc39b4e0b2716922b53c", "content_id": "c38b85c4a195f444066545a6bcbd5e58d58cf32c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1090, "license_type": "no_license", "max_line_length": 231, "num_lines": 43, "path": "/code/mahout/mahoutrec/unresystrecommend.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# params:\n# lastfm\n# flixster\n# travel\n\n\nLASTFM=false\nFLIXSTER=false\nTRAVEL=false\n\nfor param in $*;\ndo \n    case $param in\n        'lastfm')\n            LASTFM=true\n            ;;\n        'flixster')\n            FLIXSTER=true\n            ;;\n        'travel')\n            TRAVEL=true\n            ;; \n    esac\ndone\n\nif [ $LASTFM = true ]\nthen\n    mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystRecommend\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_train.csv 10 
/home/pcv/diplomka2/svn/trunk/code/adapter/csv/lastfm_recommendations.csv\" \nfi\n\nif [ $FLIXSTER = true ]\nthen\n    mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystRecommend\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_train.csv 10 /home/pcv/diplomka2/svn/trunk/code/adapter/csv/flixster_recommendations.csv\" \nfi\n\nif [ $TRAVEL = true ]\nthen\n    mvn -e exec:java -Dexec.mainClass=\"com.unresyst.UnresystRecommend\" -Dexec.args=\"/home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_train.csv 10 /home/pcv/diplomka2/svn/trunk/code/adapter/csv/travel_recommendations.csv\" \nfi\n\necho \"\"\n" }, { "alpha_fraction": 0.7937915921211243, "alphanum_fraction": 0.7937915921211243, "avg_line_length": 55.375, "blob_id": "09a23a1f8ea432afe93770da81d70f433cb4da6d", "content_id": "627982f133521056ba61ba62704fe4769be69279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 110, "num_lines": 8, "path": "/UnresystCD/code/adapter/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The home of the adapter django project. \n\nContained applications:\n - unresyst: the universal recommender application containing the implementation of the universal recommender\n - lastfm: an application containing data models and a Unresyst recommender for the last.fm dataset.\n - flixster: application for the flixster dataset, the same structure as lastfm\n - travel: application for the travel agency dataset, the same structure as lastfm\n\"\"\"\n" }, { "alpha_fraction": 0.6514772176742554, "alphanum_fraction": 0.6554832458496094, "avg_line_length": 31.639345169067383, "blob_id": "5cff1464dd01874dd53e68001fd78afe31c437b3", "content_id": "fb84b5471f3093d38a41c109c7e10a8e55cf2638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1997, "license_type": "no_license", "max_line_length": 93, "num_lines": 61, "path": "/UnresystCD/code/adapter/unresyst/models/algorithm.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Models whose instances are created by the algorithm package.\n\nThe preference between the subject and object in terms of \nthe predicted relationship.\n\"\"\"\n\nfrom django.db import models\n\nfrom base import BaseRelationshipInstance\n\nclass RelationshipPredictionInstance(BaseRelationshipInstance):\n    \"\"\"The preference between the subject and object in terms of \n    the predicted relationship.\n    \"\"\"\n    \n    expectancy = models.FloatField()\n    \"\"\"The probability of the predicted relationship appearance between \n    the subject and the object. A number from [0, 1]. \n    \"\"\"\n    \n    recommender = models.ForeignKey('unresyst.Recommender')\n    \"\"\"The recommender it belongs to\"\"\"\n    \n    is_uncertain = models.BooleanField(default=False)\n    \"\"\"Is the prediction uncertain? (We don't know anything about the pair)\"\"\"\n    \n    is_trivial = models.BooleanField(default=False)\n    \"\"\"Is the prediction trivial? 
(Already contained in the train data)\"\"\"\n    \n    class Meta:\n        app_label = 'unresyst' \n        unique_together = ('subject_object1', 'subject_object2', 'recommender')\n\n    def __unicode__(self):\n        return \"(%s, %s), %f\" % (self.subject_object1, self.subject_object2, self.expectancy)\n\n    \nclass ExternalPrediction(models.Model):\n    \"\"\"A prediction taken from an external recommender.\n    \"\"\" \n    \n    subj_id = models.IntegerField()\n    \"\"\"The id of the subject\"\"\"\n    \n    obj_id = models.IntegerField()\n    \"\"\"The id of the object\"\"\"\n    \n    expectancy = models.FloatField()\n    \"\"\"The probability of the predicted relationship appearance between \n    the subject and the object. A number from [0, 1]. \n    \"\"\" \n    \n    recommender = models.ForeignKey('unresyst.Recommender')\n    \"\"\"The recommender it belongs to\"\"\" \n    \n    class Meta:\n        app_label = 'unresyst' \n        unique_together = ('subj_id', 'obj_id', 'recommender')\n\n    def __unicode__(self):\n        return \"(%s, %s), %f\" % (self.subj_id, self.obj_id, self.expectancy) \n" }, { "alpha_fraction": 0.5508274435997009, "alphanum_fraction": 0.5508274435997009, "avg_line_length": 23.27777862548828, "blob_id": "b3e488fef73f2bf74b2bd4f721d639a4d867b199", "content_id": "4160764ea3b97a155d2f0c99fd77eab10980d7ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/UnresystCD/code/adapter/demo/urls.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Urls for the demo application\"\"\"\n\nfrom django.conf.urls.defaults import *\n\nurlpatterns = patterns('demo.views',\n    \n    # shoe recommendations\n    url(regex=r'^user/(?P<user_name>[\\w-]+)/shoe-recommendations/$',\n        view='view_recommendations', \n        name=\"recommendations\"\n    ),\n\n    # home page\n    url(regex=r'^$',\n        view='view_home_page',\n        name='home_page'\n    ) \n) \n" }, { "alpha_fraction": 0.5290427803993225, "alphanum_fraction": 0.5329925417900085, "avg_line_length": 31.786258697509766, "blob_id": "ad1cde0ed7345882ec924ec7c3e2862b9cfa53d7", "content_id": "7b0edd5c2b209e4c3de06222c5a0623a05db94da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4304, "license_type": "no_license", "max_line_length": 142, "num_lines": 131, "path": "/UnresystCD/code/adapter/flixster/recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The flixster recommender\"\"\"\n\nfrom django.db.models import Avg\n\nfrom unresyst import *\nfrom models import *\nfrom constants import *\n\ndef _rated_positively_generator():\n    \"\"\"The generator to the predicted relationship\"\"\"\n    for r in Rating.objects.filter(rating__gt=str(MIN_POSITIVE_RATING)):\n        yield (r.user, r.movie)\n\nclass MovieRecommender(Recommender):\n    \"\"\"The flixster movie recommender\"\"\"\n\n    name = \"Flixster Movie Recommender\"\n    \"\"\"The name\"\"\" \n    \n    subjects = User.objects\n    \"\"\"The subjects to whom the recommender will recommend.\"\"\"\n    \n    objects = Movie.objects\n    \"\"\"The objects that will be recommended.\"\"\" \n\n    predicted_relationship = PredictedRelationship( \n        name=\"User has rated the movie positively.\",\n        condition=None, \n        description=\"\"\"User %(subject)s has rated the %(object)s positively.\"\"\",\n        generator=_rated_positively_generator,\n    )\n    \"\"\"The relationship that will be predicted\"\"\"\n    \n    rules = (\n        # explicit rating\n        ExplicitSubjectObjectRule(\n            name=\"Movie rating.\",\n            \n            
description=\"User %(subject)s has rated %(object)s.\",\n \n # all pairs user, rated movie\n generator=lambda: [(r.user, r.movie) for r in Rating.objects.all()],\n \n # the number of stars divided by five\n expectancy=lambda s, o:float(Rating.objects.get(user=s, movie=o).rating) / MAX_STARS,\n ),\n )\n \"\"\"The rules\"\"\"\n\n relationships = (\n # users in friendship\n SubjectSimilarityRelationship(\n name=\"Users are friends.\",\n \n generator=lambda: [(f.friend1, f.friend2) for f in Friend.objects.all()], \n \n is_positive=True, \n \n weight=0.1, \n \n description=\"Users %(subject1)s and %(subject2)s are friends.\",\n ),\n )\n \"\"\"The relationships\"\"\"\n \n biases = (\n \n # people giving high ratings\n SubjectBias(\n name=\"Users giving high ratings.\",\n \n description=\"User %(subject)s gives high ratings.\",\n \n weight=1.0, \n \n is_positive=True,\n \n generator=lambda: User.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__gt=str(MIN_HIGH_RATING)), \n \n confidence=lambda user: user.rating_set.aggregate(Avg('rating'))['rating__avg'] - MIN_HIGH_RATING\n ),\n \n # highly rated movies\n ObjectBias(\n name=\"High-rated movies.\",\n \n description=\"Movie %(object)s is high-rated\",\n \n weight=1.0,\n \n is_positive=True,\n \n generator=lambda: Movie.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__gt=str(MIN_HIGH_RATING)),\n \n confidence=lambda movie: movie.rating_set.aggregate(Avg('rating'))['rating__avg'] - MIN_HIGH_RATING\n ),\n \n # people giving low ratings\n SubjectBias(\n name=\"Users giving low ratings.\",\n \n description=\"User %(subject)s gives low ratings.\",\n \n weight=0.25, \n \n is_positive=False,\n \n generator=lambda: User.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__lt=str(MAX_LOW_RATING)), \n \n confidence=lambda user: MAX_LOW_RATING - user.rating_set.aggregate(Avg('rating'))['rating__avg'] \n ),\n \n # low-rated movies\n ObjectBias(\n name=\"Low-rated movies.\",\n \n description=\"Movie %(object)s is low-rated\",\n \n weight=0.25,\n \n is_positive=False,\n \n generator=lambda: Movie.objects.annotate(avg_rating=Avg('rating__rating')).filter(avg_rating__lt=str(MAX_LOW_RATING)),\n \n confidence=lambda movie: MAX_LOW_RATING - movie.rating_set.aggregate(Avg('rating'))['rating__avg']\n ), \n \n \n )\n\nMovieRecommender.explicit_rating_rule = MovieRecommender.rules[0] \n \n" }, { "alpha_fraction": 0.6794871687889099, "alphanum_fraction": 0.692307710647583, "avg_line_length": 18.5, "blob_id": "e1ecc50b56bc9bcd857699933ad1343cd1eb6e3f", "content_id": "2c326e581ce012116baf0dbe73f6af9e0588ee6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/code/adapter/load.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# load the database from the given file\nmysql -u root adapter < $1\n" }, { "alpha_fraction": 0.5593509078025818, "alphanum_fraction": 0.5637439489364624, "avg_line_length": 39.85714340209961, "blob_id": "de67b101372b2d6ca574db8bb84fed49f465a847", "content_id": "fddd4bd233c7e3d4c7308f87998218274cd809ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11154, "license_type": "no_license", "max_line_length": 96, "num_lines": 273, "path": "/code/adapter/unresyst/compilator/get_first_compilator.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": 
"\"\"\"The basic compilator used in Unresyst\"\"\"\n\nfrom base import BaseCompilator\nfrom unresyst.constants import *\nfrom unresyst.models.aggregator import AggregatedRelationshipInstance\nfrom unresyst.models.abstractor import RelationshipInstance\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\n\nclass GetFirstCompilator(BaseCompilator):\n \"\"\"Compilator using the first relationship it finds to create a prediction\"\"\"\n \n def __init__(\n self, \n depth=DEFAULT_COMPILATOR_DEPTH, \n breadth=DEFAULT_COMPILATOR_BREADTH, \n pair_depth=DEFAULT_COMPILATOR_PAIR_DEPTH):\n \"\"\"The initializer\"\"\" \n \n super(GetFirstCompilator, self).__init__(\n combinator=None, \n depth=depth, \n breadth=breadth, \n pair_depth=pair_depth) \n\n\n def compile_all(self, recommender_model):\n \"\"\"Compile preferences, known relationships + similarities.\n \"\"\"\n self.compile_aggregates(recommender_model)\n\n print \" Compiling similar objects.\"\n \n # if subjects == objects\n if recommender_model.are_subjects_objects:\n\n # take similar on both sides\n self._compile_similar_subjectobjects(recommender_model)\n\n else:\n \n # take similar to the ones we already have (content-based recommender)\n self._compile_similar_objects(recommender_model)\n print \" Done. Compiling similar subjects.\"\n\n # take liked objects of similar users (almost collaborative filtering)\n self._compile_similar_subjects(recommender_model) \n \n\n #TODO pryc, asi nebude potreba\n def compile_aggregates(self, recommender_model):\n \"\"\"Create predictions from aggregates\"\"\" \n \n # filter only S-O or SO-SO aggregates\n #\n rel_type = RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT \\\n if recommender_model.are_subjects_objects else \\\n RELATIONSHIP_TYPE_SUBJECT_OBJECT \n \n qs_aggr = AggregatedRelationshipInstance.objects.filter(\n recommender=recommender_model,\n relationship_type=rel_type)\n\n # go through the aggregates, create predictions and save them\n for aggr in qs_aggr.iterator():\n\n # order the arguments as they should be \n so1, so2 = self._order_in_pair(aggr.subject_object1, aggr.subject_object2)\n\n prediction = RelationshipPredictionInstance(\n subject_object1=so1,\n subject_object2=so2,\n description=aggr.description,\n expectancy=aggr.expectancy,\n recommender=recommender_model)\n \n prediction.save() \n \n print \" %d aggregated predictions created\" % qs_aggr.count()\n \n \n def _compile_similar_objects(self, recommender_model):\n \"\"\"Create predictions by adding objects similar to ones the objects\n in predicted_relationship - that's a content-based recommender\n \"\"\" \n \n self._compile_similar_entities(\n recommender_model=recommender_model,\n start_entity_type=ENTITY_TYPE_SUBJECT)\n \n def _compile_similar_subjects(self, recommender_model):\n \"\"\"Create predictions by adding objects that similar subjects liked\n - that's collaborative filtering\n \"\"\" \n \n self._compile_similar_entities(\n recommender_model=recommender_model,\n start_entity_type=ENTITY_TYPE_OBJECT)\n\n\n def _compile_similar_subjectobjects(self, recommender_model):\n \"\"\"Create predictions based on similarity for recommenders where \n subjects==objects\n \"\"\"\n \n # firstly the normal direction\n self._compile_similar_entities(\n recommender_model=recommender_model,\n start_entity_type=ENTITY_TYPE_SUBJECTOBJECT,\n reverse=False)\n \n # secondly the opposite\n self._compile_similar_entities(\n recommender_model=recommender_model,\n start_entity_type=ENTITY_TYPE_SUBJECTOBJECT,\n reverse=True)\n\n def 
_order_in_pair(self, arg1, arg2):\n        \"\"\"Swap the arguments in the rule/relationships so that the first\n        has a lower id than the second (for subjectobjects), or the subject\n        is the first (for others)\n        \"\"\"\n        # for subjectobject return ordered by pk\n        if arg1.entity_type == ENTITY_TYPE_SUBJECTOBJECT:\n            if arg2.pk < arg1.pk:\n                return (arg2, arg1)\n            return (arg1, arg2)\n        # for others return subject first \n        if arg2.entity_type == ENTITY_TYPE_SUBJECT:\n            return (arg2, arg1)\n        return (arg1, arg2)\n\n    SIMILARITY_RELATIONSHIP_TYPES = {\n        ENTITY_TYPE_SUBJECT: RELATIONSHIP_TYPE_OBJECT_OBJECT,\n        ENTITY_TYPE_OBJECT: RELATIONSHIP_TYPE_SUBJECT_SUBJECT,\n        ENTITY_TYPE_SUBJECTOBJECT: RELATIONSHIP_TYPE_SUBJECTOBJECT_SUBJECTOBJECT\n    } \n    \"\"\"Dictionary entity type: relationship entity type.\n    Key is the entity type where the traversing for similarity starts, value\n    is the relationship type that should be traversed for similarity.\n    \"\"\"\n    \n    ORDERING = {\n        ENTITY_TYPE_OBJECT: \"subject_object1__pk\",\n        ENTITY_TYPE_SUBJECT: \"subject_object2__pk\",\n        ENTITY_TYPE_SUBJECTOBJECT: \"subject_object1__pk\",\n    }\n    \"\"\"Dictionary entity type (starting) - name of the attribute in PredictedRelationship, \n    that should be used for sorting\"\"\"\n    \n    \n    def _compile_similar_entities(self, recommender_model, start_entity_type, reverse=False):\n        \"\"\"Create predictions from start_entity_type objects, looking for \n        similar entities in end_entity_type\n        \n        @type recommender_model: models.common.Recommender\n        @param recommender_model: the model for which the predictions\n            should be built\n\n        @type start_entity_type: str\n        @param start_entity_type: the entity type, where to start searching for\n            similar entities. E.g. if start_entity_type is 'S', similar 'O' \n            will be added to predictions for each 'S'.\n\n        @type reverse: bool\n        @param reverse: relevant only if start_entity_type=='SO', indicates \n            whether the relationships will be traversed in the reverse order. 
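For subjectobject recommenders the method is therefore run twice, once\n            in each direction, so that similar entities are collected for both\n            members of each stored pair.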
\n \"\"\" \n \n # get the predictions for this recommender\n qs_predictions = RelationshipPredictionInstance.objects.filter(\n recommender=recommender_model)\n\n ordering = self.ORDERING[start_entity_type]\n \n # go through the predicted relationship instances \n # (objects that subjects liked)\n # ordered as needed\n qs_pred_rel_instances = RelationshipInstance\\\n .filter_predicted(recommender_model)\\\n .order_by(ordering)\\\n .select_related(depth=1)\n \n # the caching variable \n last_fin = None \n last_qs = None \n \n print \"Predicted relationship count: %d\" % qs_pred_rel_instances.count()\n \n i = 0\n \n # count neighbourhood\n count_n = 0\n \n # count all \n count_all = 0\n \n for pred_inst in qs_pred_rel_instances.iterator():\n \n i += 1\n # get the subject and object from the relationship instance\n start, fin = (pred_inst.subject_object1, pred_inst.subject_object2) \\\n if pred_inst.subject_object1.entity_type == start_entity_type \\\n else (pred_inst.subject_object2, pred_inst.subject_object1)\n \n # if they are subjectobjects and reversed swap them\n if start_entity_type == ENTITY_TYPE_SUBJECTOBJECT and reverse:\n start, fin = fin, start\n \n similarity_relationship_type = self.SIMILARITY_RELATIONSHIP_TYPES[start_entity_type]\n \n # if the ending entity is the same as the last time, use the cached qs\n if last_fin == fin:\n qs_similar_rels = last_qs\n else: \n # get objects similar to obj (whole relationships) - only breadth highest\n qs_similar_rels = AggregatedRelationshipInstance\\\n .get_relationships(fin)\\\n .filter(relationship_type=similarity_relationship_type)\\\n .order_by('-expectancy')\n \n # current count of theoretically available predictions\n cur_count_all = qs_similar_rels.count()\n \n qs_similar_rels = qs_similar_rels[:self.breadth]\\\n .select_related(depth=1)\n\n # keep it for the next time\n last_fin = fin\n last_qs = qs_similar_rels\n \n\n count = qs_similar_rels.count()\n\n count_all += cur_count_all\n count_n += count\n \n if count and i % 1000 == 0:\n print \"similar count: %d; relationships processed: %d\" % (count, i)\n import gc; gc.collect()\n \n # go through them \n for similar_rel in qs_similar_rels.iterator():\n \n # get the object similar to obj from the relationship\n similar_fin = similar_rel.get_related(fin)\n\n # find out whether there exists a prediction for the pair\n qs_pair_predictions = RelationshipPredictionInstance\\\n .filter_relationships(\n object1=start,\n object2=similar_fin,\n queryset=qs_predictions)\n\n # if exists, keep it there, ignore\n if qs_pair_predictions.exists():\n continue\n \n # order the arguments as they should be \n so1, so2 = self._order_in_pair(start, similar_fin) \n\n # if not, create it with the attributes of the similarity \n # relationship instance\n prediction = RelationshipPredictionInstance(\n subject_object1=so1,\n subject_object2=so2,\n description=similar_rel.description,\n expectancy=similar_rel.expectancy,\n recommender=recommender_model)\n \n prediction.save() \n \n print \"For starting entity type %s, %d out of %d possible relationships created\" \\\n % (start_entity_type, count_n, count_all)\n" }, { "alpha_fraction": 0.5061048865318298, "alphanum_fraction": 0.5157351493835449, "avg_line_length": 34.650306701660156, "blob_id": "b7c406a6e4ef6fc0fe902a5c1c7208eac1bf1810", "content_id": "d2021e3563ad203310c773135625624c376a9466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5815, "license_type": "no_license", "max_line_length": 146, 
"num_lines": 163, "path": "/UnresystCD/code/adapter/lastfm/recbckp.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": " rules = ( \n \n # don't recommend artists with male-specific tags to females\n SubjectObjectRule(\n name=\"Don't recommend male music to female users.\",\n\n # the user is a female and the artist was tagged by\n # a male-specific tag\n condition=lambda user, artist: user.gender == 'f' and \\\n artist.artisttag_set.filter(tag__gender_specific='m').exists()\n \n # it's a negative rule\n is_positive=False,\n \n weight=0.5,\n \n # the more male-specific tags the artist has, the higher is \n # the rule confidence. Normalized by the artist tag count\n confidence=lambda user, artist: float(\n artist.artisttag_set.filter(tag__gender_specific='m').count())/ \\\n artist.artisttag_set.count(),\n \n description=\"Artist %(object)s isn't recommended to %(subject)s, \" +\n \"because the artist is considered male-specific.\"\n )\n \n \n # users of similar age are similar\n SubjectSimilarityRule(\n name=\"Users with similar age.\",\n \n # both users have given their age and the difference \n # is lower than five\n condition=lambda user1, user2: \n user1.age and user2.age and abs(user1.age - user2.age) <= 5,\n \n is_positive=True, \n \n weight=0.5,\n \n # a magic linear confidence function\n confidence=lambda user1, user2: \n 1 - float(abs(user1.age - user2.age))/AGE_DIFFERENCE,\n \n description=\"Users %(subject1)s and %(subject2)s are about \" + \n \"the same age.\"\n ), \n \n # artists sharing some tags are similar\n ObjectSimilarityRule(\n name=\"Artists sharing some tags.\",\n\n # both artists have some tags and they share at least one tag\n # generator - take artists having some tags, compare them one to one\n generator=_tag_similarity_generator,\n \n \n # it's a positive rule\n is_positive=True,\n \n weight=0.5,\n \n # The more tags the artists have in common, the higher is \n # the similarity confidence\n confidence=lambda artist1, artist2: \\\n float(artist1.artisttag_set.filter(\n tag__id__in=artist2.artisttag_set.values_list('tag__id')\n ).count()) / \\\n min(artist1.artisttag_set.count(), artist2.artisttag_set.count()),\n \n description=\"Artists %(object1)s and %(object2)s are similar \" + \\\n \"because they share some tags.\"\n ), \n \n \n # if the users were registered in a similar period, the're similar\n SubjectSimilarityRule(\n name='Users registered in similar time.',\n \n condition=lambda s1, s2:\n s1.registered and s2.registered and \\\n abs(s1.registered.toordinal() - s2.registered.toordinal()) < REGISTERED_DIFFERENCE / 5,\n \n is_positive=True,\n weight=0.5,\n \n confidence=lambda s1, s2:\n 1 - float(abs(s1.registered.toordinal() - s2.registered.toordinal()))/REGISTERED_DIFFERENCE,\n \n description=\"Users %(subject1)s and %(subject2)s were registered in similar times\",\n ),\n )\n \n cluster_sets = (\n\n \n # user - gender\n SubjectClusterSet(\n \n name='User gender.',\n \n weight=0.5,\n \n # users that have a gender (filled)\n filter_entities=User.objects.exclude(gender=''),\n \n get_cluster_confidence_pairs=lambda user: ((user.gender, 1),),\n \n description=\"%(subject)s's gender is %(cluster)s.\"\n \n ),\n \n # user - country\n SubjectClusterSet(\n \n name='User country.',\n \n weight=0.5,\n \n # users that have country filled\n filter_entities=User.objects.filter(country__isnull=False),\n \n get_cluster_confidence_pairs=lambda user: ((user.country.name, 1),),\n \n description=\"%(subject)s is from %(cluster)s.\"\n ),\n )\n \n biases = (\n 
ObjectBias(\n name=\"Artists whose tracks were listened the most.\",\n description=\"%(object)s is much listened.\",\n weight=0.5,\n is_positive=True,\n # users whose tracks were listened more than the half of the most listened\n generator=lambda: Artist.objects.annotate(listen_count=Count('track__scrobble')).filter(listen_count__gt=MAX_SCROBBLE_COUNT/7),\n \n # the number of scrobbles for the artist divided by the max.\n confidence=lambda a: float(a.track_set.annotate(scrobble_count=Count('scrobble')).aggregate(Sum('scrobble_count')))/MAX_SCROBBLE_COUNT\n ),\n \n ) \n \n \n \n \n \n \n \n # tag clusters\n ObjectClusterSet(\n\n name=\"Artist tags.\",\n\n weight=0.5,\n \n # artists that are tagged by a tag that another artist also has\n filter_entities=Artist.objects.annotate(shared_count=Count('artisttag__tag__artisttag')).filter(shared_count__gt=1).distinct(),\n \n get_cluster_confidence_pairs=_get_artist_tag_pairs,\n \n description=\"%(object)s was tagged as %(cluster)s.\",\n ),\n" }, { "alpha_fraction": 0.5247273445129395, "alphanum_fraction": 0.5608845353126526, "avg_line_length": 26.657024383544922, "blob_id": "94c6ae64ba47d9b97d88b8812b92c95e32dfa0b7", "content_id": "a1c2c5fae057744714787879bd4a97cd774cc388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6693, "license_type": "no_license", "max_line_length": 159, "num_lines": 242, "path": "/UnresystCD/code/adapter/lastfm/save_data.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Saving data from the last.fm datasets\"\"\"\n\nimport csv\nimport os\nfrom datetime import datetime\nfrom dateutil.parser import parse\nfrom dateutil.tz import tzutc\n\nfrom models import *\n\nTZ = tzutc()\n\"\"\"The used timezone\"\"\"\n\nGENDER_SPECIFIC_TAGS = {\n 'punk': 'm',\n 'metal': 'm',\n 'punk rock': 'm',\n 'hardcore': 'm',\n 'hard rock': 'm',\n 'industrial': 'm',\n 'post-punk': 'm',\n 'post-rock': 'm',\n 'heavy metal': 'm',\n 'metalcore': 'm',\n 'death metal': 'm',\n 'noise': 'm',\n 'hardcore punk': 'm',\n\n 'female': 'f',\n 'beautiful': 'f',\n 'Love': 'f',\n 'lovely': 'f',\n 'sweet': 'f',\n 'singer-songwriter': 'f',\n 'singer songwriter': 'f',\n 'woman': 'f',\n 'mellow': 'f',\n 'dream pop': 'f',\n 'soft': 'f',\n 'emotional': 'f',\n 'cut': 'f',\n}\n\"\"\"Tags that are specific for the given gender.\n\"\"\"\n\n\"\"\"\n7141 artists\n992 users\n19903 tracks\n22666 scrobbles\n\n\n\n# relative paths to the dataset files\nfilename_scrobbles = '../../datasets/lastfm/1K/tracks30000.tsv'\nfilename_users = '../../datasets/lastfm/1K/userid-profile.tsv'\nfilename_tags = '../../datasets/lastfm/Lastfm-ArtistTags2007/artist_tags200K.tsv'\n\"\"\"\n\n\"\"\"\n2348 artists\n100 users\n5057 tracks\n5532 scrobbles\n9120 tags\n28588 tagartists\n322 tagged artists\n\nmax age 38\nmin age 17\n\"\"\"\n# relative paths to the dataset files\nfilename_scrobbles = '../../datasets/lastfm/mini/scrobbles.tsv'\nfilename_users = '../../datasets/lastfm/mini/users.tsv'\nfilename_tags = '../../datasets/lastfm/mini/tags.tsv'\n\nseparator = '\\t'\n\ndef save_data():\n \"\"\"Save the data\"\"\"\n _parse_users(filename_users)\n print \"Users saved.\"\n \n _parse_scrobbles(filename_scrobbles)\n print \"Scrobbles saved.\"\n \n _parse_tags(filename_tags)\n print \"Tags saved.\"\n\n\"\"\"\nuser_000001\tm\t\tJapan\tAug 13, 2006\nuser_000002\tf\t\tPeru\tFeb 24, 2006\nuser_000003\tm\t22\tUnited States\tOct 30, 2005\n\"\"\"\n\ndef _parse_users(filename):\n \"\"\"Parse the user csv file\"\"\"\n \n filename = 
_get_abs_path(filename)\n\n    # open the csv reader\n    reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n    \n    for user_id, gender, age, country, reg_date in reader:\n        \n        # parse the user id\n        user_id = _parse_user_id(user_id)\n        \n        # replace the empty string by None\n        age = None if not age else age\n        \n        # find or create the country\n        if country:\n            country_model, created = Country.objects.get_or_create(name=country)\n        else:\n            country_model = None\n        \n        # parse the date\n        reg_date = None if not reg_date else datetime.strptime(reg_date, '%b %d, %Y')\n        \n        # create and save the user\n        user = User(\n            id=user_id,\n            gender=gender,\n            age=age,\n            country=country_model,\n            registered=reg_date)\n        \n        user.save()\n\n\"\"\"\n user_000639 2009-04-08T01:57:47Z 15676fc4-ba0b-4871-ac8d-ef058895b075 The Dogs D'Amour 6cc252d0-3f42-4fd3-a70f-c8ff8b693aa4 How Do You Fall in Love Again\n\"\"\"\n\ndef _parse_scrobbles(filename):\n    \"\"\"Parse the scrobbles by the users\"\"\"\n\n    filename = _get_abs_path(filename)\n    \n    reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n    for user_id, timestamp, artist_guid, artist_name, track_guid, track_name in reader:\n\n        if reader.line_num % 1000 == 0:\n            print '%s lines processed' % reader.line_num \n        \n        # if the track isn't in the musicbrainz db, continue\n        if not track_guid or not artist_guid:\n            continue\n        \n        # parse the user id and find the user\n        user_id = _parse_user_id(user_id)\n        user = User.objects.get(id=user_id)\n        \n        # parse the date and put it to the UTC timezone\n        timestamp = parse(timestamp).astimezone(TZ).replace(tzinfo=None)\n        \n        # get or create the artist\n        artist, created = Artist.objects.get_or_create(\n            guid=artist_guid,\n            defaults={'name': artist_name})\n        \n        # get or create the track\n        track, created = Track.objects.get_or_create(\n            guid=track_guid, \n            defaults= {'artist': artist, 'name': track_name})\n        \n        \n        # create and save the scrobble\n        scrobble = Scrobble(\n            user=user,\n            timestamp=timestamp,\n            track=track)\n        \n        scrobble.save() \n        \n\n\"\"\"\n 11eabe0c-2638-4808-92f9-1dbd9c453429<sep>Deerhoof<sep>american<sep>14\n 11eabe0c-2638-4808-92f9-1dbd9c453429<sep>Deerhoof<sep>animals<sep>5\n 11eabe0c-2638-4808-92f9-1dbd9c453429<sep>Deerhoof<sep>art punk<sep>2\n\"\"\" \n\ndef _parse_tags(filename):\n    \"\"\"Parse the tags\"\"\"\n\n    filename = _get_abs_path(filename)\n    \n    reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n    \n    last_artist = Artist.objects.all()[0]\n    \n    for artist_guid, artist_name, tag_name, count in reader:\n\n        if reader.line_num % 1000 == 0:\n            print '%s lines processed' % reader.line_num \n        \n        # some caching for more speed\n        if artist_guid == last_artist.guid:\n            artist = last_artist\n        else: \n            # try finding the artist\n            qs_artist = Artist.objects.filter(guid=artist_guid)\n            \n            # if not found, skip it\n            if not qs_artist:\n                continue\n            \n            artist = qs_artist[0]\n            \n            # if the name doesn't match, skip it\n            if artist.name != artist_name:\n                continue\n        \n        # get the gender specificity\n        if GENDER_SPECIFIC_TAGS.has_key(tag_name):\n            gender_specific = GENDER_SPECIFIC_TAGS[tag_name]\n        else:\n            gender_specific = ''\n        \n        # get or create the tag\n        tag, created = Tag.objects.get_or_create(\n            name=tag_name, \n            defaults={'gender_specific': gender_specific,}) \n        \n        # create and save the tag for the artist \n        artist_tag = ArtistTag(\n            artist=artist,\n            tag=tag,\n            count=count)\n        \n        artist_tag.save() \n        \n        last_artist = artist \n    \n    \ndef _get_abs_path(filename):\n    
\"\"\"Get the absolute path from the relative\"\"\"\n return os.path.join(os.path.dirname(__file__), filename)\n \ndef _parse_user_id(user_id):\n \"\"\"Get the integer id from the id string\"\"\"\n return int(user_id.split('_')[1])\n" }, { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 27.66666603088379, "blob_id": "0cba3557c9d5312194d5d95f8c3b15e006820481", "content_id": "7301d27729a9c8643e0f847085cd196f315d649d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/UnresystCD/code/adapter/travel/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Application holding the data and a recommender for \nthe travel agency dataset.\n\"\"\"\n" }, { "alpha_fraction": 0.7663999795913696, "alphanum_fraction": 0.7663999795913696, "avg_line_length": 38.0625, "blob_id": "2829d1381995c103e68d0afad6485f4b995717a9", "content_id": "7e1456486a0e2d20d616f2940869865ca593246f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 80, "num_lines": 16, "path": "/code/adapter/lastfm/mahout_recommender.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The representation of the Mahout recommender in Unresyst.\"\"\"\n\nfrom unresyst.recommender.external_recommender import ExternalRecommender\nfrom recommender import ArtistRecommender, NovelArtistRecommender\n\nclass MahoutArtistRecommender(ExternalRecommender, ArtistRecommender):\n \"\"\"An external artist recommender - both novel and non-novel artists\"\"\"\n \n name = \"Mahout artist recommender\"\n \"\"\"The name\"\"\"\n \nclass NovelMahoutArtistRecommender(ExternalRecommender, NovelArtistRecommender):\n \"\"\"External recommender for novel artists only\"\"\"\n \n name = \"Novel Mahout artist recommender\"\n \"\"\"The name\"\"\"\n" }, { "alpha_fraction": 0.7530864477157593, "alphanum_fraction": 0.7530864477157593, "avg_line_length": 26, "blob_id": "2cc663c22636ab3d282b4a3c9e490836aa81eb0f", "content_id": "447bdfd1b9fd756e0f53ccfe0aae9a1f97c85ff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/code/adapter/flixster/__init__.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Application holding the data and a recommender for \nthe Flixster dataset.\n\"\"\"\n" }, { "alpha_fraction": 0.5634920597076416, "alphanum_fraction": 0.5634920597076416, "avg_line_length": 30.25, "blob_id": "9536a8e1ff67ca685db02dbfb9009d15abafec92", "content_id": "2cdd98fc94573b198df43eb36e8ea8ab59478acf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 126, "license_type": "no_license", "max_line_length": 75, "num_lines": 4, "path": "/code/adapter/diff.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# diffovat v meldu zmenene soubory.\nsvn diff --diff-cmd meld `svn stat | grep \"M \" | sed -e 's/M //'`\n\n" }, { "alpha_fraction": 0.6452485918998718, "alphanum_fraction": 0.6459249258041382, "avg_line_length": 32.98850631713867, "blob_id": "ab43235feb01e51bb9cf20b564b11fb6ae1c6eb9", "content_id": "9203604c83a4ceb6773366cba34dbea8b44ca395", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2957, "license_type": "no_license", "max_line_length": 100, "num_lines": 87, "path": "/UnresystCD/code/adapter/unresyst/recommender/metrics.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The functions used as metrics for the evaluators\"\"\"\nimport math\n\n@classmethod\ndef rmse(cls):\n \"\"\"Count and print the result of the RMSE on the evaluation pair model\n \n @type EvaluationPairModel: a model, BaseEvaluationPair subclass\n @param EvaluationPairModel: a model to evaluate on \n \n @rtype: float\n @return: the RMSE of the predictions\n \"\"\"\n \n # get the expected, obtained expectancy pairs\n exp_pairs = cls.EvaluationPairModel.objects.values_list(\n 'expected_expectancy', 'obtained_expectancy') \n \n # count the sum of the squares of the differences\n square_sum = sum([pow(exp_e - obt_e, 2) for exp_e, obt_e in exp_pairs])\n \n # get the rmse\n ret_rmse = math.sqrt(square_sum / len(exp_pairs))\n print \"RMSE: %f\" % ret_rmse\n \n return ret_rmse\n\n \n@classmethod \ndef precision_recall(cls, count):\n \"\"\"Count and print the Precision/Recall measure. \n \n Both are counted as per-user average\n \n Hit: an object that is both in the recommendation and in the evaluation set.\n \n Precision: number of hits / recommendation list size (=count)\n If a hit appears for an subject-object that is multiple time present in the\n test set, it's counted only once.\n \n Recall: number of hits / number of objects for the subject in the evaluation test\n If a hit appears for an subject-object that is multiple time present in the\n test set, it's counted multiple times.\n \n @type count: int\n @param count: the number of objects in recommendation lists\n \n @rtype: pair float float\n @return: precision, recall \n \"\"\"\n \n # hits are successful pairs\n qs_hit_pairs = cls.EvaluationPairModel.objects.filter(is_successful=True)\n \n # take ids of subjects from the test pairs \n subj_id_list = cls.EvaluationPairModel.objects.values_list('subj__pk', flat=True).distinct() \n \n subj_count = len(subj_id_list)\n \n unique_hit_pairs = qs_hit_pairs.values_list('subj__pk', 'obj__pk').distinct()\n \n # count the precision as explained in docstring\n # as count is the same for all users this can be counted at once\n precision = float(len(unique_hit_pairs)) / (subj_count * count)\n print \"Precision: %f\" % precision\n \n recall_sum = 0\n \n # count the average recall / user\n for subj_id in subj_id_list:\n \n # count the parameters per user \n user_hit_count = qs_hit_pairs.filter(subj__pk=subj_id).count()\n possible_hit_count = cls.EvaluationPairModel.objects.filter(subj__pk=subj_id).count()\n \n subj_recall = float(user_hit_count) / possible_hit_count\n\n #print \"%d precision: %f\" % (subj_id, float(user_hit_count)/count)\n #print \"%d recall: %f\" % (subj_id, subj_recall)\n \n recall_sum += subj_recall\n \n recall = recall_sum / subj_count \n\n print \"Recall: %f\" % recall\n \n return (precision, recall)\n" }, { "alpha_fraction": 0.5109958052635193, "alphanum_fraction": 0.5757350921630859, "avg_line_length": 21.685392379760742, "blob_id": "65928876245a1c3845fe7a61dea8aea090510c12", "content_id": "1bf2649748b12465206a5401d6fa857b5b29019b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4047, "license_type": "no_license", "max_line_length": 90, "num_lines": 178, "path": "/code/adapter/flixster/save_data.py", "repo_name": 
"cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"Saving data from csv files\"\"\"\nimport csv\nimport os\n\nfrom django.db.models import Count\n\nfrom models import *\n\n# relative paths to the dataset files\nfilename_links = '../../datasets/flixster/mini/links.tsv'\nfilename_ratings = '../../datasets/flixster/mini/ratings.tsv'\n\nseparator = '\\t'\n\n\"\"\"\nBefore the removal:\n\n>>> User.objects.count()\n1269\n>>> Friend.objects.count()\n4439\n>>> Movie.objects.count()\n4237\n>>> Rating.objects.count()\n14195\n\nAfter the removal:\n\n>>> User.objects.count()\n418\n>>> Friend.objects.count()\n1051\n\n>>> MovieRecommenderEvaluator.select_evaluation_pairs()\n2839 test pairs selected from total 14195 pairs\n\"\"\"\n\nmax_user_id = 1520\n\ndef _get_abs_path(filename):\n \"\"\"Get the absolute path from the relative\"\"\"\n return os.path.join(os.path.dirname(__file__), filename)\n\ndef save_data():\n \"\"\"Save the data\"\"\"\n _parse_links(filename_links)\n print \"Users and links saved.\"\n \n _parse_ratings(filename_ratings)\n print \"Ratings saved.\" \n \n # remove things we don't like\n \n # remove users without rating - they can't be tested anyhow \n User.objects.annotate(rating_count=Count('rating')).filter(rating_count=0).delete()\n print \"Disliked stuff deleted.\" \n\n\n\"\"\"\n6\t1349\n6\t1370\n6\t1375\n6\t1761\n6\t1770\n6\t1775\n6\t1785\n6\t1797\n6\t1803\n6\t1810\n6\t1819\n6\t1825\n6\t1831\n\"\"\" \n \ndef _parse_links(filename):\n \"\"\"Parse the user csv file\"\"\"\n \n filename = _get_abs_path(filename)\n \n last_user = User(id=0) \n\n # open the csv reader\n reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n \n for user_id, friend_id in reader:\n \n if reader.line_num % 5000 == 0:\n print '%s lines processed' % reader.line_num \n \n # parse the user ids\n user_id = int(user_id)\n friend_id = int(friend_id) \n \n # skip high friends\n if friend_id > max_user_id:\n continue\n \n # if it's about the last user, use him, otherwise create him\n if user_id == last_user.id: \n user = last_user\n else:\n user, created = User.objects.get_or_create(id=user_id)\n \n # get or create the friend\n friend, created = User.objects.get_or_create(id=friend_id) \n \n f1, f2 = (user, friend) if user.id < friend.id else (friend, user)\n \n # create the friendship\n Friend.objects.get_or_create(\n friend1=f1,\n friend2=f2)\n \n # save the user for the next time\n last_user = user\n\"\"\"\n6\t57699\t3\n7\t18858\t4\n8\t20644\t4\n8\t43310\t4\n8\t5806\t4\n9\t33430\t4\n11\t12145\t4\n11\t12913\t3\n11\t12920\t5\n11\t14034\t3.5\n11\t14277\t4\n11\t14506\t4\n11\t14889\t5\n11\t15154\t3.5\n11\t15815\t5\n11\t16476\t4.5\n11\t18181\t5\n11\t18874\t5\n\"\"\"\n\ndef _parse_ratings(filename):\n \"\"\"Parse the user csv file\"\"\"\n\n filename = _get_abs_path(filename)\n \n last_user = User(id=0) \n\n # open the csv reader\n reader = csv.reader(open(filename, \"rb\"), delimiter=separator, quoting=csv.QUOTE_NONE)\n \n for user_id, movie_id, rating in reader:\n \n if reader.line_num % 1000 == 0:\n print '%s lines processed' % reader.line_num \n \n # parse the ids and rating\n user_id = int(user_id)\n movie_id = long(movie_id) \n rating = rating\n\n # if it's about the last user, use him, otherwise get him\n if user_id == last_user.id: \n user = last_user\n else:\n # take only users we already have\n user = User.objects.filter(id=user_id) \n if not user:\n continue\n \n user=user[0]\n \n # get or create the movie\n movie, created = 
Movie.objects.get_or_create(id=movie_id)\n \n # create the rating\n Rating.objects.create(\n user=user, \n movie=movie,\n rating=rating)\n \n # save the user for the next time\n last_user = user\n \n" }, { "alpha_fraction": 0.6356180310249329, "alphanum_fraction": 0.6367896795272827, "avg_line_length": 38.39230728149414, "blob_id": "ceb6010c1aebeea284fd0d160826418e3b6dfca1", "content_id": "14522cb466bf528486bf4859ebe693234d0f7c19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5121, "license_type": "no_license", "max_line_length": 108, "num_lines": 130, "path": "/code/adapter/unresyst/algorithm/base.py", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "\"\"\"The base abstract class for all recommender algorithms.\"\"\"\n\nfrom unresyst.constants import *\nfrom unresyst.models.algorithm import RelationshipPredictionInstance\n\nclass BaseAlgorithm(object):\n \"\"\"The interface provided to the other packages. The methods in the interface\n are implemented by the subclasses\"\"\"\n \n def __init__(self, inner_algorithm=None):\n \"\"\"The initializer\"\"\"\n \n self.inner_algorithm = inner_algorithm\n \"\"\"The inner algorithm\"\"\"\n \n # Build phase:\n #\n \n def build(self, recommender_model):\n \"\"\"Build the recommender, so that the given relationship can be\n predicted easily.\n \n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. \n\n \"\"\"\n if self.inner_algorithm:\n self.inner_algorithm.build(recommender_model=recommender_model)\n \n \n # Recommend phase:\n #\n \n def get_relationship_prediction(self, recommender_model, dn_subject, dn_object, remove_predicted):\n \"\"\"Get the prediction of the appearance of the predicted_relationship.\n\n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. \n \n @type dn_subject: models.common.SubjectObject\n @param dn_subject: the domain neutral subject\n \n @type dn_object: models.common.SubjectObject\n @param dn_object: the domain neutral object\n\n @type remove_predicted: bool\n @param remove_predicted: should pairs already having \n predicted_relationship between them get the special expectancy value?\n \n @rtype: models.algorithm.RelationshipPredictionInstance\n @return: the model instance for the prediction \n \"\"\"\n if self.inner_algorithm:\n return self.inner_algorithm.get_relationship_prediction(\n recommender_model=recommender_model, \n dn_subject=dn_subject, \n dn_object=dn_object, \n remove_predicted=remove_predicted)\n\n \n def get_recommendations(self, recommender_model, dn_subject, count, expectancy_limit, remove_predicted):\n \"\"\"Get the recommendations for the given subject\n\n @type recommender_model: models.common.Recommender\n @param recommender_model: the recommender whose instances should\n be aggregated. 
\n\n @type dn_subject: models.common.SubjectObject\n @param dn_subject: the domain neutral subject\n\n @type count: int\n @param count: how many recommendations should be obtained\n \n @type expectancy_limit: float\n @param expectancy_limit: the lower limit for object expectancy, only\n objects with expectancy higher than limit are recommended.\n \n @type remove_predicted: bool\n @param remove_predicted: should pairs already having \n predicted_relationship between them be removed from recommendations?\n \n @rtype: a list of models.algorithm.RelationshipPredictionInstance\n @return: the predictions of the objects recommended to the subject\n \"\"\"\n if self.inner_algorithm:\n return self.inner_algorithm.get_recommendations(\n recommender_model=recommender_model,\n dn_subject=dn_subject,\n count=count,\n expectancy_limit=expectancy_limit,\n remove_predicted=remove_predicted)\n\n @staticmethod\n def _get_uncertain_prediction(recommender_model, dn_subject, dn_object):\n \"\"\"Get the prediction for a pair for which nothing is known\"\"\"\n \n return RelationshipPredictionInstance(\n subject_object1=dn_subject,\n subject_object2=dn_object,\n description=recommender_model.random_recommendation_description,\n recommender=recommender_model,\n expectancy=UNCERTAIN_PREDICTION_VALUE,\n is_uncertain=True\n ) \n\n @staticmethod\n def _get_already_in_relatinship_prediction(recommender_model, predicted_relationship):\n \"\"\"Get the prediction for a pair which is already in the predicted_relationship.\n Valid only for recommenders with remove_predicted=True.\n \"\"\"\n \n return RelationshipPredictionInstance(\n subject_object1=predicted_relationship.subject_object1,\n subject_object2=predicted_relationship.subject_object2,\n description=predicted_relationship.description,\n recommender=recommender_model,\n expectancy=ALREADY_IN_REL_PREDICTION_VALUE,\n is_trivial=True\n ) \n\n # Update phase:\n # \n \n def update(self, recommender, aggregated_changes):\n \"\"\"Update the recommender algorithm structures according to the changes\n in the aggregated relationships.\n \n \"\"\"\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.63840651512146, "avg_line_length": 18.959182739257812, "blob_id": "5686f75d1fbc60c81183db4c172749f280dd75d6", "content_id": "b006b353ab6ace2bb63d19825a6415d705edf892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 979, "license_type": "no_license", "max_line_length": 178, "num_lines": 49, "path": "/UnresystCD/code/adapter/traveleval.sh", "repo_name": "cvengros/unresyst", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n\n\nBUILD=true\nEVAL=''\nREC=''\nRECS=false\nPREDS=false\n\nfor param in $*;\ndo \n case $param in\n 'recs')\n EVAL='OrderTourRecommenderEvaluator'\n REC='OrderTourRecommender'\n RECS=true\n ;;\n 'preds')\n PREDS=true\n ;;\n 'dontbuild')\n BUILD=false\n ;;\n esac\ndone\n\nif [ $BUILD = true ]\nthen\n # build\n echo \"Building...\"\n echo \"from travel.recommender import *; OrderTourRecommender.build()\" | python ./manage.py shell\nfi\n\nif [ $RECS = true ]\nthen\n echo \"Evaluating recommendations...\"\n echo \"from travel.evaluation import *; from travel.recommender import *; $EVAL.evaluate_recommendations($REC, 10); quit()\"| python ./manage.py shell\nfi\n\nif [ $PREDS = true ]\nthen\n echo \"Evaluating predicitons...\"\n echo \"from travel.evaluation import *; from travel.recommender import *; OrderTourRankEvaluator.evaluate_predictions(OrderTourRecommender); quit()\" | python ./manage.py 
shell\nfi\n\n\n\necho \"\"\n\n" } ]
104
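The `rmse` and `precision_recall` docstrings in metrics.py above describe the computations in prose. The following is a minimal, framework-free sketch of the same formulas on plain Python lists — the toy `exp_pairs`, `hit_pairs`, `test_pairs` and `count` values are invented for illustration; the real code pulls them from the Django `EvaluationPairModel`:

```python
import math

# toy stand-ins for the ORM query results (hypothetical data)
exp_pairs = [(0.9, 0.7), (0.2, 0.4), (0.5, 0.5)]   # (expected, obtained) expectancies
hit_pairs = [(1, 10), (1, 10), (2, 30)]            # (subject_id, object_id) of successful pairs
test_pairs = [(1, 10), (1, 10), (1, 20), (2, 30), (2, 40)]
count = 10                                          # recommendation list size

# RMSE: sqrt of the mean squared difference, as in rmse() above
rmse = math.sqrt(sum((e - o) ** 2 for e, o in exp_pairs) / len(exp_pairs))

# precision: unique hits / (number of subjects * list size)
subjects = {s for s, _ in test_pairs}
precision = float(len(set(hit_pairs))) / (len(subjects) * count)

# recall: per-subject hits / per-subject test pairs, averaged over subjects
recall = sum(
    float(sum(1 for s, _ in hit_pairs if s == subj)) /
    sum(1 for s, _ in test_pairs if s == subj)
    for subj in subjects
) / len(subjects)

print("RMSE %.3f precision %.3f recall %.3f" % (rmse, precision, recall))
```

With the toy data, precision is 2 unique hits / (2 subjects x 10) = 0.1, and recall averages 2/3 and 1/2 to about 0.58 — the per-user averaging the docstring describes.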
wilbarnes/jet-sweep
https://github.com/wilbarnes/jet-sweep
7ed19319e2d598447e23c245fb9386174bc6e2c0
1077ce600346420283dfcf12cda3111c62c47d1d
72ffe0a3498f59b47918abd56aeed4fb9f94e0ef
refs/heads/master
2020-05-20T05:49:24.383657
2019-05-07T14:26:20
2019-05-07T14:26:20
185,415,386
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7846994400024414, "alphanum_fraction": 0.7890710234642029, "avg_line_length": 82.18181610107422, "blob_id": "d8acf18340e62f39a5a04ad6ef5a511f6c67907c", "content_id": "2164d431bac6cc227bc79b7d1b362601c68a677f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 915, "license_type": "no_license", "max_line_length": 187, "num_lines": 11, "path": "/README.md", "repo_name": "wilbarnes/jet-sweep", "src_encoding": "UTF-8", "text": "# jet-sweep\nMinimize idle crypto drag by temporarily sweeping your digital assets to higher interest accounts (in this case, Compound v2 Presidio). Open to exploring additional defi infrastructures. \n\nSparked from an idea on how to offset the rising MakerDAO stability fee costs. Goal is to use hot/cold idle ETH to capture interest overnight.\n\n'jet-sweep' named after the [spectacular football play.](https://youtu.be/atrBEX-GP7E?t=78) \n\n[Investopedia definition](https://www.investopedia.com/terms/s/sweepaccount.asp): \n* A sweep account automatically transfers cash funds into a safe but higher interest-earning investment option at the close of each business day, e.g. into a money market fund.\n* Sweep accounts try to minimize idle cash drag.\n* A sweep account service, however, may not always be free - and you might have to pay fees to your broker that might make the sweep not as attractive on a net basis.\n" }, { "alpha_fraction": 0.6104430556297302, "alphanum_fraction": 0.6145569682121277, "avg_line_length": 29.095237731933594, "blob_id": "f00a4d0e3349b10375e9aea04e4624708b42e53e", "content_id": "c9cb992a4dbf45a72a3655a13f3f56987417076c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6320, "license_type": "no_license", "max_line_length": 106, "num_lines": 210, "path": "/main.py", "repo_name": "wilbarnes/jet-sweep", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport datetime\nimport time\nimport json\nimport argparse\nimport configparser\n\nfrom eth_utils import to_checksum_address\nfrom web3 import Web3\n\nfrom compoundv2 import Address\nfrom compoundv2.cETH import cETH\nfrom compoundv2.Comptroller import Comptroller\n\ndef main():\n \"\"\"\n Generic scripts for interacting with Compound v2 Presidio\n \"\"\"\n\n if args.enter in ('eth', 'Eth', 'ETH'):\n enter_markets_tx = contract_Comptroller.enter_markets(cETH_addr, address_one)\n print(enter_markets_tx)\n\n elif args.get_assets_in:\n get_assets_call = contract_Comptroller.get_assets_in(address_one)\n print(get_assets_call)\n\n elif args.get_balance:\n get_balance_call = contract_cETH.balanceOf(address_one.address)\n print(get_balance_call)\n\n elif args.get_liquidity:\n get_liquidity_call = contract_Comptroller.get_liquidity(address_one)\n print(get_liquidity_call)\n \n elif args.supply and args.eth:\n supply_eth = contract_cETH.mint(args.eth)\n tx = supply_eth.send_raw_transaction()\n print(web3.toHex(tx['transactionHash']))\n\n elif args.supply_rate:\n supply_rate_call = contract_cETH.supplyRatePerBlock()\n print(supply_rate_call / 1e18)\n\n elif args.exchange_rate:\n exchange_rate_call = contract_cETH.exchangeRateCurrent()\n print(exchange_rate_call / 1e18)\n\n elif args.redeem and args.eth:\n redeem_eth = contract_cETH.redeem(args.eth)\n print(redeem_eth)\n tx = redeem_eth.send_raw_transaction()\n print('ETH redeemed.')\n print(web3.toHex(tx['transactionHash']))\n\n elif args.sweep_overnight and args.eth:\n supply_eth = contract_cETH.mint(args.eth)\n tx = 
supply_eth.send_raw_transaction()\n get_balance_call = contract_cETH.balanceOf(address_one.address)\n print(get_balance_call)\n time.sleep(60 * 15)\n redeem_eth = contract_cETH.redeem(get_balance_call)\n tx = redeem_eth.send_raw_transaction()\n print('Sweep complete')\n print(web3.toHex(tx['transactionHash']))\n\n elif args.supply and args.dai:\n print('Supplying dai is not yet implemented.')\n\n elif args.borrow and args.eth:\n # requires market entry\n print('Borrowing eth is not yet implemented.')\n\n elif args.borrow and args.dai:\n # requires market entry\n print('Borrowing dai is not yet implemented.')\n\n else:\n print('Not a valid option.')\n\nif __name__ == \"__main__\":\n # set config parser and read config variables\n parser = argparse.ArgumentParser('Jet sweep your assets to higher earning contracts')\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n private_key = config['ACCOUNT']['PrivateKey']\n address_one = Address(config['ACCOUNT']['Address'])\n infura_mainnet = config['PROVIDER']['InfuraMainnet']\n infura_rinkeby = config['PROVIDER']['InfuraRinkeby']\n infura_secret = config['PROVIDER']['InfuraProjSecret']\n\n # compound.finance contracts\n cDAI = to_checksum_address(config['COMPOUND-CONTRACTS']['cDAI'])\n cETH_addr = Address(config['COMPOUND-CONTRACTS']['cETH'])\n comptroller_addr = Address(config['COMPOUND-CONTRACTS']['Comptroller'])\n price_oracle = to_checksum_address(config['COMPOUND-CONTRACTS']['PriceOracle'])\n stable_interest = to_checksum_address(config['COMPOUND-CONTRACTS']['StableCoinInterestRateModel'])\n standard_interest = to_checksum_address(config['COMPOUND-CONTRACTS']['StandardInterestRateModel'])\n\n # arguments\n parser.add_argument(\n '--supply', \n dest='supply', \n action='store_true', \n help='supply asset'\n )\n\n parser.add_argument(\n '--borrow', \n dest='borrow', \n action='store_true', \n help='borrow asset'\n )\n\n parser.add_argument(\n '--redeem', \n dest='redeem', \n action='store_true', \n help='redeem asset'\n )\n\n parser.add_argument(\n '--supply-rate', \n dest='supply_rate', \n action='store_true', \n help='retrieve supply rate per block'\n )\n\n parser.add_argument(\n '--exchange-rate', \n dest='exchange_rate', \n action='store_true', \n help='retrieve current exchange rate'\n )\n\n parser.add_argument(\n '--eth', \n dest='eth', \n help='ether'\n )\n\n parser.add_argument(\n '--dai', \n dest='dai', \n help='dai'\n )\n\n # markets argument group\n markets_grp = parser.add_argument_group(\n title=\"ENTER/EXIT MARKETS\",\n description=\"commands for entering & exiting markets\"\n )\n\n markets_grp.add_argument(\n '--enter', \n dest='enter', \n help='enter specified market'\n )\n\n # reporting argument group\n reporting_grp = parser.add_argument_group(\n title=\"REPORTING\", \n description=\"short form reports of your open positions\"\n )\n\n reporting_grp.add_argument(\n '--get-assets-in', \n dest = 'get_assets_in', \n action = 'store_true', \n help = 'get list of entered markets'\n )\n\n reporting_grp.add_argument(\n '--get-liquidity',\n dest = 'get_liquidity',\n action = 'store_true',\n help = 'return estimated ether value of account\\'s collateral'\n )\n\n reporting_grp.add_argument(\n '--get-balance-of',\n dest = 'get_balance',\n action = 'store_true',\n help = 'get cToken balance of provided account'\n )\n\n sweep_grp = parser.add_argument_group(\n title=\"sweep options for your digital assets\",\n description='quick POC for sweeping your ETH into high interest accounts'\n )\n\n sweep_grp.add_argument(\n 
'--sweep-overnight',\n dest = 'sweep_overnight',\n action = 'store_true',\n help = 'sweep specified amount of eth to a high interest account once overnight'\n )\n\n\n args = parser.parse_args()\n\n web3 = Web3(Web3.HTTPProvider(infura_rinkeby))\n print('Web3 connected:', web3.isConnected())\n \n contract_cETH = cETH(web3, cETH_addr, address_one)\n contract_Comptroller = Comptroller(web3, comptroller_addr)\n\n main()\n" }, { "alpha_fraction": 0.7490196228027344, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 27.33333396911621, "blob_id": "05a1ac376acd307d0393e3f7be0a2ad7cae892aa", "content_id": "f99a9b7299d09f8d4cbfc64bfe0ee3a35ef6e966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/compoundv2/account.py", "repo_name": "wilbarnes/jet-sweep", "src_encoding": "UTF-8", "text": "import os\nfrom eth_account import Account\nfrom web3 import Web3\nfrom typing import Optional\n\ndef private_key_to_account(web3: Web3):\n private_key = os.environ['ETH_PRIVATE_KEY']\n account = Account.privateKeyToAccount(private_key)\n return account\n" } ]
3
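main.py above wires `--sweep-overnight` to a supply → hold → redeem sequence. Below is a condensed sketch of that flow, reusing the repo's own `cETH` wrapper exactly as it is called above (the 15-minute hold mirrors the hard-coded `time.sleep(60 * 15)`; error handling is omitted):

```python
import time

def sweep(ceth, account, amount_wei, hold_seconds=15 * 60):
    """Supply ETH to Compound, hold it, then redeem the full cToken balance."""
    ceth.mint(amount_wei).send_raw_transaction()   # supply ETH, minting cETH
    balance = ceth.balanceOf(account.address)      # cETH received for the deposit
    time.sleep(hold_seconds)                       # let interest accrue
    ceth.redeem(balance).send_raw_transaction()    # redeem cETH back to ETH
```

From the CLI this corresponds to `python main.py --sweep-overnight --eth <amount>`, since the `--sweep-overnight` branch only runs when `--eth` is also given.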
bjp12/recipe-book
https://github.com/bjp12/recipe-book
8b864adaae6c44d8792bd86eed913076bc4558a8
5ec410c8c830d5b4edb671bac64a68a10ee81140
ba811a13ff2f7ec0efcf049cbc7dfe5c9be5afea
refs/heads/master
2020-06-02T20:12:14.310775
2019-12-04T01:10:25
2019-12-04T01:10:25
191,294,186
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5807398557662964, "alphanum_fraction": 0.594245433807373, "avg_line_length": 27.88135528564453, "blob_id": "499ea62c45a96d493e62d092bcf549445eedd7f7", "content_id": "496eb6d8e80f237dcb8a9400a7c0d8dc8cd51cb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1703, "license_type": "no_license", "max_line_length": 133, "num_lines": 59, "path": "/recipe book/templates/ingredients.html", "repo_name": "bjp12/recipe-book", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n\t<title>Ingredient Picker</title>\n\t{% include 'header.html' %}\n\t<style>\n\t\t{% include 'css/style.css' %}\n\t</style>\n\n</head>\n<body>\n\n\t<div>\n\t\t<nav class=\"navbar navbar-expand-sm justify-content-end header\" style=\"background-color: #ffffff\">\n\t\t\t{% include 'navbar.html' %}\n\t\t</nav>\n\t\t<div class=\"title\">\n\t\t\t<img class=\"titleimg\" src=\"static/images/ingredients.png\" style=\"width: 100%; height: 550px; object-fit: cover;\">\n\t\t\t<div class=\"titletext\">\n\t\t\t\t<strong>Shown in this picture:</strong>\n\t\t\t\t<br>\n\t\t\t\t<p style=\"font-size: 18px;\">Leafy Greens: kale, spinach, collard greens</p>\n\t\t\t</div>\n\t\t</div>\n\t</div>\n\t<div>\n\t\t<form method=\"post\" id=\"ingform\" class=\"p-5\" style=\"background-color: #dfc5ce;\" action=\"/ingredients#result\">\n\t\t\t<div class=\"form-group\" id=\"form-group\">\n\t\t\t <label for=\"ingredients\" style=\"font-family:'Six Caps'; font-size: 50px; color: white;\">select your ingredients: </label> <br>\n\t\t\t <div class=\"row\">\n\t\t\t \t{% for ingredient in ilist %}\n\t\t\t \t<div class=\"col-3\">\n\t\t\t \t\t<input type=\"checkbox\" name=\"ingredients\" value=\"{{ ingredient }}\"> {{ ingredient }} <br>\n\t\t\t \t</div>\n\t\t\t\t {% endfor %}\n\t\t\t </div>\n\t\t\t <input type=\"submit\" class=\"btn btn-secondary float-right\" value=\"Find Me Recipes\" >\n\t\t\t <br>\n\t\t\t</div>\n\t\t</form>\n\t</div>\n\t{% if selected == True %}\n\t<div class=\"row\" id=\"result\">\n\t\t<div class=\"col-3\" style=\"background-color: #fde2d4; padding: 60px 30px;\">\n\t\t\t<h3>Selected Ingredients:</h3>\n\t\t\t<ul>\n\t\t\t{% for x in selected_list %}\n\t\t\t\t<li>{{ x }}</li>\n\t\t\t{% endfor %}\n\t\t\t</ul>\n\t\t</div>\n\t\t<div class=\"recipes col-8\" >\n\t\t\t{% include 'cards.html' %}\n\t\t</div>\n\t</div>\n\t{% endif %}\n\t{% include 'footer.html' %}\n</body>\n</html>" }, { "alpha_fraction": 0.7721518874168396, "alphanum_fraction": 0.7721518874168396, "avg_line_length": 53.3125, "blob_id": "56fda2074a8a62d0f3bdcb7497cf9524a0400b39", "content_id": "7bf98de085620afcbfbe45ae0f2c7dc2993aae65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 870, "license_type": "no_license", "max_line_length": 183, "num_lines": 16, "path": "/README.md", "repo_name": "bjp12/recipe-book", "src_encoding": "UTF-8", "text": "# Recipe Book\n\nMy friends and I love to cook and people are always asking us for our recipes! \nI created this recipe book as a way to easily access our recipes without having to scroll through boring side stories and process pictures. \n\nSome features implemented so far:\n * Build a grocery list by clicking on items you need to go get. 
Once you click build, this list will be copied to your clipboard so that you can easily text or email it to yourself.\n * Use the menu builder to try out different options for appetizers, entrées, desserts and drinks to design an entire meal with our recipes.\n * Use the search bar to find specific recipes.\n * Find recipes using ingredients you already have!\n \nComing Soon:\n * Process pictures and step by step instructions when you search for a specific recipe\n * More recipes!\n \nLet me know what else you would like to see!\n" }, { "alpha_fraction": 0.5531841516494751, "alphanum_fraction": 0.5612736940383911, "avg_line_length": 31.63483238220215, "blob_id": "b03f58fa2ffa6e422b6929ab2a88eb627d6748c2", "content_id": "a0f1cb5106791c0a106035e5df8d1bec751720a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5812, "license_type": "no_license", "max_line_length": 104, "num_lines": 178, "path": "/recipe book/homepage.py", "repo_name": "bjp12/recipe-book", "src_encoding": "UTF-8", "text": "import json\nfrom flask import Flask\nfrom flask import render_template, request\nimport csv\nimport pandas as pd\nimport re\n\napp = Flask(__name__)\ndatadict = {}\ningredient_list = []\ndata_pd = pd.read_csv(\"static/Data.csv\")\nmeal_col = data_pd[\"Meal\"]\ncusine_col = data_pd[\"Cuisine\"]\nprep_col = data_pd[\"Prep Time\"]\n# ingredient_list = [row.split(\",\") for row in ingredient_col]\n# ingredient_list = set(x.strip() for l in ingredient_list for x in l)\n\nwith open(\"static/Data.csv\", 'r+', encoding=\"utf-8\") as f:\n data = csv.DictReader(f)\n for row in data:\n name = row['Name']\n ingredients = row['Ingredients']\n for i in ingredients.split(\",\"):\n i = i.strip()\n if i not in ingredient_list:\n ingredient_list.append(i)\n ingredient_list.sort()\n recipe = re.findall(r'\\d.*',row['Recipe'])\n prep = row['Prep Time']\n fullName = row['Full Name']\n datadict[name] = (ingredients, recipe, prep, fullName)\n\nmeals = {i:[] for i in meal_col.unique()}\ncuisines = {i: [] for i in cusine_col.unique()}\ntimes = {i: [] for i in prep_col.unique()}\nfor item in datadict:\n item_row = data_pd.loc[data_pd['Name'] == item]\n name = item_row['Name'].item()\n meals[item_row['Meal'].item()].append(name)\n cuisines[item_row['Cuisine'].item()].append(name)\n times[item_row['Prep Time'].item()].append(name)\n\n\nappetizers = data_pd.loc[data_pd[\"Meal\"] == \"appetizer\"][\"Name\"].tolist()\nentrees = data_pd.loc[data_pd[\"Meal\"] == \"entrée\"][\"Name\"].tolist()\ndesserts = data_pd.loc[data_pd[\"Meal\"] == \"dessert\"][\"Name\"].tolist()\ndrinks = data_pd.loc[data_pd[\"Meal\"] == \"drink\"][\"Name\"].tolist()\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\[email protected]('/ingredients', methods=[\"GET\",\"POST\"])\ndef ingredients():\n if request.method == 'POST':\n result = request.form.getlist('ingredients')\n select = set()\n for item in datadict:\n for x in result:\n chosen = [i.strip() for i in datadict[item][0].split(',')]\n if x in chosen:\n select.add(item)\n\n return render_template('ingredients.html', \n ilist = ingredient_list, \n data = datadict, \n selected = True,\n selected_list = result,\n filters = [(\"Results\", select)],\n )\n return render_template('ingredients.html', ilist = ingredient_list, data = datadict, selected=False)\n\[email protected]('/meal-builder', methods=[\"GET\", \"POST\"])\ndef meal_builder():\n presets = {\n \"american\":[\"quinoa\", \"mexican-pizza\", \"oreo-truffles\", 
\"mojito\"],\n \"italian\":[\"quinoa\", \"ravioli\", \"cake\", \"mojito\"],\n \"mexican\":[\"quinoa\", \"spinach-puffs\", \"chocchipcookies\", \"mojito\"]\n }\n\n builder = {\n \"appetizer\": appetizers,\n \"entree\": entrees,\n \"dessert\": desserts,\n \"drink\": drinks\n }\n\n if request.method == 'POST':\n result = request.form.get('change')\n if result in presets.keys():\n preset = presets[result]\n appetizers.remove(preset[0])\n appetizers.insert(0, preset[0])\n entrees.remove(preset[1])\n entrees.insert(0, preset[1])\n desserts.remove(preset[2])\n desserts.insert(0, preset[2])\n drinks.remove(preset[3])\n drinks.insert(0, preset[3])\n else:\n col = builder[result]\n first = col[0]\n col.remove(col[0])\n col.append(first)\n\n return render_template('meal-builder.html', \n data = datadict,\n appetizers = appetizers,\n entrees = entrees,\n desserts = desserts,\n drinks = drinks\n )\n\n else:\n return render_template('meal-builder.html', \n data=datadict,\n appetizers = appetizers,\n entrees = entrees,\n desserts = desserts,\n drinks = drinks\n )\n\[email protected]('/recipes')\[email protected]('/recipes-time')\ndef recipes_time():\n my_order = [\"breakfast\", \"appetizer\",\"entrée\", \"dessert\", \"drink\"]\n order = {key: i for i, key in enumerate(my_order)}\n my_filters = sorted([(k,meals[k]) for k in meals], key=lambda d:order[d[0]])\n return render_template('recipes.html', \n data = datadict,\n \tfilters = my_filters\n )\n\[email protected]('/recipes-cuisine')\ndef recipes_cuisine():\n my_filters = [(k,cuisines[k]) for k in cuisines]\n return render_template('recipes.html',\n data = datadict, \n \tfilters = my_filters\n )\n\[email protected]('/recipes-prep')\ndef recipes_prep():\n my_order = [\"10-20 min\", \"20-30 min\", \"30-40 min\", \"50-60 min\", \"1-2 hrs\"]\n order = {key: i for i, key in enumerate(my_order)}\n my_filters = sorted([(k,times[k]) for k in times], key=lambda d:order[d[0]])\n return render_template('recipes.html', \n data = datadict,\n filters = my_filters\n )\n\[email protected]('/search', methods=[\"GET\", \"POST\"])\ndef search():\n if request.method == 'POST':\n result = request.form.get('search')\n data = data_pd.loc[data_pd[\"Full Name\"].str.lower().str.contains(result.lower(), regex=False)]\n name = data['Name']\n print(\"count: \", name.shape[0])\n if name.shape[0] == 1:\n return render_template('search.html',\n data = data,\n datadict = datadict[name.item()],\n item = result\n )\n else:\n return render_template('search-plural.html',\n data = data,\n names = name.tolist(),\n datadict = '',\n item = result\n )\n\[email protected]('/aboutus')\ndef about():\n return render_template('aboutus.html')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5004, debug=True)\n\n" } ]
3
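The `/ingredients` POST handler in homepage.py above selects every recipe whose comma-separated ingredient string contains any checked ingredient. The same matching reads compactly with sets; the two-recipe `datadict` below is a made-up stand-in for the one built from Data.csv:

```python
# toy stand-in for datadict: name -> (ingredients, recipe, prep, full name)
datadict = {
    "quinoa": ("quinoa, lemon, olive oil", [], "20-30 min", "Lemon Quinoa"),
    "mojito": ("mint, lime, rum", [], "10-20 min", "Classic Mojito"),
}
selected = ["mint", "lemon"]  # what the form checkboxes would submit

matches = {
    name
    for name, (ingredients, _, _, _) in datadict.items()
    if set(selected) & {i.strip() for i in ingredients.split(",")}
}
print(matches)  # {'quinoa', 'mojito'} (set order may vary)
```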
polinagal/pyqt_mysql_gui_job_offer
https://github.com/polinagal/pyqt_mysql_gui_job_offer
f776920bb57e7a09494f0b210261dc4732e9b859
f19a1bea3ce7f10f18ffebca24068321b40b34ae
63617f36d1c2dbd303d646e6c373914eb4762433
refs/heads/master
2021-01-21T06:55:34.122390
2015-04-05T12:39:13
2015-04-05T12:39:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7571884989738464, "alphanum_fraction": 0.7699680328369141, "avg_line_length": 32, "blob_id": "51f13036efb0dc73298ffb49c1eab597cb817eaa", "content_id": "2837bf3de1472e773a1d621782b051183fce0ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/hello_world.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#\nimport sys\nfrom PyQt4.QtGui import * # компоненты интерфейса\n\n# Каждое приложение должно создать объект QApplication\n# sys.argv - список аргументов командной строки\napplication = QApplication(sys.argv)\n\n# QWidget - базовый класс для всех объектов интерфейса\n# пользователя; если использовать для виджета конструктор\n# без родителя, такой виджет станет окном\nwidget = QWidget()\n\nwidget.resize(620, 640) # изменить размеры виджета\nwidget.setWindowTitle(\"Hop, Hey!\") # установить заголовок\nwidget.show() # отобразить окно на экране\n\nsys.exit(application.exec_()) # запуск основного цикла приложения" }, { "alpha_fraction": 0.6379974484443665, "alphanum_fraction": 0.6444159150123596, "avg_line_length": 31.5, "blob_id": "a14b4f782c1538e8d59ed9cabdcdfd1ce2dfa411", "content_id": "f2af16834c78fe908c10832c68fb29c16b681716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/mainform.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "from PyQt4 import QtCore, QtGui, uic # подключает основные модули PyQt\n \n# прототип главной формы\nclass MainForm(QtGui.QDialog):\n \n # конструктор\n def __init__(self):\n super(MainForm, self).__init__()\n \n # динамически загружает визуальное представление формы\n uic.loadUi(\"mainform.ui\", self)\n \n # связывает событие нажатия на кнопку с методом\n # old style (before pyqt 4.5):\n # self.connect(self.pushButton, QtCore.SIGNAL(\"clicked()\"),\n # self.setTextEdit)\n # new style (qt 4.5):\n self.pushButton.clicked.connect(self.setLabelText)\n\n # def setTextEdit(self):\n # self.textEdit.setText(self.plainTextEdit.text()) # не пашет\n\n def setLabelText(self):\n self.label.setText(\"New label text\")" }, { "alpha_fraction": 0.689686119556427, "alphanum_fraction": 0.6986547112464905, "avg_line_length": 31.735294342041016, "blob_id": "4f09be579d3310cc4f934790e74e7260c105b382", "content_id": "18003926178e80d3f30a4a8b79e878fd3a8d4bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 101, "num_lines": 34, "path": "/hello_mysql.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "__author__ = 'pif'\n\nimport sys\nfrom PyQt4 import QtSql\nfrom PyQt4 import QtCore\nfrom PyQt4 import QtGui\n\ninstance = QtGui.QApplication(sys.argv)\ndb = QtSql.QSqlDatabase.addDatabase(\"QMYSQL\")\ndb.setHostName(\"localhost\") # TODO localhost?\ndb.setDatabaseName(\"spring_sql\")\ndb.setUserName('root')\ndb.setPassword('')\nok = db.open()\n\nquery = QtSql.QSqlQuery()\nquery.setForwardOnly(True) # speed optimization\nquery.exec('select * from client')\nwhile query.next():\n for field_number in range(0, query.record().count()):\n value = query.value(field_number)\n if isinstance(value, QtCore.QPyNullVariant):\n value = 'null'\n 
print(value, end='\t')\n print()\n # name = query.value(0) if not isinstance(query.value(0), QtCore.QPyNullVariant) else 'null'\n # sex = query.value(1) if not isinstance(query.value(1), QtCore.QPyNullVariant) else 'null'\n # sex_lover = query.value(2) if not isinstance(query.value(2), QtCore.QPyNullVariant) else 'null'\n # print(name, sex, sex_lover)\n\nprint(db.tables())\n\ndb.close()\nQtSql.QSqlDatabase.removeDatabase(\"QMYSQL\") #TODO seriously? \"QMYSQL\"?\n\n\n" }, { "alpha_fraction": 0.7990115284919739, "alphanum_fraction": 0.8006590008735657, "avg_line_length": 45.769229888916016, "blob_id": "e1f684214d5344a13c2f16d3195b2d0f94e330e6", "content_id": "d87d790e02ea924c59ed78b25bd62b5a0814706f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 185, "num_lines": 13, "path": "/README.MD", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "The task is to implement an application that displays patient information from the attached database in a table. \nColumns: \n- full name\n- date of birth + age\n- sex\n- information about a valid compulsory health insurance (OMS) policy, if any\n- information about a valid identity document (passport, birth certificate, etc.)\n \nA filter over the main fields is also required, at a finer granularity (last name, first name, patronymic, document series, policy series, etc. separately), with the ability to search by several fields at once.\n \nThe implementation must use PyQt4. The DBMS used is MySQL.\n\n(The database is not included in the repository for confidentiality reasons.)" }, { "alpha_fraction": 0.71100914478302, "alphanum_fraction": 0.713302731513977, "avg_line_length": 35.41666793823242, "blob_id": "91d30ca8d5a4ad7cc4ad2b3849e056c00eac69f4", "content_id": "1e31f753c0c748b231aaccb5c76e1e7e4d3b1e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/hello_button.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt4 import QtCore, QtGui # imports the core PyQt modules\nimport mainform # imports the form definition module\n\ndef main():\n app = QtGui.QApplication(sys.argv) # creates the main application object\n form = mainform.MainForm() # creates the form object\n form.show() # tells the form object and its contents to be displayed\n app.exec() # starts the application\n\nif __name__ == \"__main__\":\n sys.exit(main())" }, { "alpha_fraction": 0.5195333957672119, "alphanum_fraction": 0.5211611390113831, "avg_line_length": 34.79611587524414, "blob_id": "a75bb20ed91ba735624cd4a8ab18fc0e6b8df7e9", "content_id": "3531698ad249a788265cb9e2cfadfd9a92491b0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3926, "license_type": "no_license", "max_line_length": 130, "num_lines": 103, "path": "/main_dirty.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "__author__ = 'pif'\n\nimport sys\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtSql\n\nimport view\n\n\nclass Main():\n\n    def __init__(self):\n        app = QtGui.QApplication(sys.argv)\n\n        self.db = self.open_db()\n\n        self.view = view.View()\n        self.view.showTableButton.clicked.connect(self.query) # TODO forward the signal from the view instead of reaching in so rudely\n        self.view.show()\n\n        app.exec()\n\n    def open_db(self):\n        db = 
QtSql.QSqlDatabase.addDatabase('QMYSQL')\n        db.setHostName('localhost')\n        db.setDatabaseName('spring_sql')\n        db.setUserName('root')\n        db.setPassword('')\n        ok = db.open() # TODO what if it's not ok? Raise an exception?\n        return db\n\n    def query(self):\n        # query = QtSql.QSqlQuery()\n        # query.setForwardOnly(True) # speed optimization\n        # query.exec('select * from cats')\n\n        common_filter, \\\n        lastname_filter, \\\n        firstname_filter, \\\n        patrname_filter,\\\n        gender_filter,\\\n        age_from, \\\n        age_to, \\\n        birthdate_from,\\\n        birthdate_to, \\\n        document_serial_filter, \\\n        document_number_filter \\\n        = self.view.get_fields_text()\n\n        model = QtSql.QSqlQueryModel()\n        model.setQuery(\n            \"select \"\n            \"CONCAT \"\n            \"(\"\n            \"client.lastName, ' ', \"\n            \"client.firstName, ' ', \"\n            \"client.patrName\"\n            \") \"\n            \"as 'ФИО', \" #TODO depends on the source file encoding, move to a config\n            \"client.birthDate as 'дата рождения', \"\n            \"TIMESTAMPDIFF(YEAR,client.birthDate,CURDATE()) as 'возраст', \"\n            \"case client.sex when 1 then 'м' when 2 then 'ж' end as 'пол', \"\n            \"CONCAT\"\n            \"(\"\n            \"clientdocument.serial, ' ', \"\n            \"clientdocument.number\"\n            \") \"\n            \"as 'документ (серия, номер)' \"\n            \"from client INNER JOIN clientdocument \"\n            \"ON client.id = clientdocument.client_id \"\n            \"where \"\n            \"(\"\n            \"client.lastName like '%\" + lastname_filter + \"%' and \"\n            \"client.firstName like '%\" + firstname_filter + \"%' and \"\n            \"client.patrName like '%\" + patrname_filter + \"%' and \"\n            \"clientdocument.serial like '%\" + document_serial_filter + \"%' and \"\n            \"clientdocument.number like '%\" + document_number_filter + \"%' and \"\n            \"case client.sex when 1 then 'м' when 2 then 'ж' end like '%\" + gender_filter + \"%' and \"\n            \"client.birthDate >= STR_TO_DATE('\" + birthdate_from + \"', '%Y-%m-%d') and \"\n            \"client.birthDate <= STR_TO_DATE('\" + birthdate_to + \"', '%Y-%m-%d') and \"\n            \"TIMESTAMPDIFF(YEAR,client.birthDate,CURDATE()) >= \" + str(age_from) + \" and \"\n            \"TIMESTAMPDIFF(YEAR,client.birthDate,CURDATE()) <= \" + str(age_to) +\n            \") \"\n            \"and \"\n            \"(\"\n            \"client.lastName like '%\" + common_filter + \"%' or \"\n            \"client.firstName like '%\" + common_filter + \"%' or \"\n            \"client.patrName like '%\" + common_filter + \"%' or \"\n            \"clientdocument.serial like '%\" + common_filter + \"%' or \"\n            \"clientdocument.number like '%\" + common_filter + \"%'\"\n            \") \"\n            \"; \")\n        #TODO maybe model.setQuery(query)?\n        #TODO handle NULL variables in the SQL query\n\n        self.view.set_model(model)\n\n    def __del__(self): #TODO this may never be called, resources should be cleaned up explicitly\n        self.db.close()\n        QtSql.QSqlDatabase.removeDatabase('QMYSQL')\n\nif __name__ == '__main__':\n    Main()" }, { "alpha_fraction": 0.5257548689842224, "alphanum_fraction": 0.527531087398529, "avg_line_length": 27.897436141967773, "blob_id": "72092661c23244581b04a20b7d7f324dd2e7c066", "content_id": "9809627b4b209fa31ce598ebaa4a9e341d13fc7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 66, "num_lines": 39, "path": "/view_dirty.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "__author__ = 'pif'\n\nfrom PyQt4 import QtGui\nfrom PyQt4 import uic\n\n\nclass View(QtGui.QMainWindow):\n\n    def __init__(self):\n        super(View, self).__init__()\n\n        # load the interface\n        uic.loadUi(\"view.ui\", self)\n\n        # touch up what couldn't be done in Qt Designer\n        
self.gender.addItem(\"\")\n self.gender.addItem(\"м\")\n self.gender.addItem(\"ж\")\n\n def get_fields_text(self):\n return (\n self.common_filter.text(),\n self.lastName.text(),\n self.firstName.text(),\n self.patrName.text(),\n self.gender.currentText(),\n self.age_from.value(),\n self.age_to.value(),\n self.birthDate_from.date().toString('yyyy-MM-dd'),\n self.birthDate_to.date().toString('yyyy-MM-dd'),\n self.document_serial.text(),\n self.document_number.text(),\n )\n\n def set_model(self, model):\n self.tableView.setModel(model)\n # v = QtGui.QTableView(self.tableWidget)\n # v.setModel(model)\n # v.show()" }, { "alpha_fraction": 0.6364796161651611, "alphanum_fraction": 0.6492347121238708, "avg_line_length": 31.54166603088379, "blob_id": "236bc9bb4d9c8fffd9604b61abc9d16a77d67760", "content_id": "f6bcc80138892b60f82633f10e330b81c1597515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 103, "num_lines": 24, "path": "/mysql_qtgui.py", "repo_name": "polinagal/pyqt_mysql_gui_job_offer", "src_encoding": "UTF-8", "text": "__author__ = 'pif'\n\nimport sys\nfrom PyQt4 import QtSql\nfrom PyQt4 import QtCore\nfrom PyQt4 import QtGui\nimport view\n\napp = QtGui.QApplication(sys.argv)\napp.exec()\n\n# while query.next():\n# for field_number in range(0, query.record().count()):\n# value = query.value(field_number)\n# if isinstance(value, QtCore.QPyNullVariant):\n# value = 'null'\n# print(value, end='\\t')\n# print()\n# # name = query.value(0) if not isinstance(query.value(0), QtCore.QPyNullVariant) else 'null'\n# # sex = query.value(1) if not isinstance(query.value(1), QtCore.QPyNullVariant) else 'null'\n# # sex_lover = query.value(2) if not isinstance(query.value(2), QtCore.QPyNullVariant) else 'null'\n# # print(name, sex, sex_lover)\n#\n# print(db.tables())\n\n\n\n" } ]
8
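A side note on main_dirty.py above: it builds its SQL by concatenating filter strings and leaves TODOs about NULL handling. QtSql also supports bound placeholders via `QSqlQuery.prepare`/`addBindValue`, which lets the driver escape the values instead. A minimal sketch — not code from the repo; it assumes an open QMYSQL connection like the one `open_db()` creates, and the filter values are hypothetical:

```python
from PyQt4 import QtSql

lastname_filter, firstname_filter = "Iv", ""  # hypothetical filter values

query = QtSql.QSqlQuery()
query.prepare(
    "select client.lastName, client.firstName "
    "from client "
    "where client.lastName like ? and client.firstName like ?"
)
query.addBindValue("%" + lastname_filter + "%")
query.addBindValue("%" + firstname_filter + "%")
query.exec_()  # values are bound by the driver, not pasted into the SQL text
```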
SomEOnESmith/task_04
https://github.com/SomEOnESmith/task_04
3f35db740270b67d40a29297ac8c6f9af0ebae63
549ce8e51ff543b7b3206807efe807b351f63f3e
a7d613d043290b8c58b138657410896f1752018a
refs/heads/master
2020-07-08T11:49:32.042272
2019-08-22T13:28:12
2019-08-22T13:28:12
203,663,994
0
0
null
2019-08-21T21:02:29
2019-06-17T16:25:09
2019-08-06T01:45:36
null
[ { "alpha_fraction": 0.5193199515342712, "alphanum_fraction": 0.5486862659454346, "avg_line_length": 22.962963104248047, "blob_id": "b94edc40b397bf1cc75f7718f2d0d6757affd92d", "content_id": "5180abc9be6319b8c0d91041851dbbecb9f54375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 47, "num_lines": 27, "path": "/restaurants/migrations/0002_auto_20190822_1322.py", "repo_name": "SomEOnESmith/task_04", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-08-22 13:22\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restaurants', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='restaurant',\n name='closeing_time',\n ),\n migrations.AddField(\n model_name='restaurant',\n name='closing_time',\n field=models.TimeField(null=True),\n ),\n migrations.AlterField(\n model_name='restaurant',\n name='opening_time',\n field=models.TimeField(),\n ),\n ]\n" } ]
1
evargasv/particle-filter
https://github.com/evargasv/particle-filter
07e632351333083628c9526c79c2b77ba81e34b0
38115f2f7a3b6e0769b08fc776502b0b6210e43e
31acb57949acb5d29c03dc5f1360d8e424551cd4
refs/heads/master
2022-04-17T15:18:18.537322
2020-04-13T19:25:16
2020-04-13T19:25:16
255,417,707
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8352112770080566, "alphanum_fraction": 0.8352112770080566, "avg_line_length": 140.60000610351562, "blob_id": "3c75ac9f8ef489b6c1a117322956c51e47d3663d", "content_id": "05d3338a3153b525aede7c63ff4857cbc0dfae1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 710, "license_type": "no_license", "max_line_length": 549, "num_lines": 5, "path": "/README.md", "repo_name": "evargasv/particle-filter", "src_encoding": "UTF-8", "text": "# Particle Filter\n\nParticle Filter or Montecarlo Localization (MCL) algorithm implementation to localise a two dimensional robot (turtlebot) in a given map.\n\nThe MCL localization is an implementation of the Markovian localisation problem where the involved probability density functions are represented through samples (particles) and the Bayes filter is implemented through the Particle Filter. Markov localisation addresses the problem of state estimation from sensor data. Markov localization is a probabilistic algorithm: Instead of maintaining a single hypothesis as to where in the world a robot might be, Markov localization maintains a probability distribution over the space of all such hypotheses.\n\n\n" }, { "alpha_fraction": 0.4799180328845978, "alphanum_fraction": 0.49467211961746216, "avg_line_length": 36.53845977783203, "blob_id": "8dc908ed5e9b806d8871eda6f1e193419c50c767", "content_id": "3cd3afc29c99c5662dc86ee1c6f269fd74fed0f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7320, "license_type": "no_license", "max_line_length": 142, "num_lines": 195, "path": "/src/particle_filter.py", "repo_name": "evargasv/particle-filter", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom functions import angle_wrap\n\n#===============================================================================\nclass ParticleFilter(object):\n '''\n Class to hold the whole particle filter.\n\n p_wei: weights of particles in array of shape (N,)\n p_ang: angle in radians of each particle with respect of world axis, shape (N,)\n p_xy : position in the world frame of the particles, shape (2,N)\n '''\n\n #===========================================================================\n def __init__(self, room_map, num, odom_lin_sigma, odom_ang_sigma,\n meas_rng_noise, meas_ang_noise):\n '''\n Initializes the particle filter\n room_map : an array of lines in the form [x1 y1 x2 y2]\n num : number of particles to use\n odom_lin_sigma: odometry linear noise\n odom_ang_sigma: odometry angular noise\n meas_rng_noise: measurement linear noise\n meas_ang_noise: measurement angular noise\n '''\n\n # Copy parameters\n self.map = room_map\n self.num = num\n self.odom_lin_sigma = odom_lin_sigma\n self.odom_ang_sigma = odom_ang_sigma\n self.meas_rng_noise = meas_rng_noise\n self.meas_ang_noise = meas_ang_noise\n\n # Map\n map_xmin = np.min(self.map[:, 0])\n map_xmax = np.max(self.map[:, 0])\n map_ymin = np.min(self.map[:, 1])\n map_ymax = np.max(self.map[:, 1])\n\n # Particle initialization\n self.p_wei = 1.0 / num * np.ones(num)\n self.p_ang = 2 * np.pi * np.random.rand(num)\n self.p_xy = np.vstack(( map_xmin + (map_xmax - map_xmin) * np.random.rand(num),\n map_ymin + (map_ymax - map_ymin) * np.random.rand(num) ))\n\n #===========================================================================\n def predict(self, odom):\n '''\n Moves particles with the given odometry.\n odom: incremental odometry [delta_x 
delta_y delta_yaw] in the vehicle frame\n '''\n\n # Add Gaussian noise to odometry measures\n lin_noise = np.random.randn(self.num,2) * self.odom_lin_sigma\n ang_noise = angle_wrap( np.random.randn(self.num) * self.odom_ang_sigma + odom[2] )\n\n # Increment particle positions in correct frame\n odom_noise = np.tile(odom[0:2], (self.num, 1))\n odom_noise += lin_noise\n\n self.p_xy[0,:] += odom_noise[:,0] * np.cos(self.p_ang) - odom_noise[:,1] * np.sin(self.p_ang)\n self.p_xy[1,:] += odom_noise[:,0] * np.sin(self.p_ang) + odom_noise[:,1] * np.cos(self.p_ang)\n\n # Increment angle\n self.p_ang += ang_noise\n self.p_ang = angle_wrap(self.p_ang)\n\n #===========================================================================\n def weight(self, lines):\n '''\n Look for the lines seen from the robot and compare them to the given map.\n Lines expressed as [x1 y1 x2 y2].\n '''\n\n # Constant values for all weightings\n val_rng = 1.0 / (self.meas_rng_noise * np.sqrt(2 * np.pi))\n val_ang = 1.0 / (self.meas_ang_noise * np.sqrt(2 * np.pi))\n\n # origin of the map\n orig = np.array( [0, 0, 0] )\n\n # map and measured lines array for polar coordinates\n expected_lines = np.zeros( (self.map.shape[0], 2) )\n measured_lines = np.zeros( (lines.shape[0], 2) )\n\n # Loop over particles\n for i in range(self.num):\n\n # Position of the particle\n odom = np.array( [self.p_xy[0,i], self.p_xy[1,i], self.p_ang[i] ] )\n # Weight of each measured line\n lines_wei = np.zeros( lines.shape[0] )\n\n # Transform map lines to local frame and to [range theta]\n for j in range(self.map.shape[0]):\n # Transform to [range theta]\n expected_lines[j,:] = self.get_polar_line( self.map[j,:], odom )\n\n # Transform measured lines to [range theta] and weight them\n for j in range(lines.shape[0]):\n\n # Transform measured lines\n measured_lines[j,:] = self.get_polar_line( lines[j,:], orig )\n map_wei = np.zeros( expected_lines.shape[0] )\n\n # Select the map line that best corresponds to the measured line:\n # weight every map line and keep the maximum\n for k in range(expected_lines.shape[0]):\n\n # Weight them (Gaussian likelihood with the measurement noise sigmas)\n range_wei = val_rng * np.exp((-( measured_lines[j,0] - expected_lines[k,0] )**2.0)/(2.0*(self.meas_rng_noise**2.0)))\n angle_wei = val_ang * np.exp((-( measured_lines[j,1] - expected_lines[k,1] )**2.0)/(2.0*(self.meas_ang_noise**2.0)))\n map_wei[k] = range_wei * angle_wei\n\n # Best associated line on the map\n lines_wei[j] = map_wei.max()\n\n # OPTIONAL question\n # make sure segments correspond, if not put weight to zero\n #\n #\n\n # Take best weighting (best associated lines)\n self.p_wei[i] *= np.sum( lines_wei )\n\n # Normalize weights\n self.p_wei /= np.sum(self.p_wei)\n\n\n #===========================================================================\n\n def resample(self):\n '''\n Systematic resampling of the particles.\n '''\n # Look for particles to replicate\n p_idx = np.zeros(self.num, dtype=int)\n r = np.random.rand() * (1.0/self.num)\n c = self.p_wei[0]\n j = 0\n\n for i in range(self.num):\n u = r + ( i * (1.0/self.num) )\n while u > c:\n j = (j + 1) % self.num\n c = c + self.p_wei[j]\n p_idx[i] = int(j)\n\n # Pick chosen particles\n self.p_ang = self.p_ang[(p_idx)]\n self.p_wei = self.p_wei[(p_idx)]\n self.p_xy = self.p_xy[:,p_idx]\n #===========================================================================\n def get_mean_particle(self):\n '''\n Gets mean particle.\n '''\n # Weighted mean\n weig = np.vstack((self.p_wei, self.p_wei))\n mean = np.sum(self.p_xy * weig, axis=1) / np.sum(self.p_wei)\n\n ang = np.arctan2( np.sum(self.p_wei * np.sin(self.p_ang)) / np.sum(self.p_wei),\n np.sum(self.p_wei * np.cos(self.p_ang)) / np.sum(self.p_wei) )\n\n return np.array([mean[0], mean[1], ang])\n\n #===========================================================================\n def get_polar_line(self, line, odom):\n '''\n Transforms a line [x1 y1 x2 y2] from the world frame to the\n vehicle frame using odometry [x y ang].\n Returns [range theta]\n '''\n # Line points\n x1 = line[0]\n y1 = line[1]\n x2 = line[2]\n y2 = line[3]\n\n # Compute line (a, b, c) and range\n line = np.array([y1-y2, x2-x1, x1*y2-x2*y1])\n pt = np.array([odom[0], odom[1], 1])\n dist = np.dot(pt, line) / np.linalg.norm(line[:2])\n\n # Compute angle\n if dist > 0:\n ang = np.arctan2(line[1], line[0])\n else:\n ang = np.arctan2(-line[1], -line[0])\n\n # Return in the vehicle frame\n return np.array([np.abs(dist), angle_wrap(ang - odom[2])])\n" } ]
2
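The `resample()` method above is the systematic (low-variance) resampling scheme: one random offset, then N evenly spaced pointers walked along the cumulative weights. The same procedure as a standalone numpy sketch on a toy weight vector, independent of the class:

```python
import numpy as np

def systematic_resample(weights):
    """Return indices of the particles to keep; weights must sum to 1."""
    n = len(weights)
    # one random offset, then n evenly spaced pointers in [0, 1)
    positions = (np.random.rand() + np.arange(n)) / n
    cumulative = np.cumsum(weights)
    indices = np.zeros(n, dtype=int)
    i = j = 0
    while i < n:
        if positions[i] < cumulative[j]:
            indices[i] = j   # pointer i falls inside particle j's weight segment
            i += 1
        else:
            j += 1
    return indices

w = np.array([0.1, 0.6, 0.2, 0.1])
print(systematic_resample(w))  # e.g. [1 1 2 3]: heavy particles get replicated
```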
LoveGalaxy/PyTorch_Tutorial
https://github.com/LoveGalaxy/PyTorch_Tutorial
3ed7879bd177bb9c47f8a39a67f0126b81ce0290
482434770cd51ade8c1f7e91f0be67daa8cbcfc2
0d40503c0e101bb881c918ff1e1f429497860acf
refs/heads/master
2022-04-29T19:08:01.432004
2018-06-07T18:24:20
2018-06-07T18:24:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5605888962745667, "alphanum_fraction": 0.5979614853858948, "avg_line_length": 20.0238094329834, "blob_id": "1be8d3d0fd002c0638e86f9e1a6b63d7db2a9761", "content_id": "50c3f7f2a302bb1602e7c50494443f3ca20779b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 54, "num_lines": 42, "path": "/Part 2: Pytorch简单例子/1 Use Tnensor to implement a NN.py", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "import torch\n\n# numpy已经为我们提供了强大的计算能力,但是为了能让我们的数据\n# 能够在GPU上进行加速,我们需要将计算的类型转化为Tensor\n# 设置torch tensor类型,设置计算设备\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n\nN = 64\nD_in = 1000\nH = 100\nD_out = 10\n\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\nw1 = torch.randn(D_in, H, device=device, dtype=dtype)\nw2 = torch.randn(H, D_out, device=device, dtype=dtype)\n\nlr = 1e-6\n\nfor t in range(500):\n # 前馈计算\n h = x.mm(w1)\n h_relu = h.clamp(min=0)\n y_pred = h_relu.mm(w2)\n\n # 计算损失\n loss = (y_pred - y).pow(2).sum().item()\n print(t, loss)\n\n # bp算法\n grad_y_pred = 2.0 * (y_pred - y)\n grad_w2 = h_relu.t().mm(grad_y_pred)\n grad_h_relu = grad_y_pred.mm(w2.t())\n grad_h = grad_h_relu.clone()\n grad_h[h < 0] = 0\n grad_w1 = x.t().mm(grad_h)\n\n # 更新网络权值\n w1 -= lr * grad_w1\n w2 -= lr * grad_w2\n" }, { "alpha_fraction": 0.6621924042701721, "alphanum_fraction": 0.6845637559890747, "avg_line_length": 22.526315689086914, "blob_id": "58575110fac4ac08a65005af0b9a64a4c02c07d9", "content_id": "2741ad8cabc91ebc103044396a87a2e40c2fbc63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 791, "license_type": "no_license", "max_line_length": 38, "num_lines": 19, "path": "/Part 1: 60分钟快速入门/README.md", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "### 第一部分 60min 快速入门\n\n### Tutorial.py\n#### 这是官方文档 60min 快速入门 PyTorch 的第一部分,\n#### 主要介绍了 PyTorch 的一些基本语法\n\n### Autograd.py\n#### 这是官方文档 60min 快速入门 PyTorch 的第二部分,\n#### 主要介绍了 PyTorch 中的自动求导机制\n\n### NeuralNetwork.py\n#### 这是官方文档 60min 快速入门 PyTorch 的第三部分,\n#### 主要实现了一个简单的LeNet网络,介绍了用 Pytorch\n#### 搭建卷积神经网络的主要步骤\n\n### Simple Example.ipynb\n#### 这是官方文档 60min 快速入门 PyTorch 的第四、五部分\n#### 第四部分主要是用卷积神经网络做了一个图像分类器\n#### 第五部分主要是介绍了在多显卡的时候如何使用 Pytorch\n" }, { "alpha_fraction": 0.5052770376205444, "alphanum_fraction": 0.5474933981895447, "avg_line_length": 16.738094329833984, "blob_id": "106d5d2fb9b42fc61790c1b6ba7ddd8e6fe32aa9", "content_id": "1e5c778723c766a63b893613a4d69897cb6be4d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "no_license", "max_line_length": 39, "num_lines": 42, "path": "/Part 2: Pytorch简单例子/0 Use numpy to implement a NN.py", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "import numpy as np\n\n# N 是 batch 大小\n# D_in, D_out 是神经网络输入和输出的维度\n# H 是神经网络隐藏层的维度\nN = 64\nD_in = 1000\nH = 100\nD_out = 10\n\n# 生成随机数据\nx = np.random.randn(N, D_in)\ny = np.random.randn(N, D_out)\n\n# 初始化神经网络参数权值\nw1 = np.random.rand(D_in, H)\nw2 = np.random.rand(H, D_out)\n\n# 设置学习速率\nlr = 1e-6\n\nfor t in range(500):\n # 前馈计算\n h = x.dot(w1)\n h_relu = np.maximum(h, 0)\n y_pred = h_relu.dot(w2)\n\n # 计算误差(loss),采用的是平方和\n loss = np.square(y_pred - y).sum()\n print(t, loss)\n\n # 反馈计算\n grad_y_pred = 2.0 * (y_pred - y)\n grad_w2 = h_relu.T.dot(grad_y_pred)\n 
grad_h_relu = grad_y_pred.dot(w2.T)\n grad_h = grad_h_relu.copy()\n grad_h[h < 0] = 0\n grad_w1 = x.T.dot(grad_h)\n\n # update the weights\n w1 -= lr * grad_w1\n w2 -= lr * grad_w2\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.63840651512146, "avg_line_length": 18.959182739257812, "blob_id": "5686f75d1fbc60c81183db4c172749f280dd75d6", "content_id": "b006b353ab6ace2bb63d19825a6415d705edf892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 103, "num_lines": 24, "path": "/Part 2: Pytorch简单例子/6 Pytorch:Use optim.py", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "import torch\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in)\ny = torch.randn(N, D_out)\n\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out)\n)\n\nloss_fn = torch.nn.MSELoss(size_average=False)\n\nlr = 1e-4\n\n# some common optimizers: SGD, Momentum, RMSprop, Adam\n\"\"\"\nSGD is the plainest optimizer, with no acceleration to speak of.\nMomentum is an improved version of SGD that adds the momentum principle.\nRMSprop, in turn, is an upgrade of Momentum,\nand Adam is an upgrade of RMSprop.\nYet from these results we can see that Adam seems to do slightly worse than RMSprop,\nso a more advanced optimizer does not necessarily give better results.\nIn your own experiments, try different optimizers and find the one\nthat best fits your data/network.\n\"\"\"\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n\nfor t in range(500):\n y_pred = model(x)\n\n loss = loss_fn(y_pred, y)\n print(t, loss)\n\n optimizer.zero_grad()\n\n loss.backward()\n\n # use the optimizer to apply the gradient update\n optimizer.step()\n\n" }, { "alpha_fraction": 0.7283372282981873, "alphanum_fraction": 0.733021080493927, "avg_line_length": 16.079999923706055, "blob_id": "97f817d4301cc4c0e59baa50e1c3c7d48d90cdd4", "content_id": "c800116e66afc8f0941b93108a8394c586d77073", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 969, "license_type": "no_license", "max_line_length": 48, "num_lines": 25, "path": "/README.md", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "# PyTorch_Tutorial\nAbout PyTorch\n\n## This is a Chinese-language PyTorch tutorial project\n\n### I originally wanted to write it in English, but there is already plenty of English material.\n### My native language is Chinese, so I am writing an introductory codebase in Chinese.\n### The code is still mainly based on the official examples; I have\n### rewritten the introductions and comments in Chinese, and I have also run some related\n### experiments, adding the tuning experience and deep-learning knowledge\n### gained from them to the comments, to make things easier to understand.\n### I will add as much code and commentary as I can.\n\n## Reading outline\n\n### Part 1: 60-minute quick start\n\n### \n### Part 2: simple PyTorch examples\n\n### Part 3: transfer learning\n#### I am not yet clear on the details of transfer learning; I will study up and then continue updating, so this will probably slip for a while.\n\n#### My own knowledge is limited, and I welcome corrections of any mistakes.\n#### I will try to update this repository every day, and I will seriously consider any feedback you give.\n" }, { "alpha_fraction": 0.6771568059921265, "alphanum_fraction": 0.6966902017593384, "avg_line_length": 25.897809982299805, "blob_id": "15e1e83328809d17b28d152182f7b0fc562dde44", "content_id": "69fb35690355d9f050176f834ff500df7a0f6a88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4566, "license_type": "no_license", "max_line_length": 88, "num_lines": 137, "path": "/Part 1: 60分钟快速入门/0 Tutorial.py", "repo_name": "LoveGalaxy/PyTorch_Tutorial", "src_encoding": "UTF-8", "text": "import torch\n\n# python version is 3.6.4\n# torch version is 0.4.0\n# print(torch.__version__)\n\n# create a 5*3 matrix, uninitialized\nx = torch.empty(5, 3)\nprint(x)\n\n# create a 5*3 randomly initialized matrix\nx = torch.rand(5, 3)\nprint(x)\n\n# create a matrix filled with zeros and of dtype long\nx = torch.zeros(5, 3, dtype=torch.long)\nprint(x)\n\n# construct a tensor directly from data\nx 
= torch.tensor([5.5, 3])\nprint(x)\n\n# create a tensor based on an existing tensor\n# these methods will reuse properties of the input tensor\n# first create a matrix of all ones,\n# then create a matrix of the same size filled with random numbers\nx = x.new_ones(5, 3, dtype=torch.double)\nprint(x)\nx = torch.randn_like(x, dtype=torch.float)\nprint(x)\n\n# get its size\n(i, j) = x.size() # torch.Size is in fact a tuple, so it supports all tuple operations.\nprint(i, j)\nprint(x.shape) # .shape == .size()\n\n# There are multiple syntaxes for operations\n# In the following example, we will take a look at the addition operation\n\n\n# Addition: syntax 1\ny = torch.rand(5, 3)\nprint(x + y)\n\n# Addition: syntax 2\nprint(torch.add(x, y))\n\n# Addition: providing an output tensor as argument\nresult = torch.empty(5, 3)\ntorch.add(x, y, out=result)\nprint(result)\n\n# Addition in-place\n# Any operation that mutates a tensor in-place is post-fixed with an _.\n# For example: x.copy_(y), x.t_(), will change x\ny.add_(x)\nprint(y)\n\n# You can use standard NumPy-like indexing with all bells and whistles!\nprint(x[:, 1])\n\n# Resizing: If you want to resize/reshape a tensor, you can use torch.view\nx = torch.randn(4, 4, 3)\ny = x.view(3, 16)\nz = x.view(-1, 8) # the size -1 is inferred from other dimensions\nprint(x.size(), y.size(), z.size())\n# view lays the elements out in order:\n# e.g. above, x is a 4*4*3 tensor; when it is converted to shape 3*16,\n# the elements are simply taken in row-major order\nx = torch.randn(4, 4, 3)\ny = x.view(3, 4, 4)\n# consider this code again: think of the 3*4*4 tensor as a cuboid;\n# in fact, the cuboid obtained through view is not a rotation of the original,\n# but one whose elements have been rearranged in order\n\n# If you have a one element tensor, use .item() to get the value as a Python number\nx = torch.randn(1)\nprint(x)\nprint(x.item())\n\n# The Torch Tensor and NumPy array will share their underlying memory locations,\n# and changing one will change the other.\na = torch.ones(5)\nprint(a)\nb = a.numpy()\nprint(b)\n# changing the element values of a changes the values in b as well\na.add_(1)\nprint(a)\nprint(b)\n\n# changing the np array changed the Torch Tensor automatically\n# note: importing a module anywhere but at the top of the file violates PEP 8\nimport numpy as np\na = np.ones(5)\nb = torch.from_numpy(a)\nnp.add(a, 1, out=a)\nprint(a)\nprint(b)\n\n# All the Tensors on the CPU except a CharTensor support converting to NumPy and back.\n\n# Tensors can be moved onto any device using the .to method.\n\n# let us run this cell only if CUDA is available\n# We will use ``torch.device`` objects to move tensors in and out of GPU\nif torch.cuda.is_available():\n device = torch.device(\"cuda\") # a CUDA device object\n y = torch.ones_like(x, device=device) # directly create a tensor on GPU\n x = x.to(device) # or just use strings ``.to(\"cuda\")``\n z = x + y\n print(z)\n print(z.to(\"cpu\", torch.double)) # ``.to`` can also change dtype together!\n\n# some machines have CUDA and some don't; for better portability we can write it like this\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nx = torch.rand(5, 3, device=device) # rand: uniform distribution over [0, 1)\ny = torch.randn(5, 3, device=device) # randn: normal distribution with mean 0, variance 1\nz = x + y\nprint(z)\n\n" } ]
6
Thelin90/G.D.C.K
https://github.com/Thelin90/G.D.C.K
3ec4a1d6fcd4d4695a846a118b49dd68d61e5113
dcaf437d119b874654fa58dbd560a4545a555864
74e008a03e04c55afb7a1bc0dfdc8c09ae56648e
refs/heads/master
2022-11-20T01:37:38.097678
2019-06-04T11:00:11
2019-06-04T11:00:11
134,684,092
0
0
Apache-2.0
2018-05-24T08:18:58
2019-06-04T11:00:19
2022-11-11T07:32:39
Python
[ { "alpha_fraction": 0.6452810168266296, "alphanum_fraction": 0.6930010318756104, "avg_line_length": 29.419355392456055, "blob_id": "5fb256f9581199a758db732b0ffe6192d2cb8029", "content_id": "8c04384f6998befc03967637e97af16c61f459eb", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1886, "license_type": "permissive", "max_line_length": 113, "num_lines": 62, "path": "/Dockerfile", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "\nFROM ubuntu:latest\n\n# PATH\nENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n# Spark\nENV SPARK_VERSION 2.3.1\nENV SPARK_HOME /usr/local/spark\nENV SPARK_LOG_DIR /var/log/spark\nENV SPARK_PID_DIR /var/run/spark\nENV PATH $PATH:$SPARK_HOME/bin\nENV PYSPARK_PYTHON /usr/bin/python3.6\nENV PYSPARK_DRIVER_PYTHON=/usr/bin/python3.6\nENV PYTHONUNBUFFERED 1\n# Java\nENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/\n# Python\nENV alias python=/usr/bin/python3.6\nENV PYTHONPATH /etc/app/src/\n\n# Install curl\nRUN apt-get update && apt-get install -y curl\n\n# Install Python 3.6, 2.7 is standard to ubuntu:latest\nRUN apt-get update && \\\n apt-get install -y python3.6 && \\\n apt-get install -y python3-pip\n\n# Install Java, had an issue found this: https://stackoverflow.com/questions/46795907/setting-java-home-in-docker\nRUN apt-get update && apt-get install -y openjdk-8-jdk && \\\n apt-get install -y ant && apt-get clean && \\\n rm -rf /var/lib/apt/lists/ && \\\n rm -rf /var/cache/oracle-jdk8-installer;\n\n# Set workspace\nWORKDIR /etc/app\n\n# Add all the project files to the\nADD . /etc/app\n\n# Download Spark\nRUN curl -L http://www.us.apache.org/dist/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop2.7.tgz \\\n | tar -xzp -C /usr/local/ && \\\n ln -s spark-${SPARK_VERSION}-bin-hadoop2.7 ${SPARK_HOME}\n\n# Make run.sh executable\nRUN chmod +x /etc/app/scripts/run.sh && chmod +x /etc/app/src/data/input_data\n\n# Give give -rw-r--r-- to python files\nRUN chmod 0644 /etc/app/src/etl.py && \\\n chmod 0644 /etc/app/src/geoip.py && \\\n chmod 0644 /etc/app/src/data/input_data\n\nEXPOSE 8080 8081 6066 \\\n 7077 4040 7001 \\\n 7002 7003 7004 \\\n 7005 7006\n\nENTRYPOINT [\"./scripts/run.sh\"]\n\n# Replace the Entrypoint running the run.sh with this\n# to keep the container alive, tobe able to debug the container\n# ENTRYPOINT [\"tail\", \"-f\", \"/dev/null\"]" }, { "alpha_fraction": 0.6243016719818115, "alphanum_fraction": 0.6298882961273193, "avg_line_length": 24.571428298950195, "blob_id": "fbc9ac73f2a3a05852f4d095dbb3677678f7e00f", "content_id": "830a13247f6aa8446bdfdd8d31e0cd805aaa47e5", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "permissive", "max_line_length": 60, "num_lines": 28, "path": "/test/fakesparksession.py", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "import logging\nfrom pyspark.sql import SparkSession\n\nimport unittest\n\nclass PySparkTest(unittest.TestCase):\n\n @classmethod\n def suppress_py4j_logging(cls):\n logger = logging.getLogger(\"py4j\")\n logger.setLevel(logging.WARN)\n\n @classmethod\n def create_testing_pyspark_session(cls):\n return (SparkSession.builder\n .master(\"local[2]\")\n .appName(\"my-local-testing-pyspark-context\")\n .enableHiveSupport()\n .getOrCreate())\n\n @classmethod\n def setUpClass(cls):\n cls.suppress_py4j_logging()\n cls.spark = 
cls.create_testing_pyspark_session()\n\n @classmethod\n def tearDownClass(cls):\n cls.spark.stop()\n" }, { "alpha_fraction": 0.6188467144966125, "alphanum_fraction": 0.6244725584983826, "avg_line_length": 26.346153259277344, "blob_id": "fe50ca6fa3975efbc46fe872e8a4f478f363f661", "content_id": "2990b0de72ecb9ee2f0e7c79c8862f54f8331071", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "permissive", "max_line_length": 109, "num_lines": 52, "path": "/src/geoip.py", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\nPython script to transform ip to country and city, has been modified\n\"\"\"\n\nimport socket\nimport pygeoip\nimport os\n\n\"\"\" http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz \"\"\"\ncwd = os.getcwd()\nrawdata = pygeoip.GeoIP(cwd + \"/src/geoconvertdat/GeoLiteCity.dat\")\n\n\ndef ipquery(ip):\n \"\"\"Function to parse IP to country, city\n http://www.linuxx.eu/2014/05/geolocate-ip-with-python.html\n Have been slightly modified\n\n In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes.\n\n Args:\n ip: The actual IP used to parse the country and city from\n\n Returns: A str value of \"country-city\"\n\n \"\"\"\n\n try:\n socket.inet_aton(str(ip))\n data = rawdata.record_by_name(ip)\n\n if type(data) is dict:\n\n country = type(data['country_name']) is str and data['country_name'] or type(\n data['country_name']) and data['country_name'] or \"NotTraceable\"\n\n city = type(data['city']) and data['city'] or type(\n data['city']) is str and data['city'] or \"NotTraceable\"\n\n if isinstance(country, bytes):\n country = country.decode()\n\n if isinstance(city, bytes):\n city = city.decode()\n\n return country + \"-\" + city\n\n except socket.error:\n return \"NotTraceable-NotTraceable\"\n" }, { "alpha_fraction": 0.6662476658821106, "alphanum_fraction": 0.6900062561035156, "avg_line_length": 30.947792053222656, "blob_id": "cbaf446cbe48dcdca50cb6d0cafdbe5f6f885dd4", "content_id": "a92ebbdd08c0de53aebac4b300408dca466c3be5", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7955, "license_type": "permissive", "max_line_length": 567, "num_lines": 249, "path": "/README.md", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "# Overview\n\n## Trello Feature Board\n\n* [Board](https://trello.com/b/qigu0NnW/gdck)\n\n![alt text](https://cdna.artstation.com/p/assets/images/images/009/551/700/large/pavel-fedorchuk-geck04.jpg?1519637244)\n\nGarden of Data Creation Kit `(An edit to the famous G.E.C.K in the Bethesda Fallout game series`) is a project that processes large amounts of data, currently TSV files and performs extraction, transformation and loading (ETL).\n\n## Project Design\n\nThe design of this project is that it should be fast and easy to deploy and run. Below is a description of how the application should be deployment ready.\n\n![Screenshot](https://github.com/Thelin90/PySparkDocker/blob/master/images/deployment.png)\n\nMicroservices is an architectural style that structures an application as a collection of loosely coupled services. Therefore enrich business capabilities, such as continuous delivery/deployment of scale.\n\n## Data Processing\n\nApache Spark is the chosen tool used within this project. 
Spark is quick and very responsive tool to perform data processing with. It provides an analytic engine for large-scale data processing. It is a general distributed in-memory computing framework implemented in scala. Hence spark operates on distributed data collections. However it does not do distributed storage. Spark is proven to be much faster than the popular Apache Hadoop framework. Apache Spark and Hadoop have different purposes but works good together. A high level overview of Apache Spark below: \n\n![Screenshot](https://github.com/Thelin90/PySparkDocker/blob/master/images/spark.png)\n\nHadoop is essentially a distributed data infrastructure. It provides distributes big data collections across multiple nodes within a cluster of commodity servers. A high level overview of Apache Hadoop below: \n\n![Screenshot](https://github.com/Thelin90/PySparkDocker/blob/master/images/hadoop.png)\n\n\n## PySpark\n\nPySpark have some core concepts. Such as resilient distributed datasets and dataframes.\n\nThis project spefically take advantage of the dataframes.\n\nIt has derived from the resilient distributed datasets concept. Dataframes will provide a higher level of abstraction. Which implicitly allow a query language way of typing to manipulate data. The abstraction represents data and a schema. Which enable better interaction with front end applications.\n\n## Dataset\nThe dataset has the (default) values divided by tab in the given dataset provided (input data): field header (date, time, user_id, url, IP, user_agent_string). See attached source file input_data.\n\n## Setup\n\nClone directly from the source code:\n\n```bash\ngit clone https://github.com/Thelin90/G.D.C.K.git\n```\n\nDocker needs to be installed on the machine. It can also run locally without docker.\n\n### Requirements\n\n* Docker environment\n* Python ^3.6.5\n* Java ^8\n* Spark ^2.3.x `(mininum 2.3.0)`\n\nAssuming that Python, Docker and Java is already setup.\n\n### Setup Apache-Spark\n\nStart with downloading Spark (note that depending on your IDE, you need to specify your Spark location):\n\n- https://spark.apache.org/downloads.html\n\nSet your SPARK_HOME in `.bashrc`\n```bash\nSPARK_HOME='path-to-spark'\n```\n\nThen source the file\n\n```bash\nsource ~/.bashrc\n```\n\n#### PyCharm Example\n\nMark as source:\n\n```bash\nFile -> Settings -> Project Structure -> add root content '+' -> 'path-to-spark'\n```\n\nSet environment variables:\n\n```bash\nRun -> Edit Configuration -> Environment Variables -> add new environment variables\n```\n```bash\nNAME\t\t\t\tVALUE\nPYSPARK_PYTHON\t\t\t'path-to-python'\nPYSPARK_DRIVER_PYTHON\t\t'path-to-python'\n```\n\n### Testing\n\nRun the following command to run tests:\n\n```bash\npython3 -W ignore:ResourceWarning -m unittest test/unittest-etl-process.py\n```\n\n### Manual Run\n\nRemember to set PYTHONPATH in `~/.bashrc`\n\n```bash\nPYTHONPATH=path-to-proj/PySparkDocker/src/\n```\n\nThen `source ~/.bashrc`\n\n\nRun the script manually without using docker.\n\n```bash\nspark-submit src/etl.py\n```\n\n### Run ETL Docker\n\n```bash\ndocker build -t etl-cities-countries .\ndocker run etl-cities-countries\n```\n\n## Result\n\n* Top 5 Countries based on number of events\n* Top 5 Cities based on number of events \n* Top 5 Browsers based on number of unique users\n* Top 5 Operating systems based on number of unique users.\n\n```bash\ndate and time column is becomming one timestamp...\nThe user_agent_string is becomming os and browser...\nConverting IP adress to city and country... 
\nPrinting Transformed Dataframe Schema\n |-- eventID: long (nullable = false)\n |-- timestamp: timestamp (nullable = true)\n |-- user_id: string (nullable = true)\n |-- url: string (nullable = true)\n |-- os: string (nullable = true)\n |-- browser: string (nullable = true)\n |-- country: string (nullable = true)\n |-- city: string (nullable = true)\n\nPerform load\nTop 5 countries based on number of events\n+--------------+------+\n| country| count|\n+--------------+------+\n|United Kingdom|135831|\n| Ireland| 18522|\n| Sweden| 12143|\n| Norway| 6908|\n| Netherlands| 2131|\n+--------------+------+\nonly showing top 5 rows\n\nTop 5 cities based on number of events\n+------------+-----+\n| city|count|\n+------------+-----+\n|NotTraceable|15336|\n| Dublin| 5750|\n| London| 4827|\n| Manchester| 3736|\n| Bristol| 2801|\n+------------+-----+\nonly showing top 5 rows\n\nTop 5 Browsers based on number of unique users\n+--------------------+-----------------------+\n| browser|count(DISTINCT user_id)|\n+--------------------+-----------------------+\n| Safari 7.0| 27117|\n| Safari 8.0| 17941|\n|Chrome 37.0.2062.124| 5054|\n| Safari 7.0.6| 2804|\n| Safari 7.1| 2486|\n+--------------------+-----------------------+\nonly showing top 5 rows\n\nTop 5 Operating systems based on number of unique users\n+----------------+-----------------------+\n| os|count(DISTINCT user_id)|\n+----------------+-----------------------+\n| IPad iOS 8.0.2| 11126|\n| IPad iOS 7.1.2| 10786|\n| Windows 7| 6722|\n|iPhone iOS 8.0.2| 4651|\n|iPhone iOS 7.1.2| 4496|\n+----------------+-----------------------+\nonly showing top 5 rows\n\nSpark application ends\n```\n\n### Spark UI\n\nTo see the spark jobs process do the following while running the docker image:\n\n```bash\ndocker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' etl-cities-countries\n```\n\nThis will give the IP of the docker container, type following in the web browser:\n\n`container-ip:4040`\n\nThis will give the following output:\n\n![Screenshot](https://github.com/Thelin90/PySparkDocker/blob/master/images/sparkui.png)\n\n## License\n\nMIT License\n\nCopyright (c) [2018] [Simon Thelin]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n[Back To The Top](#read-me-template)\n\n---\n\n## Author Info\n\n* [Simon Thelin](https://www.linkedin.com/in/simon-thelin-3ba96986/)\n\n[Back To The Top](#read-me-template)\n" }, { "alpha_fraction": 0.752010703086853, "alphanum_fraction": 0.752010703086853, "avg_line_length": 31.434782028198242, "blob_id": "27b855b3bb9675ebc846937a224d9e3a18fd83a7", "content_id": "b54dcb1d8491cdd045112f839504a41df3eecf8b", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 746, "license_type": "permissive", "max_line_length": 151, "num_lines": 23, "path": "/docs/PULL_REQUEST_TEMPLATE.md", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "# Pull Request Template\n\nPlease read this template to ensure your pull request adheres to the given guidelines.\n\n## Guidelines\n- Use the following format for header: `TD/xxxx/G.D.C.K-text-what-has-been-done` (the repo owner will fill in the xxxx to fit the actual ticket number)\n- Link additions should be added to the bottom of the relevant category.\n- New features or improvements to the existing features are welcome.\n\n`NOTE! ACHTUNG!`\n- Search previous suggestions before making a new one, as yours may be a duplicate.\n- Do not use `#` for commenting the PR commit message, only use `-`.\n\nGiven example:\n\n```\nTD-xxxx/G.D.C.K-some-fixes-to-something\n\n- New feature A\n - additional link\n- Improvement to feature B\n```\nThanks for contributing!\n" }, { "alpha_fraction": 0.7404580116271973, "alphanum_fraction": 0.7557252049446106, "avg_line_length": 25.399999618530273, "blob_id": "0564913906bfe9bbe39a4b0f0d74e9bf5a68a34e", "content_id": "66cf32f9fd237e9e0b70d16627505d966992ad72", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "permissive", "max_line_length": 54, "num_lines": 5, "path": "/test/__init__.py", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "# For relative imports to work in Python 3.6\nfrom os import path\nimport sys\n\nsys.path.append(path.dirname(path.realpath(__file__)))" }, { "alpha_fraction": 0.4475524425506592, "alphanum_fraction": 0.6853147149085999, "avg_line_length": 14.88888931274414, "blob_id": "15252489b4854128f28486d0498c5e67c5ad7a97", "content_id": "5a051799a5149e6329ec8141c1ecf4a3e8f96c93", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 143, "license_type": "permissive", "max_line_length": 22, "num_lines": 9, "path": "/scripts/requirements.txt", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "httpagentparser==1.8.1\nnumpy==1.15.0\npandas==0.23.4\npy4j==0.10.7\npygeoip==0.3.2\npyspark==2.3.2\npython-dateutil==2.7.3\npytz==2018.4\nsix==1.11.0\n" }, { "alpha_fraction": 0.6843373775482178, "alphanum_fraction": 0.6867470145225525, "avg_line_length": 30.923076629638672, "blob_id": "506e9683b54f192e1920cb3cc55bde603338da42", "content_id": "001abec6b008146ba3b8da8c113370323d9ade9b", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "permissive", "max_line_length": 114, 
"num_lines": 13, "path": "/test/unittest-etl-process.py", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom .fakesparksession import PySparkTest\nfrom etl import extract\n\n\nclass SimpleTest(PySparkTest):\n \"\"\"\n Will perform test on the functions used within the ETL process with a fake spark session\n \"\"\"\n def test_basic(self):\n expected_df = extract(self.spark)\n\n self.assertEquals(expected_df.schema.names, [\"date\", \"time\", \"user_id\", \"url\", \"ip\", \"user_agent_string\"])\n" }, { "alpha_fraction": 0.7422680258750916, "alphanum_fraction": 0.7628865838050842, "avg_line_length": 31.33333396911621, "blob_id": "1dfe90110602e682728026fb086f76f4d9bd628c", "content_id": "c88a7019a69e12ee4353d654b01372f8f7354c8c", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 97, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/scripts/run.sh", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\npython3.6 -m pip install -r scripts/requirements.txt\nspark-submit src/etl.py\n" }, { "alpha_fraction": 0.645996630191803, "alphanum_fraction": 0.6500644683837891, "avg_line_length": 28.214492797851562, "blob_id": "dd9e91c9780ba5166792bdb4c6beeee7a19e508d", "content_id": "fa967ba6ab9f466180def161e33d3eaee34b6411", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10079, "license_type": "permissive", "max_line_length": 116, "num_lines": 345, "path": "/src/etl.py", "repo_name": "Thelin90/G.D.C.K", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"\nPython script to initiate and perform the ETL process\n\"\"\"\n\nimport os\nimport pyspark\nimport httpagentparser\nfrom pyspark.sql.functions import struct\nfrom pyspark.sql.functions import *\nfrom os.path import abspath\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import TimestampType, StringType\nfrom geoip import ipquery\n\n\ndef addMonoticallyIncreasingID(_df, _colname):\n \"\"\"Adds monotically increasing ID column to Spark Dataframe\n\n Args:\n _df: Spark Dataframe\n _colname: Name of the col containing the monotoically increasing ID\n\n Returns: Spark Dataframe with a column containing a monotoically increasing ID\n \"\"\"\n if isinstance(_colname, str):\n return _df.withColumn(_colname, monotonically_increasing_id())\n else:\n raise ValueError\n\n\ndef castToTimeStamp(_df, colname):\n \"\"\" Function casts column values to TimestampType\n Args:\n _df: Spark Dataframe\n colname: Name of column\n\n Returns\n \"\"\"\n coldatatype = _df.schema[colname].dataType\n if isinstance(coldatatype, TimestampType):\n raise ValueError\n else:\n print(\"Column \" + colname + \" with datatype \" + str(coldatatype) + \" typecasting to TimestampType\")\n return _df.withColumn(colname, _df[colname].cast(TimestampType()))\n\n\ndef renameColumn(_df, _oldcolname, _newcolname):\n \"\"\"Rename a column name\n Args:\n _df:\n _oldcolname:\n _newcolname:\n\n Returns:\n \"\"\"\n if isinstance(_oldcolname, str) and isinstance(_newcolname, str):\n return _df.withColumnRenamed(_oldcolname, _newcolname)\n else:\n raise ValueError(\"Column names must be of typ str\")\n\n\ndef concatColName(_df, _newcol, _oldcols, separator):\n \"\"\"\n Args:\n _df: Spark Dataframe\n newcol: Name of the new column\n oldcols: The names of the old columns\n separator: The separator value, examples: `;` `-` 
`.` `,`\n\n Returns\n \"\"\"\n if isinstance(_newcol, str) and isinstance(_oldcols, list):\n return _df.withColumn(_newcol, concat_ws(separator, _df[_oldcols[0]], _df[_oldcols[1]]))\n else:\n raise ValueError\n\n\ndef splitCol(_df, _split, _colnames):\n \"\"\"Creates 3 new columns from 1 original column\n\n TODO: N new columns from 1 original column\n\n Args:\n _dataframe: The dataframe that needs to split columns\n _split: The value to split on, ex: \"-\", \",\", \"*\"\n _colnames: The new names for the new columns\n\n Returns:\n\n \"\"\"\n split_col = pyspark.sql.functions.split(_df[_colnames[0]], _split)\n ret_df = _df.withColumn(_colnames[1], split_col.getItem(0))\n return ret_df.withColumn(_colnames[2], split_col.getItem(1))\n\n\ndef dropColumns(_df, colnames):\n \"\"\"Drops N number of undesired columns\n\n Args:\n _df: Spark Dataframe\n columnames: List containing column names to be dropped\n\n Returns: Spark Dataframe\n\n \"\"\"\n if isinstance(colnames, list):\n return _df.drop(*colnames)\n else:\n raise ValueError(\"colnames is not a list\")\n\n\ndef mergeDataframes(_df0, _df1, _colnames, _orderbycols):\n \"\"\"Merges two Spark Dataframes to one\n\n Args:\n _df0: Spark Dataframe 0\n _df1: Spark Dataframe 1 to be joined with Spark Datafram 0\n colnames: list of column names to be set\n orderbycol: list containing id value to join by\n\n Returns:\n\n \"\"\"\n print(\"Merge Spark Dataframes...\")\n ret_df = _df0.join(_df1, _df0[_orderbycols[0]] == _df1[_orderbycols[1]])\n ret_df = ret_df.orderBy(_orderbycols[0], ascending=True)\n ret_df = ret_df.select(*_colnames)\n\n return ret_df\n\n\ndef udfCreator(_df, _functioncall, _newcolname, _oldcolname, datatype):\n \"\"\"\n Args:\n _df: Spark Dataframe\n _functioncall: The function to be called\n _newcolname: The name of the new column containing the extracted values\n _oldcolname: The column values from a specific column to be sent, example: ip addresses\n datatype: Which datatype to work with, example: StringType()\n\n Returns: Spark Dataframe with one new additional column containing values from the UDF call\n \"\"\"\n if isinstance(_newcolname, str) and isinstance(_oldcolname, str):\n print(\"Making UDF call to function: \" + str(_functioncall))\n _udf = udf(_functioncall, datatype)\n return _df.withColumn(_newcolname, _udf(_df[_oldcolname]))\n else:\n raise Exception(\"Column names must be of str\")\n\n\ndef getOsBrowser(value):\n \"\"\"Calls the httpagentparser and retrieves the os and browser information\n\n Args:\n value: Each column value of user_agent_string\n\n Returns: The browser and os as a string\n\n \"\"\"\n if isinstance(value, str):\n return str(httpagentparser.simple_detect(value)[0] + \"-\" + httpagentparser.simple_detect(value)[1])\n else:\n raise ValueError\n\n\ndef getCityAndCountry(_ip):\n \"\"\"Function to make call to ipquery, may seem redundant but it makes the code more readable when doing UDF calls\n\n Args:\n _ip: IP address value\n\n Returns: A value of country-city from the IP\n \"\"\"\n return ipquery(_ip)\n\n\ndef load(_df):\n \"\"\"Load function to print the result and to save the dataframe for api calls\n\n TODO: Save dataframe/dataframes to postgres\n\n Args:\n _df: The final dataframe\n\n Returns: Nothing\n\n \"\"\"\n # Peform load process\n\n print(\"Displaying the top 5 rows\")\n _df.show(5)\n\n print(\"Top 5 countries based on number of events\")\n _df.groupBy(\"country\").count().orderBy(\"count\", ascending=False) \\\n .show(5)\n\n print(\"Top 5 cities based on number of events\")\n 
_df.groupBy(\"city\").count().orderBy(\"count\", ascending=False) \\\n .show(5)\n\n print(\"Top 5 Browsers based on number of unique users\")\n\n _df.groupBy(\"browser\").agg(countDistinct(\"user_id\")) \\\n .orderBy(\"count(DISTINCT user_id)\", ascending=False) \\\n .show(5)\n\n print(\"Top 5 Operating systems based on number of unique users\")\n _df.groupBy(\"os\").agg(countDistinct(\"user_id\")) \\\n .orderBy(\"count(DISTINCT user_id)\", ascending=False) \\\n .show(5)\n\n\ndef transform(_df, _spark):\n \"\"\"This function handles the ransformation of the dataset (biggest part)\n\n Args:\n _df: Initial, unhandled dataframe straight from extraction\n _spark: sparksession\n\n Returns: Final and structured dataframe\n\n \"\"\"\n print(\"Spark Dataframe is being cleaned....\")\n\n print(\"date and time column is becomming one timestamp...\")\n newcolname = \"timestamp\"\n oldcolnames = [\"date\", \"time\"]\n separator = \" \"\n _df = concatColName(_df, newcolname, oldcolnames, separator)\n _df = dropColumns(_df, oldcolnames)\n _df = castToTimeStamp(_df, newcolname)\n\n print(\"The user_agent_string is becomming os and browser...\")\n # Getting the browser and OS from user_agent_string (amazingly fast! wow!)\n newcolname = \"getOsBrowser\"\n oldcolname = \"user_agent_string\"\n _df = udfCreator(_df, getOsBrowser, newcolname, oldcolname, StringType())\n\n # Cleaning Os Browser result\n print(\"Cleaning OS, Browser result\")\n separator = \"-\"\n newcolnames = [\"getOsBrowser\", \"os\", \"browser\"]\n dropcols = [\"getOsBrowser\", \"user_agent_string\"]\n _df = splitCol(_df, separator, newcolnames)\n _df = dropColumns(_df, dropcols)\n\n # Cleaning IP addresses\n print(\"Cleaning IP addresses\")\n separator = \",\"\n newcolnames = [\"ip\", \"ip1\", \"ip2\"]\n dropcols = ['ip']\n _df = splitCol(_df, separator, newcolnames)\n _df = dropColumns(_df, dropcols)\n\n # Adding eventID to the dataframe, so that we can join other dataframes\n newcolname = \"eventID\"\n _df = addMonoticallyIncreasingID(_df, newcolname)\n\n print(\"Converting IP adress to city and country... 
\")\n newcolname = \"getCityAndCountry\"\n oldcolname = \"ip1\"\n _df = udfCreator(_df, getCityAndCountry, newcolname, oldcolname, StringType())\n\n # Modify ip dataframe for countries and cities of the first ip column\n separator = \"-\"\n newcolnames = [\"getCityAndCountry\", \"country\", \"city\"]\n dropcols = [\"getCityAndCountry\", \"eventID\", \"timestamp\", \"user_id\", \"url\", \"os\", \"browser\", \"ip2\"]\n newcolname = \"ip\"\n oldcolname = \"ip1\"\n _ip = splitCol(_df, separator, newcolnames)\n _ip = dropColumns(_ip, dropcols)\n _ip = renameColumn(_ip, oldcolname, newcolname)\n\n # create a monotonically increasing id\n newcolname = \"id\"\n _ip = addMonoticallyIncreasingID(_ip, newcolname)\n\n # Merge countries and cities to org Spark Dataframes\n newcolnames = [\"eventID\", \"timestamp\", \"user_id\", \"url\", \"os\", \"browser\", \"country\", \"city\", \"ip\"]\n orderbycols = [\"eventID\", \"id\"]\n ret_df = mergeDataframes(\n _df,\n _ip,\n newcolnames,\n orderbycols,\n )\n\n return ret_df\n\n\ndef extract(_spark):\n \"\"\"Extracting the tsv file into a DataFrame\n\n Args:\n _spark: The actual spark session\n\n Returns: Initial dataframe before transform\n\n \"\"\"\n cwd = os.getcwd()\n # Initial read of the given TSV file\n _df = _spark.read.option(\"delimiter\", \"\\t\")\\\n .csv(cwd + \"/src/data/input_data\")\\\n .toDF(\"date\", \"time\", \"user_id\", \"url\", \"ip\", \"user_agent_string\")\n\n _spark.sparkContext.setLogLevel(\"WARN\")\n\n return _df\n\n\ndef main(_spark):\n # Perform extraction\n print(\"Perform extraction\")\n df = extract(spark)\n\n # Perform transformation\n print(\"Perform transformation\")\n df = transform(df, spark)\n\n print(\"Printing Transformed Dataframe Schema\")\n df.printSchema()\n\n # Load the data, do some printing, make it searchable for the API, maybe a postgres\n print(\"Perform load\")\n load(df)\n\n print(\"Spark application ends\")\n\n # Stop spark application\n spark.stop()\n\n\nif __name__ == \"__main__\":\n # Initial setup of spark project\n warehouse_location = abspath('spark-warehouse')\n spark = SparkSession \\\n .builder \\\n .appName(\"CitiesCountriesTest\") \\\n .config(\"spark.sql.warehouse.dir\", warehouse_location) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n main(spark)\n" } ]
10
Arkkav/git-learing
https://github.com/Arkkav/git-learing
d0b748d6304a1357a88ac227b76387b03e2fd4a4
4888dbe55f1ab77f2c3972ed12d9fe03f54b991e
fc4ac2cb918c9d8ca4f71d45657a5991ea8c54ba
refs/heads/master
2021-01-07T07:30:13.035094
2020-03-31T08:11:40
2020-03-31T08:11:40
241,614,959
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 15.777777671813965, "blob_id": "4b3878dbf7e6f7ca78759c26d05edfe5ea5fb12d", "content_id": "674b63226989a3e5758f6090c591cd1405228dfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 36, "num_lines": 9, "path": "/Django_projects/vk_oauth/vk_oauth/apps/articles/views.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\n\n\ndef index(request):\n\treturn HttpResponse(\"Привет, мир!\")\n\n\ndef test(request):\n\treturn HttpResponse(\"Test page\")" }, { "alpha_fraction": 0.6798780560493469, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 22.5, "blob_id": "855fff72efeea51d23aca1e346a5a3815e793dba", "content_id": "eec7a8f21692a18311c71ffc0b3102879c53fa3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 50, "num_lines": 14, "path": "/Python_stepik_2/1.5.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "class MoneyBox:\n\n\tdef __init__(self, capacity):\n\t\t# конструктор с аргументом – вместимость копилки\n\t\tself.capacity = capacity\n\t\tself.count = 0\n\n\tdef can_add(self, v):\n\t\t# True, если можно добавить v монет, False иначе\n\t\t\treturn self.count + v <= self.capacity\n\n\tdef add(self, v):\n\t\t# положить v монет в копилку\n\t\tself.count += v" }, { "alpha_fraction": 0.4110429584980011, "alphanum_fraction": 0.4519427418708801, "avg_line_length": 24.789474487304688, "blob_id": "b64f815a42415b83cbb2d9cb49d13c89ba9e52f9", "content_id": "6216bec9c6c429d744dd7dee7fc93470e3e7d527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/MyPr1/ex3.7.4.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "a = ('\\n'.join(input().lower() for i in range(int(input())))).split('\\n')\nx = 0\ny = 0\nfor s in a:\n\tif s[0:2] == 'юг':\n\t\ty -= int(s[3:])\n\telif s[0:5] == 'запад':\n\t\tx -= int(s[6:])\n\telif s[0:6] == 'восток':\n\t\tx += int(s[7:])\n\telif s[0:5] == 'север':\n\t\ty += int(s[6:])\nprint(str(x) + ' ' + str(y))\n\n# s = {'север': 0, 'запад': 0, 'юг': 0, 'восток': 0}\n# for i in range(int(input())):\n# k = input().split()\n# s[k[0]] += int(k[1])\n# print(s['восток'] - s['запад'], s['север'] - s['юг'])" }, { "alpha_fraction": 0.6132075190544128, "alphanum_fraction": 0.6320754885673523, "avg_line_length": 16.83333396911621, "blob_id": "aafb89885ca199c9b477a8768556461600af3b65", "content_id": "69ef3b2aa248b2fbc2f70f0b1236e878d72792df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/Python_stepik_2/3.3.9.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tprint(re.sub(r\"(\\w)\\1+\", r'\\1', line))" }, { "alpha_fraction": 0.540145993232727, "alphanum_fraction": 0.5839415788650513, "avg_line_length": 21.83333396911621, "blob_id": "8a18f08225cde2f00b7e8b3282e28a98358f6075", "content_id": "ab9914576b0697636d9645953a329b02c8f00d76", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 62, "num_lines": 6, "path": "/Python_stepik_2/test.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "a = set([1, 2, 3])\nb = set([1, 3, 2])\n\nprint(a == b, type(a), sorted(b))\n\ns = \"abba com mother bill mother com abba dog abba mother com\"\n" }, { "alpha_fraction": 0.5813953280448914, "alphanum_fraction": 0.6511628031730652, "avg_line_length": 31.125, "blob_id": "4d30309fd74e9fd1f215536dba5bf6f235cae44f", "content_id": "e2fdc7c55b697444b689fad3df860ade54260594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 82, "num_lines": 8, "path": "/MyPr1/ex17.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import requests\n\nr = requests.get('https://stepic.org/media/attachments/course67/3.6.3/699991.txt')\nb = r.text.strip()\nwhile b[0:2] !='We':\n r = requests.get('https://stepic.org/media/attachments/course67/3.6.3/' + b)\n b = r.text.strip()\n print(b)\n\n" }, { "alpha_fraction": 0.5072045922279358, "alphanum_fraction": 0.5072045922279358, "avg_line_length": 19.47058868408203, "blob_id": "8cce65453e5ec2ce4f038b0a6cf68e7864f72aa4", "content_id": "c8562ace5d78ea7961d8806aed73796c3142b889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/MyPr1/ex19.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "a, b, c, d = input(), input(), input(), input()\ng = {}\nfor i in range(len(a)):\n g[a[i]] = b[i]\n\ndef cypher(a, d):\n b = ''\n for i in range(len(a)):\n b += d[a[i]]\n return b\ndef uncode(d):\n uncode = {}\n for key, value in d.items():\n uncode[value] = key\n return uncode\nprint(cypher(c, g))\nprint(cypher(d, uncode(g)))" }, { "alpha_fraction": 0.41081079840660095, "alphanum_fraction": 0.4216216206550598, "avg_line_length": 22.125, "blob_id": "e35a367eab4f8d809beb1114231571f9ad791909", "content_id": "64157a156220be3636d99a1d2aeb86b1688f872d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 50, "num_lines": 8, "path": "/MyPr1/ex10.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "\nlst = [i.lower() for i in input().split()]\ns = {}\nfor i in lst:\n if s.get(i) == None:\n s[i] = 1\n else:\n s[i] += 1\nprint(*[i + ' ' + str(s[i]) for i in s], sep='\\n')" }, { "alpha_fraction": 0.6003372669219971, "alphanum_fraction": 0.6003372669219971, "avg_line_length": 22.760000228881836, "blob_id": "8f4afefea8a30bf4fe64ec43b19ba0d04d8d5c29", "content_id": "610afc42a15a1c74842f2e3f794d2dbaacb8aa13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 102, "num_lines": 25, "path": "/Python_stepik_2/3.5.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import json\n\n\ndef count_descendant(Vertex): # для класса Vertex возвращает всех потомков []\n\tch = [Vertex]\n\tfor i in js:\n\t\tif Vertex in i[\"parents\"]:\n\t\t\tch.append(i[\"name\"])\n\t\t\tch.extend(count_descendant(i[\"name\"]))\n\treturn ch\n\n\njs = json.loads(input())\nprint(*sorted([i[\"name\"] + ' : ' + str(len(set(count_descendant(i[\"name\"])))) for i in js]), 
sep='\\n')\n\n\n\n# import json\n#\n# cls = {c['name']: c['parents'] for c in json.loads(input())}\n#\n# isbase = lambda b, d: b == d or any(isbase(b, c) for c in cls[d])\n#\n# for p in sorted(cls):\n# print(p, ':', len({c for c in cls if isbase(p, c)}))" }, { "alpha_fraction": 0.5525835752487183, "alphanum_fraction": 0.5610942244529724, "avg_line_length": 21.243244171142578, "blob_id": "c01f5e9efb826bd08cadeae8fb6293a4299f3928", "content_id": "9f939bcde84169a0a33803f3395e4e3c06dd4c29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "no_license", "max_line_length": 83, "num_lines": 74, "path": "/Python_stepik_2/2.1.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "n = int(input())\nVertex = {} # вершина : множество прямых предков\n\n\ndef get_direct_ancestor(a, vertex): # возвращает множество всех предков\n\n\tstack = []\n\tfor i in vertex[a]:\n\t\tstack.append(i)\n\t\tif i:\n\t\t\tstack.extend(get_direct_ancestor(i, vertex))\n\treturn set(stack)\n\n\nfor i in range(n):\n\tVertexBuff = input().split()\n\tVertex[VertexBuff[0]] = set()\n\tif len(VertexBuff) != 1:\n\t\t[Vertex[VertexBuff[0]].add(i) for i in VertexBuff[2:]]\nq = int(input())\nex = [input() for i in range(q)]\nre = []\nfor i in range(len(ex) - 1, 0, -1):\n\tif ex[i] not in Vertex:\n\t\tcontinue\n\tanc = get_direct_ancestor(ex[i], Vertex)\n\tfor j in range(i):\n\t\tif ex[i] in re:\n\t\t\tcontinue\n\t\tif ex[j] == ex[i] or ex[j] in anc:\n\t\t\tre.append(ex[i])\nfor i in range(len(re) - 1, -1, -1):\n\tprint(re[i])\n\n# \tчто это за генератор?\n# def checkdup(d):\n# return cls[d] is None or any(map(checkdup, cls[d]))\n#\n# cls = {d: set(b[1:]) for _ in range(int(input())) for d, *b in [input().split()]}\n#\n# for _ in range(int(input())):\n# c = input()\n# if checkdup(c):\n# print(c)\n# cls[c] = None\n\n\n# # код преподавателя\n# n = int(input())\n# classes = {}\n# for i in range(n):\n# line = input()\n# parts = line.split(\" : \")\n# cls = parts[0]\n# if len(parts) == 1:\n# classes[cls] = []\n# else:\n# classes[cls] = parts[1].split(\" \")\n#\n#\n# def check(src, dest):\n# if src == dest:\n# return True\n# return any([check(child, dest) for child in classes[src]])\n#\n#\n# m = int(input())\n# used = []\n#\n# for i in range(m):\n# cls = input()\n# if any([check(cls, used_one) for used_one in used]):\n# print(cls)\n# used.append(cls)" }, { "alpha_fraction": 0.3731343150138855, "alphanum_fraction": 0.45895522832870483, "avg_line_length": 32.54166793823242, "blob_id": "8d45181b2b54373826f4c7fad51fd1cc2c095e86", "content_id": "c60dad311c2946929cf166fe94f10669a2d78117", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 161, "num_lines": 24, "path": "/MyPr1/ex3.7.5.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3380_5.txt', 'r') as file1:\n\td = {'1' : [0, 0], '2' : [0, 0], '3' : [0, 0], '4' : [0, 0], '5' : [0, 0], '6' : [0, 0], '7' : [0, 0], '8' : [0, 0], '9' : [0, 0], '10' : [0, 0], '11' : [0, 0]}\n\ta = file1.read().strip().split('\\n')\n\ta = [i.split() for i in a]\nfor ai in a:\n\td[ai[0]][0] += int(ai[2])\n\td[ai[0]][1] += 1\nfor key in d:\n\tif d[key] == [0, 0]:\n\t\td[key] = '-'\n\telse:\n\t\td[key] = str(d[key][0] / d[key][1])\nprint(*[i + ' ' + d[i] for i in d], sep='\\n')\n\n# d = {i: [] for i in range(1,12)}\n# with 
open(r'D:\\Новая папка\\dataset_3380_5.txt','r', encoding='utf-8') as f1:\n# for i in f1:\n# d[int(i.split()[0])].append(float(i.split()[2]))\n#\n# for i in range(1,12):\n# if d[i]:\n# print(i, sum(d[i])/len(d[i]))\n# else:\n# print(i, '-')" }, { "alpha_fraction": 0.6646825671195984, "alphanum_fraction": 0.6726190447807312, "avg_line_length": 23.047618865966797, "blob_id": "580ec1d93ba093f7874df3dd57cfba997dee2469", "content_id": "e0c2899ee67f19c0085a2c0f2db0c77da087b1f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/Python_stepik_2/2.1.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "try:\n\tfoo()\nexcept (AssertionError, ZeroDivisionError) as e:\n\tprint(type(e).__name__)\nexcept\tArithmeticError as e:\n\tprint('ArithmeticError')\n\n\n\n\n# except Exception as e:\n\t# print(str(type(e))[8:-2])\n\t# print(type(e).__name__)\n# \tif 'ZeroDivisionError' in str(type(e)):\n# \t\tprint(\"ZeroDivisionError\")\n# \telif 'ArithmeticError' in str(type(e)):\n# \t\tprint(\"ArithmeticError\")\n# except AssertionError as e:\n# \tprint(type(e).__name__)\n# print(type(AssertionError()))\n# print(str(type(AssertionError()))[8:-2])" }, { "alpha_fraction": 0.6084848642349243, "alphanum_fraction": 0.6278787851333618, "avg_line_length": 29.592592239379883, "blob_id": "996d0e463cd70fd5ec339de74ff800e8897c13b3", "content_id": "6206dc923a6a316974b4d2dd061dc33130338ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1044, "license_type": "no_license", "max_line_length": 106, "num_lines": 27, "path": "/Interview_Tasks/habr_1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# Есть файл, в котором содержаться слова разделённые пробелом.\n# Например: \"abba com mother bill mother com abba dog abba mother com\".\n# Нужно найти и вывести тройку слов, которые чаще всего встречаются вместе (порядок не имеет значения).\n# То есть в моём примере тройки слов это \"abba com mother\", \"com mother bill\", \"mother bill mother\" и т.д.\n# Тут правильным ответом должно быть \"abba com mother\" (частота — 3 раза).\n# https://habr.com/en/post/439576/\n\ns = \"abba com mother bill mother com abba dog abba mother com\".split()\na = []\nd = dict()\nmax_i = 0\nmax_s = ''\nfor i in range(1,len(s)-1):\n\tb = sorted([s[i - 1], s[i], s[i + 1]])\n\ta.append(b)\n\tc = str(b)\n\tif b in a:\n\t\tif c in d:\n\t\t\td[c] += 1\n\t\telse:\n\t\t\td[c] = 1\n\t\tif d[c] > max_i:\n\t\t\tmax_i = d[c]\n\t\t\tmax_s = s[i - 1] + ' ' + s[i] + ' ' + s[i + 1]\n\nprint(d)\nprint(max_s)" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 24.66666603088379, "blob_id": "38aa4010566c6fbd377706c19ad8f48309279acd", "content_id": "9aef216ae3c16cf94ddfb72d5ffd4ef1de97cd77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/MyPr1/codewars1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "def number_to_string(num):\n\treturn str(num)\nprint(number_to_string(input()))" }, { "alpha_fraction": 0.45652174949645996, "alphanum_fraction": 0.489130437374115, "avg_line_length": 22.08333396911621, "blob_id": "60d9e47cd68d7b50be0b8e461cad6dcb72a286e3", "content_id": "7029cc47bb7d02a0a41ce363c0427fb5c75c8bc8", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/MyPr1/example1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "numbers = [int(i) for i in input().split()]\nsums = ''\nl = len(numbers)\nif l == 1:\n sums = str(numbers[0])\nelse:\n j = 0\n while j < l - 1:\n sums += str(numbers[j - 1] + numbers[j + 1]) + ' '\n j += 1\n sums += str(numbers[l - 2] + numbers[0])\nprint(sums)" }, { "alpha_fraction": 0.6374622583389282, "alphanum_fraction": 0.6374622583389282, "avg_line_length": 22.5, "blob_id": "c82d11a570760586cd5505d197a92c2dd9d60bea", "content_id": "1795de39c5aecd22d5a29f8390c815f72f8bbd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 75, "num_lines": 14, "path": "/Python_stepik_2/2.2.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import simplecrypt\n\nwith open(\"encrypted.bin\", \"rb\") as inp, open(\"passwords.txt\", \"r\") as pas:\n\tencrypted = inp.read()\n\t# a = pas.read().strip().split('\\n')\n\tfor i in pas:\n\t\ta = i.strip()\n\t\ttry:\n\t\t\ts = simplecrypt.decrypt(a, encrypted)\n\t\texcept simplecrypt.DecryptionException:\n\t\t\tprint('Error')\n\t\telse:\n\t\t\tprint(a)\n\t\t\tprint(s)\n\n\n" }, { "alpha_fraction": 0.48759007453918457, "alphanum_fraction": 0.5572457909584045, "avg_line_length": 20.55172348022461, "blob_id": "83e9480b9a6f6ceb6efa7fb275f7576a5cca2b56", "content_id": "9bdb3361b56da7a89e8de076809112b3b57c1a21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 64, "num_lines": 58, "path": "/Python_stepik_2/1.3.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "def closest_mod_5(x):\n\treturn (x if x % 5 == 0 else x + (5 - x % 5))\nprint(closest_mod_5(int(input())))\n\n# closest_mod_5 = lambda x: (x + 4) // 5 * 5\n\n# a=1\n# b=2\n# lst = [5,6,7,8]\n# dct = {\"1\":11,\"2\":22}\n# print(a, b, lst, dct)\n#\n# def pr(aa, bb, cc, *llst, **ddct):\n# print(aa) #=a\n# print(bb) #=b\n# print(cc) #возьмётся из списка\n# print(llst) #остатки списка в виде кортежа\n# print(lst) #глобальная переменная\n# for k in ddct:\n# print(k, ddct[k])\n# print(ddct) #словарь в к-й попали dct, r=8,t=6\n# print(*llst) #элементы кортежа\n# print(*ddct) #ключи словаря\n# print(*dct) #глобальная переменная\n# pr(a, b, *lst, **dct, r=8,t=6)\n# 1 2 [5, 6, 7, 8] {'1': 11, '2': 22}\n# 1\n# 2\n# 5\n# (6, 7, 8)\n# [5, 6, 7, 8]\n# 1 11\n# 2 22\n# r 8\n# t 6\n# {'1': 11, '2': 22, 'r': 8, 't': 6}\n# 6 7 8\n# 1 2 r t\n# 1 2\n\nКод проверки времени выполнения функций\nimport time\ndef one(a, b, c):\n\treturn a + b + c\n\n\ndef two(*args):\n\treturn args[0] + args[1] + args[2]\ncount = 9000000\n_startTime = time.time()\nfor num in range(count):\n\tone(1, 2, 3)\nprint(\"Positional test:\", time.time() - _startTime)\n\n_startTime = time.time()\nfor num in range(count):\n\ttwo(1, 2, 3)\nprint(\"Positional args as list test:\", time.time() - _startTime)" }, { "alpha_fraction": 0.4267834722995758, "alphanum_fraction": 0.46057572960853577, "avg_line_length": 24, "blob_id": "bd284837e79d5efee2efbfea4ab10d0b29403308", "content_id": "1d245b720e7145422cada3b137b9ab314de46526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 799, "license_type": "no_license", "max_line_length": 84, "num_lines": 32, "path": "/MyPr1/ex13.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# s = ''\n# a = []\n# line = ''\n# with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_3.txt', 'r') as inf:\n# for line in inf:\n# line += line.strip().lower() + ' '\n# for ltr in line:\n# if ltr != ' ':\n# s += ltr\n# else:\n# a.append(s)\n# s = ''\n# s = {}\n# max = 0\n# max_w = ''\n# for i in a:\n# if s.get(i) == None:\n# s[i] = 1\n# else:\n# s[i] += 1\n# if max < s[i]:\n# max = s[i]\n# max_w = i\n#\n# print(max_w, s[max_w], sep=' ')\n\nwith open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_3.txt', 'w') as inf:\n inf.write('12\\n')\n inf.write('1234\\n')\nwith open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_3.txt', 'r') as inf:\n line = inf.read()\nprint(line)" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 17.05555534362793, "blob_id": "645c605853ca2260fb5b6d03c272c9edd2db91d1", "content_id": "387366f1260bc2528fc049675effd4821597f6c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/Python_stepik_2/3.3.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\npattern = r\"cat\"\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tmatch_object = re.findall(pattern, line)\n\tif len(match_object) >= 2:\n\t\tprint(line)\n\n#catcatsghdtscat\n# import re\n# import sys\n#\n# for line in sys.stdin:\n# line = line.strip()\n# if re.search(r\"cat.*cat\", line):\n# print(line)" }, { "alpha_fraction": 0.40112993121147156, "alphanum_fraction": 0.4067796468734741, "avg_line_length": 13.75, "blob_id": "fde79baf9072f84fb3ea4642d06117bd565e0577", "content_id": "b7630c4a4aa2a060ee60210b01eba1b6e9ff9acb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 20, "num_lines": 12, "path": "/MyPr1/ex11.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "def f(x):\n return x + 1\n\nn = int(input())\nb = {}\nfor i in range(n):\n a = int(input())\n if a in b:\n print(b[a])\n else:\n b[a] = f(a)\n print(b[a])\n" }, { "alpha_fraction": 0.7093023061752319, "alphanum_fraction": 0.7093023061752319, "avg_line_length": 18.22222137451172, "blob_id": "70031e6e790ca23c3bd652b6d459d0e33adc016e", "content_id": "a92e5966fbcc3aa1fe1075550a55c2a9227150ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/Python_stepik_2/3.3.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\npattern = r\"\\bcat\\b\"\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tmatch_object = re.search(pattern, line)\n\tif match_object is not None:\n\t\tprint(line)" }, { "alpha_fraction": 0.6648044586181641, "alphanum_fraction": 0.6703910827636719, "avg_line_length": 18.55555534362793, "blob_id": "4b94f35ae6290fa404558cf9c042a767b450986b", "content_id": "0999e7eb1f35623347f73c00f6a7f13fc82813df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": 
"/Python_stepik_2/3.3.5.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\npattern = r\"\\b(.+)\\1\\b\"\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tmatch_object = re.search(pattern, line)\n\tif match_object is not None:\n\t\tprint(line)\n\n\n\n" }, { "alpha_fraction": 0.6766917109489441, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 32.5, "blob_id": "eacd28b3222d72abd6ee9b5a5cadf68259965f66", "content_id": "44768accd9dc247f4d282df7bf7d3c275058889d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 79, "num_lines": 4, "path": "/MyPr1/ex16.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import requests\nr = requests.get('https://stepic.org/media/attachments/course67/3.6.2/983.txt')\nb = r.text.splitlines()\nprint(len(b))" }, { "alpha_fraction": 0.5966101884841919, "alphanum_fraction": 0.6203389763832092, "avg_line_length": 13.047618865966797, "blob_id": "3c264c182393a726f6a223676a35d8b91de4efc9", "content_id": "9f3d578f29667b01761d0bde965d46e8a652cc44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": "/Python_stepik_2/2.3.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import math\nimport itertools\n\ndef primes():\n\tcount = 2\n\n\twhile True:\n\t\tisprime = True\n\n\t\tfor x in range(2, int(math.sqrt(count) + 1)):\n\t\t\tif count % x == 0:\n\t\t\t\tisprime = False\n\t\t\t\tbreak\n\n\t\tif isprime:\n\t\t\tyield count\n\n\t\tcount += 1\n\n\nprint(list(itertools.takewhile(lambda x: x <= 31, primes())))\n" }, { "alpha_fraction": 0.3937007784843445, "alphanum_fraction": 0.4724409580230713, "avg_line_length": 13.222222328186035, "blob_id": "82932a0f7775c5193fdb3f8ad6545894af82bb8c", "content_id": "498df92f3b6d3bfe1f7eb7dcc61a5815f93bbe01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/MyPr1/ex3.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "sum1 = 0\nsum2 = 0\nwhile True:\n a = int(input())\n sum1 += a\n sum2 += a ** 2\n if sum1 == 0:\n break\nprint(sum2)" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 25.75, "blob_id": "073cdce3d5ffc02e889f98a63ce3d4b5a6430941", "content_id": "f56217f6bd0f4d3391d252bb8323384fb3f6bf24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/Python_stepik_2/1.6.3.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "class LoggableList(list, Loggable):\n\tdef append(self, object):\n\t\tsuper().append(object)\n\t\tself.log(object)\n\n" }, { "alpha_fraction": 0.6722221970558167, "alphanum_fraction": 0.6722221970558167, "avg_line_length": 29.16666603088379, "blob_id": "a38ba225262f080ba9dbfd617405fe24c81268ef", "content_id": "4c7fc8b5b23840f34b7397aead5d42590b045e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 81, "num_lines": 6, "path": "/Python_stepik_2/2.2.1.py", "repo_name": "Arkkav/git-learing", 
"src_encoding": "UTF-8", "text": "import datetime\n\nyyyy, mm, dd = input().split()\ndelta = int(input())\nd = datetime.date(int(yyyy), int(mm), int(dd)) + datetime.timedelta(days = delta)\nprint(d.year, d.month, d.day)" }, { "alpha_fraction": 0.5845070481300354, "alphanum_fraction": 0.6338028311729431, "avg_line_length": 16.875, "blob_id": "8a085c15551c7f991a014512138e557f5d65d3e3", "content_id": "1671f6a7d877cc5e4af713b7bf04bd9ce837d23c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/Python_stepik_2/3.3.10.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\nfor line in sys.stdin:\n\tline = line.rstrip()\n\ts = re.fullmatch(r\"(1(01*0)*1|0)*\", line)\n\tif s is not None:\n\t\tprint(s[0])" }, { "alpha_fraction": 0.549199104309082, "alphanum_fraction": 0.5606407523155212, "avg_line_length": 24.735294342041016, "blob_id": "d8fa33e64296e1986a8a021b0cdc7866e6410383", "content_id": "d3d55582c8c2c6f4daba139e217bae296276038b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "no_license", "max_line_length": 112, "num_lines": 34, "path": "/Python_stepik_2/3.4.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport requests\n\nhtml_file = input()\n# html_file = 'http://pastebin.com/raw/7543p0ns'\n\nlink_pattern = re.compile(r'<a[^>]*?href=(?:\\'|\\\")(?:https://|http://|ftp://|)(.*?)[\\\"|\\'|/|:]')\nresp = requests.get(html_file).text\nurl = link_pattern.findall(resp)\nurl2 = []\nfor i in url:\n\tif not i.startswith(\"..\"):\n\t\turl2.append(i)\nurl2 = sorted(list(set(url2)))\n# url = sorted(url)\n# url = set(url)\n# match_object = re.search(pattern, line)\nprint(*url2, sep='\\n')\n\n# import re\n# import requests\n#\n# resp = requests.get(input()).text\n# sites = set()\n# for site in re.findall(r'<a.*?href=\".*?:\\/\\/((?:\\w|-)+(?:\\.(?:\\w|-)+)+)', resp):\n# sites.add(site)\n#\n# for site in sorted(sites):\n# print(site)\n\n# import requests, re\n# urls = set(re.findall(r\"(?:.*)?(?:<a(?:.*)? 
href=[\\\"\\'])(?:\\w+://)?(\\w[\\w\\.-]*)\", requests.get(input()).text))\n#\n# print('\\n'.join(sorted(urls)))" }, { "alpha_fraction": 0.5400350689888, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 27.065574645996094, "blob_id": "dd34041f86bfe6e0c9a173744b47da7b9feef8de", "content_id": "dd103ca47b06efd5762ca7f8eaa2f223537568be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "no_license", "max_line_length": 110, "num_lines": 61, "path": "/Python_stepik_2/3.6.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import requests\nimport json\n# import sqlite3\n# from sqlite3 import Error\n#\n#\n# def create_connection(path):\n# connection = None\n# try:\n# connection = sqlite3.connect(path)\n# print(\"Connection to SQLite DB successful\")\n# except Error as e:\n# print(f\"The error '{e}' occurred\")\n#\n# return connection\n#\n#\n# connection = create_connection(r\"C:\\Users\\arkka\\PycharmProjects\\Python_tasks\\Python_stepik_2\\sm_app.sqlite\")\n# c = connection.cursor()\n# c.execute('''CREATE TABLE stocks\n# (name text, date text)''')\n\n\nclient_id = '...'\nclient_secret = '...'\n# инициируем запрос на получение токена\nr = requests.post(\"https://api.artsy.net/api/tokens/xapp_token\",\n\t\t\t\t data={\"client_id\": 'edb1081cc97bfe18a1e2',\n\t\t\t\t\t\t\"client_secret\": 'cdc884c2e9ea1c25cd0de823fa721545'})\n\n# разбираем ответ сервера\nj = json.loads(r.text)\n\n# достаем токен\ntoken = j[\"token\"]\n# создаем заголовок, содержащий наш токен\nheaders = {\"X-Xapp-Token\" : token}\n# инициируем запрос с заголовком\ns = '''4f5f64c13b555230ac000004\n503649d96cb8020002000dd0\n519258cc9e628509c40000d7\n56d6f872139b2166eb000ade\n4e96f7705554c900010027db\n4f552b2e3b55524170000003\n511d5a7994326bf7f10005c6\n53fb955e72616951aa090000\n545bef0b72616949b5310700\n53d8b30672616913c7e40200\n551bc2ca7261692b55981300\n4f86f41a24907b0001000d46\n515b34a9223afaab8f000905\n4e9314fd57d697000100133c\n55255e8d7261695ba22f0300\n'''.split()\nlst = dict()\nfor i in s:\n\tr = requests.get(\"https://api.artsy.net/api/artists/\" + i, headers=headers)\n\tj = json.loads(r.text)\n\tlst[j['sortable_name']] = int(j['birthday'])\nlst = sorted(lst.items(), key=lambda x: (x[1], x[0])) # 2-а аргумента работают как в sort by в SQL\nprint(*[i[0] for i in lst], sep='\\n')" }, { "alpha_fraction": 0.38532111048698425, "alphanum_fraction": 0.4013761579990387, "avg_line_length": 20.799999237060547, "blob_id": "fe54df680c4ff43ef7e93dd54ff2ee093e0484ad", "content_id": "77d17d6c537e72891a206f1192b1b8803a37d0f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 103, "num_lines": 20, "path": "/MyPr1/ex6.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "j = 0\na = []\nb = []\nc = []\nfl = True\nwhile fl:\n a = [i for i in input().split()]\n # b [j] += a\n if a == ['end']:\n fl = False\n else:\n b.append(a)\n j += 1\nc = [[0 for j in range(len(b[i]))]for i in range(len(b))]\nn = len(b)\nfor i in range(n):\n m = len(b[i])\n for j in range(m):\n c[i][j] = int(b[i - 1][j]) + int(b[i - n + 1][j]) + int(b[i - n][j - 1]) + int(b[i][j - m + 1])\n print(*c[i])\n" }, { "alpha_fraction": 0.5501729846000671, "alphanum_fraction": 0.5709342360496521, "avg_line_length": 76.13333129882812, "blob_id": "27c73cb18d3a10e7b2fa07038248ab44c7eb7ec1", "content_id": "652b85771bdecf6dcca8ccbea830226cc0af0aa1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "no_license", "max_line_length": 205, "num_lines": 15, "path": "/MyPr1/ex14.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_4.txt', 'r') as inf:\n# a = []\n# a = inf.read().strip().split('\\n')\n# for i in range(len(a)):\n# a[i] = a[i].split(';')\n# b = len(a)\n# with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_4.txt', 'w') as inf:\n# [inf.write(str((int(mat) + int(phys) + int(rus)) / 3) + '\\n') for lname, mat, phys, rus in a]\n# inf.write(str(sum([int(math) for lname, math, phys, rus in a]) / b) + ' ' + str(sum([int(phys) for lname, math, phys, rus in a]) / b) + ' ' + str(sum([int(rus) for lname, math, phys, rus in a]) / b))\n\n # [print((int(mat) + int(phys) + int(rus)) / 3) for lname, mat, phys, rus in a]\n # print(str(sum([int(math) for lname, math, phys, rus in a]) / b) + ' ' + str(sum([int(phys) for lname, math, phys, rus in a]) / b) + ' ' + str(sum([int(rus) for lname, math, phys, rus in a]) / b))\nst = [x.split(';') for x in open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_4.txt').readlines()]\nprint(*[sum([int(y) for y in x[1:]])/3 for x in st], sep='\\n')\nprint(*[sum([int(y) for y in [st[x][z] for x in range(len(st))]])/len(st) for z in range(1,4)])" }, { "alpha_fraction": 0.5272118449211121, "alphanum_fraction": 0.5556796193122864, "avg_line_length": 27.44444465637207, "blob_id": "e0b0c2aa50ad1da9a7f7799cfb4e58ae8392b5df", "content_id": "3e4d4933356197abf74341484ac2978f482a78e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4283, "license_type": "no_license", "max_line_length": 106, "num_lines": 126, "path": "/Python_stepik_2/2.3.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# class Iterformultifilter:\n\n\nclass multifilter:\n def judge_half(pos, neg):\n # допускает элемент, если его допускает хотя бы половина фукнций (pos >= neg)\n return pos >= neg\n\n def judge_any(pos, neg):\n # допускает элемент, если его допускает хотя бы одна функция (pos >= 1)\n return pos >= 1\n\n def judge_all(pos, neg):\n # допускает элемент, если его допускают все функции (neg == 0)\n return neg == 0\n\n def __init__(self, iterable, *funcs, judge=judge_any):\n # iterable - исходная последовательность\n # funcs - допускающие функции\n # judge - решающая функция\n self.i = 0\n self.judge = judge\n self.funcs = funcs\n self.iterable = iterable\n self.k = len(self.iterable)\n\n def __iter__(self):\n # возвращает итератор по результирующей последовательности\n return self\n\n def __next__(self):\n while self.i < self.k:\n pos = 0\n neg = 0\n for _ in self.funcs:\n if _(self.iterable[self.i]):\n pos += 1\n else:\n neg += 1\n self.i += 1\n if self.judge(pos, neg):\n return self.iterable[self.i - 1]\n raise StopIteration\n\n\n\n\n\n\n\ndef mul2(x):\n return x % 2 == 0\n\ndef mul3(x):\n return x % 3 == 0\n\ndef mul5(x):\n return x % 5 == 0\n\na = [i for i in range(31)] # [0, 1, 2, ... 
, 30]\nb = multifilter(a, mul2, mul3, mul5)\nprint(list(b))\nprint(list(b))\n# [0, 2, 3, 4, 5, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20, 21, 22, 24, 25, 26, 27, 28, 30]\n\n\n# print(list(multifilter(a, mul2, mul3, mul5, judge=multifilter.judge_half)))\n# # [0, 6, 10, 12, 15, 18, 20, 24, 30]\n#\n# print(list(multifilter(a, mul2, mul3, mul5, judge=multifilter.judge_all)))\n# # [0, 30]\n\n\n# # с применением генератора в одной строке\n# class multifilter:\n# judge_half = lambda fx: sum(fx) >= len(fx) / 2\n# judge_any = lambda fx: any(fx)\n# judge_all = lambda fx: all(fx)\n#\n# def __init__(self, iterable, *funcs, judge=judge_any):\n# self.iterable = iterable\n# self.funcs = funcs\n# self.judge = judge\n#\n# def __iter__(self):\n# return (x for x in self.iterable if self.judge([f(x) for f in self.funcs]))\n\n\n# собственное решение через yield (вместо __next__)\n#\n# логика работы прописана в итераторе объекта __iter__\n# где пробегаемся по элементам исходной последовательности\n# просеиваем элемент через функции-фильтры\n# и если выбранная функция judge дала добро\n# то yield`им этот элемент\n#\n#\n#\n\n# # решение с yield в iter\n# class multifilter:\n# def judge_half(pos, neg):\n# return pos >= neg # допускает элемент, если его допускает хотя бы половина фукнций (pos >= neg)\n#\n# def judge_any(pos, neg):\n# return pos >= 1 # допускает элемент, если его допускает хотя бы одна функция (pos >= 1)\n#\n# def judge_all(pos, neg):\n# return neg == 0 # допускает элемент, если его допускают все функции (neg == 0)\n#\n# def __init__(self, iterable, *funcs, judge=judge_any):\n# self.lst = iterable # iterable - исходная последовательность\n# self.funcs = funcs # funcs - допускающие функции\n# self.judge = judge # judge - решающая функция\n#\n# def __iter__(self):\n# for el in self.lst:\n# pos, neg = 0, 0\n# for f in self.funcs:\n# pass\n# if f(el):\n# pos += 1\n# else:\n# neg += 1\n# if self.judge(pos, neg):\n# yield el" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.6200000047683716, "avg_line_length": 19.200000762939453, "blob_id": "5c5a923429492556e6caf3b1e10d72a1675be624", "content_id": "d108970f7d33e8326c36f572dfdc1e067014f74b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 30, "num_lines": 5, "path": "/Python_stepik_2/2.5.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "def mod_checker(x, mod=0):\n\treturn lambda y: y % x == mod\n\nmod_3 = mod_checker(3, 1)\nprint(mod_3(4))" }, { "alpha_fraction": 0.7330895662307739, "alphanum_fraction": 0.7586837410926819, "avg_line_length": 41.07692337036133, "blob_id": "9511b7f652c20f4ee353c39976c5733ee7489a89", "content_id": "0031654e8d97abca3d5f52e261eb223329012ae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 119, "num_lines": 13, "path": "/Django_projects/vk_oauth/vk_oauth/apps/articles/models.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Article(models.Model):\n\tarticle_title = models.CharField('название статьи', max_length=200) # VarChar in MySQL - 200-300 simbols?\n\tarticle_text = models.TextField('текст статьи')\n\tpub_date = models.DateTimeField('дата публикации')\n\n\nclass Comment(models.Model):\n\tarticle = models.ForeignKey(Article, on_delete=models.CASCADE) #on_delete - delete all its comments if delete 
article\n\tauthor_name = models.CharField('имя автора', max_length=50)\n\tcomment_text = models.CharField('текст комментария', max_length=200)\n" }, { "alpha_fraction": 0.3900621235370636, "alphanum_fraction": 0.47577640414237976, "avg_line_length": 30, "blob_id": "ed831e5b9684586bedf787e2de269b5b5c9b58da", "content_id": "b0cbc3a72d140dd54b3fe72840316a26eb5813c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 84, "num_lines": 26, "path": "/MyPr1/ex12.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# import os\n# s = ''\n# t = ''\n# num = '1'\n# with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_2.txt', 'r') as inf:\n# line = inf.readline().strip()\n# for i in range(len(line)):\n# if line[i] in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']:\n# num += line[i]\n# elif t != '':\n# s += t * int(num)\n# t = line[i]\n# num = ''\n# else:\n# t = line[i]\n# num = ''\n# s += t * int(num)\n# with open(r'C:\\Users\\arkka\\PycharmProjects\\MyPr1\\dataset_3363_2.txt', 'w') as inf:\n# inf.write(s + '\\n')\n\n\ns, d = 'D8s11T20s13r16V8O9o11f3V3S5B14T11m12n18i17T18x15d17V1b5l2x13I5t12q7c14', []\nfor i in s:\n if not i.isdigit(): d.append(i)\n else: d[-1] += i\nprint(*[i[0]*int(i[1:]) for i in d], sep='')" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 18.44444465637207, "blob_id": "77448c835d5001a0646d2d046c6839203fb0585a", "content_id": "e8b24b6ada1d429440b15ee85d309f6dc9748360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/Python_stepik_2/3.3.6.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tprint(re.sub(\"human\", \"computer\", line))\n\n\n# print(re.sub(r'human', 'computer', sys.stdin.read()), end='')" }, { "alpha_fraction": 0.6403940916061401, "alphanum_fraction": 0.6403940916061401, "avg_line_length": 34.034481048583984, "blob_id": "50c84dbf33db6231fde4963ea5d754abfc39e455", "content_id": "562320c846388f19bd4e002df680a8f95305cd71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1339, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/MyPr1/ex3.7.3.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "a = int(input())\nb = set()\n[b.add(*input().lower().split('\\n')) for i in range(a)]\nc = int(input())\nd = set()\nfor i in range(c):\n g = input().lower().split(' ')\n [d.add(i) for i in g]\n[print(i) for i in d if i not in b]\n\n# # # формируем множество известных слов на основании построчного ввода\n# # dic = {input().lower() for _ in range(int(input()))}\n# #\n# # # заводим пустое множество для приема текста\n# wrd = set()\n#\n# # т.к. 
текст построчно подается, а также в каждой строке несколько слов,\n# # то каждую строку превращаем во множество и добавляем в единое множество wrd\n# for _ in range(int(input())):\n# wrd |= {i.lower() for i in input().split()}\n#\n# # на вывод отправляем результат вычитания словарного множества dic\n# # из текстового множества wrd; впереди ставим *, чтобы раскрыть поэлементно\n# print(*(wrd-dic), sep=\"\\n\")\n\n\n# words = set(input().lower() for i in range(int(input())))\n# text = set(('\\n'.join(input().lower() for i in range(int(input())))).split())\n# print('\\n'.join(text - words))" }, { "alpha_fraction": 0.625261664390564, "alphanum_fraction": 0.6329379081726074, "avg_line_length": 27.117647171020508, "blob_id": "0d844e6856effdcc44170acb6c9ea5a4d85e3d2a", "content_id": "63821cb3d99617e244763533ab9e20616cf83c1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1433, "license_type": "no_license", "max_line_length": 109, "num_lines": 51, "path": "/Python_stepik_2/3.7.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# from xml.etree import ElementTree\nfrom xml.etree.ElementTree import XMLParser\n\n# s = input()\ns = '<cube color=\"blue\"><cube color=\"red\"><cube color=\"green\"></cube></cube><cube color=\"red\"></cube></cube>'\n\n\nclass ColorCount: # The target object of the parser\n\tdepth = 0\n\tred_count = 0\n\tgreen_count = 0\n\tblue_count = 0\n\n\tdef start(self, tag, attrib): # Called for each opening tag.\n\t\tself.depth += 1\n\t\tif attrib['color'] == 'blue':\n\t\t\tself.blue_count += self.depth\n\t\telif attrib['color'] == 'red':\n\t\t\tself.red_count += self.depth\n\t\telif attrib['color'] == 'green':\n\t\t\tself.green_count += self.depth\n\n\tdef end(self, tag): # Called for each closing tag.\n\t\tself.depth -= 1\n\n\tdef data(self, data):\n\t\tpass # We do not need to do anything with data.\n\n\tdef close(self): # Called when all data has been parsed.\n\t\treturn str(self.red_count) + ' ' + str(self.green_count) + ' ' + str(self.blue_count)\n\t\t# return self.red_count + self.green_count + self.blue_count\n\n\ntarget = ColorCount()\nparser = XMLParser(target=target)\nparser.feed(s)\nprint(parser.close())\n\n\n# from xml.etree import ElementTree\n#\n# root = ElementTree.fromstring(input())\n# colors = {\"red\": 0, \"green\": 0, \"blue\": 0}\n#\n# def getcubes(root, value):\n# colors[root.attrib['color']] += value\n# for child in root:\n# getcubes(child, value+1)\n#\n# getcubes(root,1)\n# print(colors[\"red\"], colors[\"green\"], colors[\"blue\"])" }, { "alpha_fraction": 0.25655171275138855, "alphanum_fraction": 0.33241379261016846, "avg_line_length": 25.851852416992188, "blob_id": "96c8b0d6982272fabb8feec4bdae32c0e0eac062", "content_id": "fc163b2c9720b0084634c47a50c70e888efd94a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 151, "num_lines": 27, "path": "/MyPr1/ex18.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "n = int(input())\ntab = [input().split(';') for i in range(n)]\nd = {}\nfor i in tab:\n if i[0] not in d:\n d[i[0]] = [0]*4\n if i[2] not in d:\n d[i[2]] = [0] * 4\n if i[1] > i[3]:\n d[i[0]][1] += 1\n d[i[2]][3] += 1\n d[i[0]][0] += 1\n d[i[2]][0] += 1\n elif i[1] < i[3]:\n d[i[0]][3] += 1\n d[i[2]][1] += 1\n d[i[0]][0] += 1\n d[i[2]][0] += 1\n else:\n d[i[0]][2] += 1\n d[i[2]][2] += 1\n d[i[0]][0] += 1\n d[i[2]][0] += 1\n\n#Всего_игр - 
Побед - Ничьих - Поражений - Всего_очков\n\nprint(*[i + ':' + str(d[i][0]) + ' ' + str(d[i][1]) + ' ' + str(d[i][2]) + ' ' + str(d[i][3]) + ' ' + str((d[i][1]*3 + d[i][2])) for i in d], sep='\\n')\n" }, { "alpha_fraction": 0.5196506381034851, "alphanum_fraction": 0.5414847135543823, "avg_line_length": 19.909090042114258, "blob_id": "0cd14335ce1c524561303dc697f55e9e8ff057a6", "content_id": "8355343dea01f8993a5a6a50954b924f7704b81e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/Python_stepik_2/3.3.8.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tprint(re.sub(r\"\\b(\\w)(\\w)(\\w*)\\b\", r\"\\2\\1\\3\", line))\n\n# #!!\n# for line in sys.stdin:\n# line = line.rstrip()\n# print(re.sub(r'\\b(\\w)(\\w)', r\"\\2\\1\", line))" }, { "alpha_fraction": 0.596581220626831, "alphanum_fraction": 0.602849006652832, "avg_line_length": 24.434782028198242, "blob_id": "a38fc0cf5d6f7d6ff157cda6ace5c6da088c00e2", "content_id": "25b159d681f09951d51a25d197b8160ee374b306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1867, "license_type": "no_license", "max_line_length": 88, "num_lines": 69, "path": "/Python_stepik_2/1.6.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "n = int(input())\nVertex = {} # вершина : множество прямых предков\n\n\ndef get_direct_ancestor(a, vertex): # возвращает множество всех предков\n\n\tstack = []\n\tfor i in vertex[a]:\n\t\tstack.append(i)\n\t\tif i:\n\t\t\tstack.extend(get_direct_ancestor(i, vertex))\n\treturn set(stack)\n\n\nfor i in range(n):\n\tVertexBuff = input().split()\n\tVertex[VertexBuff[0]] = set()\n\tif len(VertexBuff) != 1:\n\t\t[Vertex[VertexBuff[0]].add(i) for i in VertexBuff[2:]]\n# print(Vertex)\n\nq = int(input())\nAnswers = ''\nfor i in range(q):\n\tAncestor, Descendant = input().split()\n\tif Ancestor not in Vertex or Descendant not in Vertex:\n\t\tAnswers += 'No\\n'\n\t\tcontinue\n\tanc = get_direct_ancestor(Descendant, Vertex)\n\t# print(Descendant + ' ' + str(anc))\n\tif Ancestor in anc or Ancestor == Descendant:\n\t\tAnswers += 'Yes\\n'\n\telif Ancestor not in anc:\n\t\tAnswers += 'No\\n'\n\telse:\n\t\tAnswers += 'No\\n'\nprint(Answers)\n\n# Так себе код, но интересно применение map и lambda\n# n = int(input())\n#\n# parents = {}\n# for _ in range(n):\n# a = input().split()\n# parents[a[0]] = [] if len(a) == 1 else a[2:]\n#\n# def is_parent(child, parent):\n# return child == parent or any(map(lambda p: is_parent(p, parent), parents[child]))\n#\n# q = int(input())\n# for _ in range(q):\n# a, b = input().split()\n# print(\"Yes\" if is_parent(b, a) else \"No\")\n\n\n# # Вот еще неплохое решение\n# def test(parent, child):\n# if parent == child or parent in base[child]:\n# return 'Yes'\n# for i in base[child]:\n# if test(parent, i) == 'Yes':\n# return 'Yes'\n# return 'No'\n#\n# base = {}\n# for com in [input().split(' ') for i in range(int(input()))]:\n# base[com[0]] = com[2:len(com)]\n# for com in [input().split(' ') for i in range(int(input()))]:\n# print (test(com[0], com[1]))\n" }, { "alpha_fraction": 0.34775087237358093, "alphanum_fraction": 0.3771626353263855, "avg_line_length": 18.931034088134766, "blob_id": "2ee0d9437a8397f9e4e78c3553a5a88157f5fbd1", "content_id": "2f4e7322bf3dda6a32edb9f36a1251d514b48ef7", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 43, "num_lines": 29, "path": "/MyPr1/ex8.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "\n# def modify_list(l):\n# i = 0\n# while True:\n# if l[i] % 2 != 0:\n# del l[i]\n# else:\n# i += 1\n# if i > len(l) - 1:\n# break\n# for i in range(len(l)):\n# l[i] //= 2\n# def modify_list(l):\n# l[:] = [i//2 for i in l if not i % 2]\n\n\n# lst = lst[:]\n# modify_list(lst)\n\nlst = [1, 2, 3, 4, 5, 6]\n# def modify_list(l):\n# for i in range(len(l)):\n# print(list(range(len(l))))\n# if l[i] % 2 == 1:\n# del l[i]\n# else:\n# l[i] //= 2\n\nmodify_list(lst)\nprint(lst)" }, { "alpha_fraction": 0.6933962106704712, "alphanum_fraction": 0.7028301954269409, "avg_line_length": 14.214285850524902, "blob_id": "3a334e4d732a1d0d3c21751dea83035c95610486", "content_id": "398a360ec32f72b6484327d742a33306a63c3930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/Python_stepik_2/2.1.3.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "class NonPositiveError(Exception):\n\tpass\n\n\nclass PositiveList(list):\n\tdef append(self, x):\n\t\tif x <= 0:\n\t\t\traise NonPositiveError('NonPositiveError')\n\t\tsuper().append(x)\n\n\ng = PositiveList()\ng.append(-1)\nprint(g)" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 10, "blob_id": "3bc37e5a5b0815095fa55475caa2d63d43dc8448", "content_id": "64d687ee14bbeb6f8b1ee3cbd0ad18c1dfc3da02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/MyPr1/ex15.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import sys\na = []\na =sys.argv\na.remove(a[0])\nprint(*a)" }, { "alpha_fraction": 0.5688073635101318, "alphanum_fraction": 0.5871559381484985, "avg_line_length": 14.428571701049805, "blob_id": "c3e9ba6d9493fec410757ba139a629f3ae94ec21", "content_id": "a4c042176a7b462641b83445ab1b23dff7f75cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/Python_stepik_2/3.2.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "s = input()\nt = input()\ncount = 0\nfor i in range(len(s)):\n\tif s.startswith(t, i):\n\t\tcount += 1\nprint(count)\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5277777910232544, "avg_line_length": 22.294116973876953, "blob_id": "036bbd075b2a62d7a9723dda9493416fe9315d7b", "content_id": "8731bea8dcfa9c17279be8346d963757e12021a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/MyPr1/ex2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "numbers = [int(i) for i in input().split()]\nnumbers.sort()\nj = 0\nflag_1 = False\nwhile len(numbers) - 1 > j:\n if numbers[j] == numbers[j + 1]:\n flag_1 = True\n del numbers[j + 1]\n elif not flag_1:\n del numbers[j]\n else:\n flag_1 = False\n j += 1\nif j == len(numbers) - 1 and flag_1 == False:\n del numbers[j]\nfor i in numbers:\n print(str(i), end=' ')\n" }, { 
"alpha_fraction": 0.319029837846756, "alphanum_fraction": 0.3606965243816376, "avg_line_length": 26.27118682861328, "blob_id": "47e2769452f4b8e3c993758508e304b29c823524", "content_id": "a734379f54a5a20a39156308b77a3104a293987f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1608, "license_type": "no_license", "max_line_length": 65, "num_lines": 59, "path": "/MyPr1/ex7.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# n = int(input())\n# n2 = n ** 2\n# b = [[0 for j in range(n)] for i in range(n)]\n# # b[0] = [i + 1 for i in range(n)]\n# dir_i = 0\n# dir_j = 1\n# p_i = 0\n# p_j = 0\n# val = 0\n# # for i in range(1 + 2 * (n - 1)):\n# the_end = False\n# while not the_end:\n# val += 1\n# b[p_i][p_j] = val\n# if 0 <= p_i + dir_i <= n - 1 and 0 <= p_j + dir_j <= n - 1:\n# if b[p_i + dir_i][p_j + dir_j] != 0:\n# if dir_i == 0 and dir_j == 1:\n# dir_i = 1\n# dir_j = 0\n# elif dir_i == 1 and dir_j == 0:\n# dir_i = 0\n# dir_j = -1\n# elif dir_i == 0 and dir_j == -1:\n# dir_i = -1\n# dir_j = 0\n# elif dir_i == -1 and dir_j == 0:\n# dir_i = 0\n# dir_j = 1\n# else:\n# if dir_i == 0 and dir_j == 1:\n# dir_i = 1\n# dir_j = 0\n# elif dir_i == 1 and dir_j == 0:\n# dir_i = 0\n# dir_j = -1\n# elif dir_i == 0 and dir_j == -1:\n# dir_i = -1\n# dir_j = 0\n# elif dir_i == -1 and dir_j == 0:\n# dir_i = 0\n# dir_j = 1\n# if b[p_i + dir_i - n][p_j + dir_j - n] != 0:\n# the_end = True\n# p_i += dir_i\n# p_j += dir_j\n# [print(*b[i], sep=' ') for i in range(n)]\n#\nn=int(input())\nt=[[0]*n for i in range (n)]\ni,j=0,0\nfor k in range(1, n*n+1):\n t[i][j]=k\n if k==n*n: break\n if i<=j+1 and i+j<n-1: j+=1\n elif i<j and i+j>=n-1: i+=1\n elif i>=j and i+j>n-1: j-=1\n elif i>j+1 and i+j<=n-1: i-=1\nfor i in range(n):\n print(*t[i])" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.6142857074737549, "avg_line_length": 21.535715103149414, "blob_id": "005820b914d674dddd4a9dcfadfea96bcf9bb6e6", "content_id": "01be5c981785e04082250e457187692799543092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 82, "num_lines": 28, "path": "/Python_stepik_2/3.5.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import csv\n\nwith open(\"Crimes.csv\", 'r') as f:\n\treader = csv.reader(f)\n\tr = (i[5] for i in reader if '2015' in i[2])\n\td = dict()\n\tfor word in r:\n\t\tif word in d:\n\t\t\td[word] += 1\n\t\telse:\n\t\t\td[word] = 0\n\td = {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)}\n\tprint(d)\n\n# import csv\n# from collections import Counter as c\n#\n# with open('Crimes.csv') as f:\n# \tdata = csv.reader(f)\n# \tprint(c( row[5] for row in data if '2015' in row[2] ))\n\n\n# import csv\n# from collections import Counter as c\n#\n# with open('Crimes.csv') as f:\n# result = c(row[5] for row in csv.reader(f))\n# print(result.most_common(1))" }, { "alpha_fraction": 0.5309917330741882, "alphanum_fraction": 0.6012396812438965, "avg_line_length": 47.5, "blob_id": "894a1e63f1d5c279152c5761ab3fdea8c9e893c3", "content_id": "f2e7d0583a9a34e69125528191df17a74c76998b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 89, "num_lines": 10, "path": "/Python_stepik_2/2.4.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "with 
open('dataset_24465_4.txt', 'r') as f, open('test_copy.txt', 'w') as w:\n\tr = [line.strip() for line in f]\n\tw.write('\\n'.join([r[i] for i in range(len(r)-1, -1, -1)]))\n\n\n# with open('dataset_24465_4.txt', 'r') as fr, open('dataset_24465_4_w.txt', 'w') as fw:\n# fw.writelines(fr.readlines()[::-1])\n\n# with open('dataset_24465_4.txt') as inp, open('dataset_24465_4_write.txt', 'w') as out:\n# [out.write('{}\\n'.format(oln)) for oln in reversed([iln.rstrip() for iln in inp])]" }, { "alpha_fraction": 0.30249109864234924, "alphanum_fraction": 0.3327402174472809, "avg_line_length": 16.30769157409668, "blob_id": "041f5e7719f6532d4ba15a616b803453b610a024", "content_id": "1884bb844d77b0de55d9955dc383d2cbaf5f84bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 51, "num_lines": 65, "path": "/MyPr1/ex4.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "# n = int(input())\n# i = 0\n# k = 0\n# j = 0\n# s = ''\n# if n == 0:\n# print(s)\n# else:\n# while k <= n:\n# i += 1\n# for j in range(i):\n# k += 1\n# if k < n:\n# # print(i, end=' ')\n# s += str(i) + ' '\n# elif k == n:\n# s += str(i)\n# print(s)\n\n# n, b = int(input()), []\n# c = []\n# for i in range(n):\n# if len(b) == n:\n# break\n# c = [i]*i\n# j = 0\n# b.append([i for j in range(i) if len(b) < n])\n# # while len(b) < n and len(c) - 1 >= j:\n# # b.append(c[j])\n# # j += 1\n# print(*b)\n\n# n = int(input())\n# a = []\n# i = 0\n# while len(a) < n:\n# # a += [i] * i\n# a.append([i] * i)\n# i += 1\n# print(*a[:n])\n\na = [1, 2, 3]\nb = [56, 67,78]\na +=b\na += [1, 2, 4]\na.append([1, 47, 34])\ns = str(a)\nprint(s)\n\n# n = int(input())\n# a = [for j in range(1) for i in range(n)]\n# a =\n\n# n = int(input())\n# i = 0\n# k = 0\n# j = 0\n# while True:\n# i += 1\n# for j in range(i):\n# k += 1\n# if k < n:\n# print(i, end=' ')\n# elif k == n:\n# print(i)" }, { "alpha_fraction": 0.592783510684967, "alphanum_fraction": 0.592783510684967, "avg_line_length": 15.891304016113281, "blob_id": "9a4584636b1e3f9faf237a84456262cdf7c82976", "content_id": "68ab8d010646fd04205ba053d072d8285b2e4cf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 776, "license_type": "no_license", "max_line_length": 59, "num_lines": 46, "path": "/Python_stepik_2/3.4.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import requests\nimport re\n\n\ndef url_in(in_wich, look_for):\n\tres = requests.get(in_wich)\n\t_lst = re.findall(r\"<a href=\\\"([^\\\"]*)\", res.text)\n\tfor i in _lst:\n\t\tif look_for in i:\n\t\t\treturn _lst, True\n\treturn _lst, False\n\n\na = input().strip()\nb = input().strip()\n\nlst, _bool = url_in(a, b)\nlists = ''\ns = 'No'\nfor i in lst:\n\tlists, _bool = url_in(i, b)\n\tif _bool :\n\t\ts = 'Yes'\n\t\tbreak\nprint(s)\n\n\n\n# import re\n# import requests\n#\n# start_url = input()\n# end_url = input()\n#\n# found = False\n#\n# link_pattern = re.compile(r'<a[^>]*?href=\"(.*?)\"[^>]*?>')\n#\n# resp = requests.get(start_url).text\n# for url in link_pattern.findall(resp):\n# \tcur_resp = requests.get(url).text\n# \tif end_url in link_pattern.findall(cur_resp):\n# \t\tfound = True\n# \t\tbreak\n#\n# print(\"Yes\" if found else \"No\")" }, { "alpha_fraction": 0.6280944347381592, "alphanum_fraction": 0.6511226296424866, "avg_line_length": 24.130434036254883, "blob_id": "66a40d1235f2a4d661ae3a36eebe8d751ef77cd5", "content_id": 
"75d464579f16a4c4a78f1a0e8f6d8a807227957f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2049, "license_type": "no_license", "max_line_length": 83, "num_lines": 69, "path": "/Python_stepik_2/1.4.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "namespaces = {'global': None} # словарь пространств - пространство : родитель\nvars = {'global': set()} # словарь переменных - пространство, множество переменных\n\n\ndef create(par1, par2):\n\t# create <namespace> <parent> – создать новое пространство\n\t# имен с именем <namespace> внутри пространства <parent>\n\tnamespaces[par1] = par2\n\n\ndef add(par1, par2):\n\t# add <namespace> <var> – добавить в пространство <namespace> переменную <var>\n\tif par1 in vars:\n\t\tvars[par1].add(par2)\n\telse:\n\t\tvars[par1] = set()\n\t\tvars[par1].add(par2)\n\n\ndef get(par1, par2):\n\t# get <namespace> <var> – получить имя пространства,\n\t# из которого будет взята переменная <var> при запросе и\n\t# з пространства <namespace>, или None, если такого пространства не существует\n\tglobal namespaces\n\tglobal vars\n\tif par1 is None:\n\t\treturn None\n\telif par1 in vars.keys():\n\t\tif par2 in vars[par1]:\n\t\t\treturn par1\n\t\telse:\n\t\t\tpar1 = namespaces[par1]\n\t\t\treturn get(par1, par2)\n\telse:\n\t\tpar1 = namespaces[par1]\n\t\treturn get(par1, par2)\n\n\ns = ''\nn = int(input())\nfor i in range(n):\n\tcmd, _par1, _par2 = input().split()\n\tif cmd == 'create':\n\t\tcreate(_par1, _par2)\n\telif cmd == 'add':\n\t\tadd(_par1, _par2)\n\telse:\n\t\ts += str(get(_par1, _par2)) + '\\n'\nprint(s)\n\n\n# # Интересный вызов функции\n# info = dict({'global':[None]})\n#\n# def create(namespace, parent):\n# info.update({namespace:[parent]})\n#\n# def add(namespace, var):\n# info[namespace].append(var)\n#\n# def get(namespace, var):\n# while namespace != None and var not in info[namespace][1:]:\n# namespace = info[namespace][0]\n# print(namespace)\n#\n# operations = {'create': create, 'add': add, 'get': get}\n# for i in range(int(input())):\n# inp = input().split()\n# operations[inp[0]](inp[1], inp[2])\n\n\n\n" }, { "alpha_fraction": 0.6104294657707214, "alphanum_fraction": 0.6134969592094421, "avg_line_length": 20.799999237060547, "blob_id": "4d80c2eba527a1a603c817d147ed90daca136014", "content_id": "fed059ab3b985423e22b183f04e6980dc5abf0ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 90, "num_lines": 15, "path": "/Python_stepik_2/2.4.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import os\n\nr = []\nfor current_dir, dirs, files in os.walk('main'):\n\tif '.py' in [files[i][-3:] for i in range(len(files))]:\n\t\tr.append(current_dir)\nr.sort()\n[print(i) for i in r]\n\n\n# import os\n#\n# result = [cur_dir for cur_dir, dirs, files in os.walk(\"main\") if any([fl.endswith(\".py\")\n# for fl in files])]\n# print(result)" }, { "alpha_fraction": 0.6066176295280457, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 18.5, "blob_id": "fbd2be53c828636b226023be726994de4963b171", "content_id": "e10fbc5f3e4e967634f83cb3a754f71e4a341627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/Python_stepik_2/1.2.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "ans = 0\nn = 
len(objects)\nfor i in range(n): # доступная переменная objects\n\tf = True\n\tfor j in range(i + 1, n):\n\t\tif objects[i] is objects[j]:\n\t\t\tf = False\n\t\t\tbreak\n\tif f:\n\t\tans += 1\nprint(ans)\n\n# print(len(set(map(id, objects))))\n# print(len(set(id(i) for i in objects)))" }, { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.5308641791343689, "avg_line_length": 13.235294342041016, "blob_id": "b744ad8b51d816c7f194fc892ceb80a38c532fc1", "content_id": "1a5c39abd529011c795f3fbd8295d0f70ecfc170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 22, "num_lines": 17, "path": "/Python_stepik_2/3.2.1.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "s = input()\na = input()\nb = input()\ns1 = ''\nif a == b and a in s:\n\tprint('Impossible')\nelse:\n\tfor i in range(1001):\n\t\ts1 = s.replace(a, b)\n\t\tif s == s1:\n\t\t\tprint(i)\n\t\t\tbreak\n\t\telif i == 1000:\n\t\t\tprint('Impossible')\n\t\t\tbreak\n\t\telse:\n\t\t\ts = s1\n\n" }, { "alpha_fraction": 0.47428572177886963, "alphanum_fraction": 0.508571445941925, "avg_line_length": 16.600000381469727, "blob_id": "30c02e851ef2d658967c9a1706fae0aa10ee758f", "content_id": "facff2a774f1656fb875e1562134027243139119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/Python_stepik_2/1.3.2.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "n, k = map(int, input().split())\ndef c_rec(n, k):\n\tif k == 0:\n\t\treturn 1\n\telif k > n:\n\t\treturn 0\n\telse:\n\t\treturn c_rec(n - 1, k) + c_rec(n - 1, k - 1)\ny = c_rec(n, k)\nprint(y)" }, { "alpha_fraction": 0.6692307591438293, "alphanum_fraction": 0.6769230961799622, "avg_line_length": 20.83333396911621, "blob_id": "3bd03f4549980d40ba1b6501dfc517f664f6b515", "content_id": "33e79c16eb95b553d6f58a52ed6ff41a8826afce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 63, "num_lines": 6, "path": "/Python_stepik_2/3.3.7.py", "repo_name": "Arkkav/git-learing", "src_encoding": "UTF-8", "text": "import re\nimport sys\n\nfor line in sys.stdin:\n\tline = line.rstrip()\n\tprint(re.sub(r\"\\ba+\\b\", \"argh\", line, 1, flags=re.IGNORECASE))" } ]
58
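
The regular expression in the 3.3.10.py record above, (1(01*0)*1|0)*, is the classic DFA-derived pattern that accepts exactly the binary strings whose value is divisible by 3. Since that claim is easy to get wrong, here is a minimal standard-library check of it (the range bound 300 is arbitrary):

import re

# Pattern from 3.3.10.py: binary representations of multiples of 3.
PATTERN = re.compile(r"(1(01*0)*1|0)*")

# Brute-force comparison against direct arithmetic.
for n in range(300):
    s = bin(n)[2:]
    assert (PATTERN.fullmatch(s) is not None) == (n % 3 == 0), (n, s)
print("pattern agrees with n % 3 == 0 for all n < 300")
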
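The multifilter in the 2.3.1.py record implements __next__ on the instance itself, which is why its back-to-back print(list(b)) calls yield the full result once and an empty list the second time; the generator-based __iter__ variant kept in that file's comments does not exhaust. A minimal sketch contrasting the two behaviours (class names here are illustrative, not from the repo):

class OneShot:
    """Iterator: __iter__ returns self, so the object is consumed exactly once."""
    def __init__(self, data):
        self.data, self.i = data, 0
    def __iter__(self):
        return self
    def __next__(self):
        if self.i >= len(self.data):
            raise StopIteration
        self.i += 1
        return self.data[self.i - 1]

class Reusable:
    """Iterable: __iter__ is a generator function, so each pass starts fresh."""
    def __init__(self, data):
        self.data = data
    def __iter__(self):
        yield from self.data

one, many = OneShot([1, 2, 3]), Reusable([1, 2, 3])
print(list(one), list(one))    # [1, 2, 3] [] -- internal index stays at the end
print(list(many), list(many))  # [1, 2, 3] [1, 2, 3]
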
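The ex12.py record expands run-length strings such as 'a3b4c2' with a manual character scan; the same decoding drops out of a single re.findall over a letter-then-digits pattern. A short alternative sketch (the function name is mine, not the repo's):

import re

def rle_decode(s):
    """Expand e.g. 'a3b4c2' -> 'aaabbbbcc'; a missing count means one copy."""
    return "".join(ch * int(num or 1)
                   for ch, num in re.findall(r"([A-Za-z])(\d*)", s))

print(rle_decode("a3b4c2"))  # aaabbbbcc
print(rle_decode("xy3"))     # xyyy
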
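Both the 3.6.2.py and 3.5.1.py records end by ranking dictionary entries: the tuple key in 3.6.2.py (key=lambda x: (x[1], x[0])) orders like a two-column SQL ORDER BY, and collections.Counter already ships the "largest count first" case needed in 3.5.1.py. A small illustration of both idioms on made-up data:

from collections import Counter

birthdays = {"Warhol": 1928, "Picasso": 1881, "Dali": 1904, "Ernst": 1891}
# Tuple keys compare element-wise: year first, then name as tie-breaker.
print(sorted(birthdays.items(), key=lambda kv: (kv[1], kv[0])))
# [('Picasso', 1881), ('Ernst', 1891), ('Dali', 1904), ('Warhol', 1928)]

counts = Counter(["THEFT", "BATTERY", "THEFT", "ASSAULT", "THEFT"])
print(counts.most_common(1))  # [('THEFT', 3)]
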
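The 1.4.1.py record resolves a variable by recursing through a chain of parent namespaces, the same walk Python itself performs across enclosing scopes. The recursion can be flattened into a loop; a compact sketch using the record's data layout, with illustrative names and data:

parents = {"global": None, "foo": "global", "bar": "foo"}
variables = {"global": {"a"}, "foo": {"b"}, "bar": {"c"}}

def resolve(namespace, var):
    """Return the closest namespace that defines var, or None."""
    while namespace is not None:
        if var in variables.get(namespace, set()):
            return namespace
        namespace = parents[namespace]
    return None

print(resolve("bar", "a"), resolve("bar", "c"), resolve("foo", "c"))
# global bar None
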
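The 1.3.2.py record computes C(n, k) directly from Pascal's rule, C(n, k) = C(n-1, k) + C(n-1, k-1), which without caching revisits the same subproblems exponentially often. functools.lru_cache memoizes the identical recursion; a sketch:

from functools import lru_cache

@lru_cache(maxsize=None)
def c_rec(n, k):
    if k == 0:
        return 1
    if k > n:
        return 0
    return c_rec(n - 1, k) + c_rec(n - 1, k - 1)

# Instant with the cache; the uncached recursion would make
# hundreds of millions of calls for these arguments.
print(c_rec(30, 15))  # 155117520
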
SK-CERT/dell-storage-client
https://github.com/SK-CERT/dell-storage-client
1e37db96a49ff1eaaeca3a64e9ea4770bba07cb3
c72586f194211f4e6c3d00fe878802f42ac362cb
daa265d6802252ad5fd0ecdf866bb21e124f62e7
refs/heads/master
2020-12-29T19:46:12.931765
2020-03-05T14:04:09
2020-03-05T14:04:09
238,711,105
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6138086915016174, "alphanum_fraction": 0.6164708733558655, "avg_line_length": 39.752357482910156, "blob_id": "5b0c8f91ee180534215567d6e1dfad476456aaa9", "content_id": "47f1ebd7f8b25adee0e1e722525b36758b61215b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17279, "license_type": "no_license", "max_line_length": 120, "num_lines": 424, "path": "/dell_storage_api/volume.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "\"\"\" This module contains classes for management of volumes in Storage Center managed by Dell Storage Manager\"\"\"\nfrom typing import Dict, Any\n\nfrom requests import Session\n\nfrom dell_storage_api.storage_object import StorageObject, StorageObjectCollection, StorageObjectFolder\n\n\nclass Volume(StorageObject):\n \"\"\"\n Class that represents volume in Storage Center\n \"\"\"\n ENDPOINT = '/StorageCenter/ScVolume'\n VOLUME_ENDPOINT = '/StorageCenter/ScVolume/%s'\n MAPPING_ENDPOINT = '/StorageCenter/ScVolume/%s/MapToServer'\n MAPPING_PROFILE_ENDPOINT = '/StorageCenter/ScVolume/%s/MappingProfileList'\n UNMAPPING_ENDPOINT = '/StorageCenter/ScVolume/%s/Unmap'\n RECYCLE_ENDPOINT = '/StorageCenter/ScVolume/%s/Recycle'\n EXPAND_TO_SIZE_ENDPOINT = '/StorageCenter/ScVolume/%s/ExpandToSize'\n EXPAND_ENDPOINT = '/StorageCenter/ScVolume/%s/Expand'\n\n def __init__(self, req_session: Session, base_url: str, name: str, instance_id: str,\n parent_folder_id: str, wwid: str, status: str) -> None:\n super().__init__(req_session, base_url, name, instance_id)\n self.parent_folder_id = parent_folder_id\n self.wwid = wwid\n self.status = status\n\n @classmethod\n def from_json(cls, req_session: Session, base_url: str, source_dict: Dict[Any, Any]) -> 'Volume':\n \"\"\"\n Class method that creates instance of Volume class from supplied dictionary. Source dictionary is expected\n to contain at least 'instanceId', 'name', 'deviceId' and 'volumeFolder' keys. Value of 'volumeFolder' is\n expected to be of type dict, containing at least key 'instanceId'\n :param req_session: requests.Session object with stored login cookies. 
(passed down\n from dell_storage_api.session.DsmSession)\n :param base_url: base URL of DSM\n :param source_dict: Dictionary containing data about Volume object\n :return: instance of a Volume class\n \"\"\"\n return Volume(req_session=req_session,\n base_url=base_url,\n name=source_dict['name'],\n instance_id=source_dict['instanceId'],\n parent_folder_id=source_dict['volumeFolder']['instanceId'],\n wwid=source_dict['deviceId'],\n status=source_dict['status']\n )\n\n @property\n def mapping_url(self) -> str:\n \"\"\"\n Return complete URL for mapping this volume to specific server\n :return: URL for volume mapping\n \"\"\"\n return self.build_url(self.MAPPING_ENDPOINT)\n\n @property\n def unmapping_url(self) -> str:\n \"\"\"\n Return complete URL for unmapping this volume from servers to which it is currently mapped\n :return: URL for volume unmapping\n \"\"\"\n return self.build_url(self.UNMAPPING_ENDPOINT)\n\n @property\n def mapping_profile_url(self) -> str:\n \"\"\"\n Return complete URL to read mapping profile of this volume.\n :return: URL for volume mapping list\n \"\"\"\n return self.build_url(self.MAPPING_PROFILE_ENDPOINT)\n\n @property\n def recycle_url(self) -> str:\n \"\"\"\n Return complete URL for moving this volume to recycle bin\n :return: URL for moving this volume to recycle bin\n \"\"\"\n return self.build_url(self.RECYCLE_ENDPOINT)\n\n @property\n def delete_url(self) -> str:\n \"\"\"\n Return complete URL to permanently remove this volume.\n WARNING: this action can not be undone\n :return: URL for removing this volume\n \"\"\"\n return self.build_url(self.VOLUME_ENDPOINT)\n\n @property\n def expand_url(self) -> str:\n \"\"\"\n Return complete URL for expanding this volume's capacity by specified amount.\n :return: URL for expanding this volume by specified amount\n \"\"\"\n return self.build_url(self.EXPAND_ENDPOINT)\n\n @property\n def expand_to_size_url(self) -> str:\n \"\"\"\n Return complete URL for expanding this volume's capacity to specified size\n :return: URL for expanding this volume to specified size\n \"\"\"\n return self.build_url(self.EXPAND_TO_SIZE_ENDPOINT)\n\n @property\n def modify_url(self) -> str:\n \"\"\"\n Return complete URL for modification of this volume. Modifiable attributes are 'Name' and 'VolumeFolder'\n :return: URL for modification of this volume\n \"\"\"\n return self.build_url(self.VOLUME_ENDPOINT)\n\n @property\n def details_url(self) -> str:\n \"\"\"\n Return complete URL for getting details about this volume\n :return: URL for details about this volume\n \"\"\"\n return self.build_url(self.VOLUME_ENDPOINT)\n\n def map_to_server(self, server_id: str) -> bool:\n \"\"\"\n Perform API call to DSM that maps this volume to server with instance ID specified by parameter 'server_id'. 
If\n supplied server_id is instance ID of a cluster, this volume will be mapped to every server that is part of that\n cluster.\n This operation fails if volume is already mapped to some server.\n :param server_id: Instance ID of server to which this volume will be mapped\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n payload = {'Server': server_id}\n resp = self.session.post(self.mapping_url, json=payload)\n if resp.status_code == 200:\n success = True\n print(\"OK - Volume '%s' (%s) sucessfully mapped to server.\" % (self.name, self.instance_id))\n else:\n print(\"Error: Failed to map volume - %s\" % resp.json().get('result'))\n return success\n\n def unmap(self) -> bool:\n \"\"\"\n Perform API call to DSM that unmaps this volume from any servers it is currently mapped to.\n WARNING: unmapping volume from active servers will cause those servers to loose connectivity with this volume.\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n resp = self.session.post(self.unmapping_url)\n if resp.status_code == 204:\n success = True\n print('OK - Volume successfully unmapped')\n else:\n print('Error: Failed to unamp volume - %s' % resp.text)\n return success\n\n def mapping(self) -> object:\n \"\"\"\n \"\"\"\n success = False\n resp = self.session.get(self.mapping_profile_url)\n if resp.status_code == 200:\n success = True\n mapping_profiles = resp.json()\n if mapping_profiles:\n return mapping_profiles[0]['server']\n else:\n print(\"Error: Failed to get volume mapping list\")\n return None\n\n def expand(self, size: str) -> bool:\n \"\"\"\n Perform API call to DSM that expands this volume by specified amount.\n :param size: Size by which this volume will be expanded (e.g.: 10GB or 1.2TB)\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n payload = {\"ExpandAmount\": size}\n resp = self.session.post(self.expand_url, json=payload)\n if resp.status_code == 200:\n success = True\n print(\"OK - Volume expanded by %s\" % size)\n else:\n print(\"Error: Failed to expand volume - %s\" % resp.json().get('result'))\n return success\n\n def expand_to_size(self, size: str) -> bool:\n \"\"\"\n Perform API call to DSM that expands this volume to the specified size. This method can be used only to\n increase volume size, DSM is unable to shrink volumes\n :param size: Size to which this volume is expanded (e.g.: 500GB or 2.5TB)\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n payload = {\"NewSize\": size}\n resp = self.session.post(self.expand_to_size_url, json=payload)\n if resp.status_code == 200:\n success = True\n print(\"OK - Volume expanded to size %s\" % size)\n else:\n print(\"Error: Failed to expand volume - %s\" % resp.json().get('result'))\n return success\n\n def recycle(self) -> bool:\n \"\"\"\n Perform API call to DSM that moves this volume to recycle bin. 
Volumes in recycle bin can be restored.\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n resp = self.session.post(self.recycle_url)\n if resp.status_code == 204:\n success = True\n print('OK - Volume successfully moved to recycle bin')\n else:\n print('Error: Failed to recycle volume - %s' % resp.text)\n return success\n\n def delete(self) -> bool:\n \"\"\"\n Perform API call to DSM that permanently deletes this volume.\n WARNING: This action can not be undone.\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n resp = self.session.delete(self.delete_url)\n if resp.status_code == 200:\n success = True\n print(\"Ok - Volume successfully deleted\")\n else:\n print(\"Error: Failed to delete volume - %s\" % resp.json().get('result'))\n return success\n\n def _modify_volume(self, payload: Dict[str, str]) -> bool:\n \"\"\"\n Internal method that performs API call to DSM to modify volume properties. Only properties that are modifiable\n are 'Name' and 'VolumeFolder'.\n :param payload: Dictionary with modified properties and their new values (e.g.: {'Name': 'new_volume_name'})\n :return: True if operation is successful, otherwise False\n \"\"\"\n # TODO: Move common functionality (like modify/rename/move) to base class\n success = False\n resp = self.session.put(self.modify_url, json=payload)\n if resp.status_code == 200:\n success = True\n print(\"Ok - Volume modified\")\n else:\n print(\"Error: Failed to modify volume\")\n return success\n\n def rename(self, new_name: str) -> bool:\n \"\"\"\n Perform API call to DSM to change current volume name to the new value.\n Note: Volume names do not have to be unique even within same folder\n :param new_name: New name for this volume\n :return: True if operation is successful, otherwise False\n \"\"\"\n if self._modify_volume({\"Name\": new_name}):\n self.name = new_name\n return True\n else:\n return False\n\n def move_to_folder(self, volume_folder_id: str) -> bool:\n \"\"\"\n Perform API call to DSM to move this volume to the folder with instance ID specified by parameters\n volume_folder_id.\n :param volume_folder_id: Instance ID of a Volume folder to which this volume will be moved\n :return: True if operation is successful, otherwise False\n \"\"\"\n if self._modify_volume({\"VolumeFolder\": volume_folder_id}):\n self.parent_folder_id = volume_folder_id\n return True\n else:\n return False\n\n def details(self) -> Dict[str, Any]:\n \"\"\"\n Perform API call to DSM to get all information available about this volume and returns it in\n form of a dictionary\n :return: Dictionary containing details about this volume.\n \"\"\"\n result: Dict[str, Any] = {}\n resp = self.session.put(self.details_url)\n if resp.status_code == 200:\n result = resp.json()\n else:\n print(\"Error: Failed to fetch volume details\")\n return result\n\n\nclass VolumeCollection(StorageObjectCollection):\n \"\"\" Collection of volume folders\"\"\"\n\n def find_by_parent_folder(self, folder_id: str) -> 'VolumeCollection':\n \"\"\"\n Return subset VolumeCollection that contains only volumes with specified parent folder.\n :param folder_id: Instance ID of a folder whose children should be in the result\n :return: Volumes with common parent specified by folder_id\n \"\"\"\n result = VolumeCollection()\n for volume in self:\n if volume.parent_folder_id == folder_id:\n result.add(volume)\n return result\n\n\nclass VolumeFolder(StorageObjectFolder):\n \"\"\" Class representing Volume Folder\"\"\"\n ENDPOINT = 
'/StorageCenter/ScVolumeFolder'\n VOLUME_FOLDER_ENDPOINT = '/StorageCenter/ScVolumeFolder/%s'\n\n @classmethod\n def from_json(cls, req_session: Session, base_url: str, source_dict: Dict[Any, Any]) -> 'VolumeFolder':\n \"\"\"\n Class method that creates instance of VolumeFolder class from supplied dictionary. Source dictionary is expected\n to contain at least 'instanceId' and 'name' keys. Optional key 'parent' can be present and its value is\n expected to be dict containing key 'instanceId'.\n :param req_session: requests.Session object with stored login cookies. (passed down\n from dell_storage_api.session.DsmSession)\n :param base_url: base URL of DSM\n :param source_dict: Dictionary containing data about VolumeFolder object\n :return: instance of a VolumeFolder class\n \"\"\"\n return VolumeFolder(req_session=req_session,\n base_url=base_url,\n name=source_dict[\"name\"],\n instance_id=source_dict[\"instanceId\"],\n parent_id=source_dict.get('parent', {}).get('instanceId', None))\n\n @property\n def modify_url(self) -> str:\n \"\"\"\n Return complete URL for modification of this volume folder\n :return: URL for folder modification\n \"\"\"\n return self.build_url(self.VOLUME_FOLDER_ENDPOINT)\n\n @property\n def details_url(self) -> str:\n \"\"\"\n Return complete URL for getting details about this volume folder\n :return: URL for details about this volume folder\n \"\"\"\n return self.build_url(self.VOLUME_FOLDER_ENDPOINT)\n\n @property\n def delete_url(self) -> str:\n \"\"\"\n Return complete URL to permanently remove this volume Folder.\n WARNING: this action can not be undone\n :return: URL for removing this volume folder\n \"\"\"\n return self.build_url(self.VOLUME_FOLDER_ENDPOINT)\n\n def _modify_volume_folder(self, payload: Dict[str, str]) -> bool:\n \"\"\"\n Internal method that performs call to DMS to modify this volume folder. Only modifiable properties are\n 'Name' and 'Parent'.\n :param payload: Dictionary with modified properties and their new values (e.g.: {'Name': 'new_folder_name'})\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n resp = self.session.put(self.modify_url, json=payload)\n if resp.status_code == 200:\n success = True\n print(\"Ok - Volume folder modified\")\n else:\n print(\"Error: Failed to modify volume folder\")\n return success\n\n def rename(self, name: str) -> bool:\n \"\"\"\n Perform API call to DSM to change this volume folder name to the new value.\n Note: Volume Folder names do not have to be unique even within same parent folder\n :param name: New name for this volume folder\n :return: True if operation is successful, otherwise False\n \"\"\"\n if self._modify_volume_folder({\"Name\": name}):\n self.name = name\n return True\n else:\n return False\n\n def move_to_folder(self, parent_folder_id: str) -> bool:\n \"\"\"\n Perform API call to DSM to move this folder to different parent folder specified by Instance ID in\n 'parent_folder_id' parameter.\n :param parent_folder_id: Instance ID of the new parent folder\n :return: True if operation is successful, otherwise False\n \"\"\"\n if self._modify_volume_folder({\"VolumeFolder\": parent_folder_id}):\n self.parent_id = parent_folder_id\n return True\n else:\n return False\n\n def details(self) -> Dict[str, Any]:\n \"\"\"\n Perform API call to DSM to fetch details about this volume folder. 
Result is returned as dictionary\n :return: Dictionary containing details about this volume folder\n \"\"\"\n result: Dict[str, Any] = {}\n resp = self.session.put(self.details_url)\n if resp.status_code == 200:\n result = resp.json()\n else:\n print(\"Error: Failed to fetch volume folder details\")\n return result\n\n def delete(self) -> bool:\n \"\"\"\n Perform API call to DSM to permanently delete this volume folder.\n Note: Volume folder can not be removed if it contains volumes.\n WARNING: This action can not be undone\n :return: True if operation is successful, otherwise False\n \"\"\"\n success = False\n resp = self.session.delete(self.delete_url)\n if resp.status_code == 200:\n success = True\n print(\"Ok - Volume folder successfully deleted\")\n else:\n print(\"Error: Failed to delete volume folder - %s\" % resp.json().get('result'))\n return success\n" }, { "alpha_fraction": 0.6333118677139282, "alphanum_fraction": 0.6353750228881836, "avg_line_length": 43.31428527832031, "blob_id": "c94848c943ad0353550f780b1532f4296a431234", "content_id": "5157cf487c4446bce91a145f3dc465adf6a75b79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23265, "license_type": "no_license", "max_line_length": 127, "num_lines": 525, "path": "/bin/dell-storage-client", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# pylint: disable=C0103,C0111\nimport argparse\nimport getpass\nimport json\nfrom typing import Optional, List\n\nfrom texttable import Texttable\n\nfrom dell_storage_api import DsmSession, StorageCenter\n\nCMD_CONST_VOLUME = 'volume'\nCMD_CONST_VOLUME_CREATE = 'create'\nCMD_CONST_VOLUME_LIST = 'list'\nCMD_CONST_VOLUME_MAP = 'map'\nCMD_CONST_VOLUME_UNMAP = 'unmap'\n\nCMD_CONST_VOLUME_FOLDER = 'volume_folder'\nCMD_CONST_VOLUME_FOLDER_CREATE = 'create'\nCMD_CONST_VOLUME_FOLDER_LIST = 'list'\n\nCMD_CONST_STORAGE_CENTER = 'storage_center'\nCMD_CONST_STORAGE_CENTER_LIST = 'list'\n\nCMD_CONST_SERVER = 'server'\nCMD_CONST_SERVER_LIST = 'list'\n\nclass ReturnCode: # pylint: disable=R0903\n \"\"\"Convenience class that holds semantic return codes \"\"\"\n SUCCESS = 0\n FAILURE = 1\n\n\nclass ServerType:\n \"\"\"Convenience class that represents various types of Server object\"\"\"\n\n def __init__(self) -> None:\n self._server = 'Server'\n self._cluster = 'Cluster'\n self._all = 'All'\n self._all_types = [self._all, self._server, self._cluster]\n\n @property\n def server(self) -> str:\n \"\"\"\n Return string representing Physical server type\n :return: Physical server type\n \"\"\"\n return self._server\n\n @property\n def cluster(self) -> str:\n \"\"\"\n Return string representing Custer server type\n :return: Cluster server type\n \"\"\"\n return self._cluster\n\n @property\n def all_keyword(self) -> str:\n \"\"\"\n Return list of all possible server type keywords (including magic keyword 'All')\n :return: list of all server type keywords\n \"\"\"\n return self._all\n\n @property\n def all_types(self) -> List[str]:\n \"\"\"\n Magic keyword representing all/any server types\n :return: keyword representing all types\n \"\"\"\n return self._all_types\n\n\nSERVER_TYPES = ServerType()\n\ndef print_table(table):\n cli_args = parse_arguments()\n\n if cli_args.json:\n res = []\n for row in table._rows:\n line = {}\n for i in range(len(table._header)):\n line[table._header[i]] = row[i]\n res.append(line)\n text = json.dumps(res)\n else:\n text = table.draw()\n\n print (text)\n\n\ndef 
volume_create(storage: StorageCenter, name: str, size: str,\n unique_name: bool = True, folder_id: str = '', map_to_id: str = '') -> int:\n \"\"\"\n Create new volume in Storage Center.\n :param storage: Storage Center in which new volume will be created\n :param name: Name of the volume\n :param size: Size of the volume (e.g.: '500GB' or '1.5TB')\n :param unique_name: Should the volume creation fail if the valoume name already exists in this folder ?\n :param folder_id: Folder in which the volume will be created (Defaults to root folder)\n :param map_to_id: Instance ID of a server (or cluster) to which this volume should be mapped\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n if map_to_id:\n mapping_server = storage.server_list().find_by_instance_id(map_to_id)\n if mapping_server is None:\n print(\"Volume can't be mapped to server with instance ID '%s'. No such server\" % map_to_id)\n return ReturnCode.FAILURE\n if unique_name and storage.volume_list().find_by_name(name):\n print(\"Volume with name '%s' already exists\" % name)\n return ReturnCode.FAILURE\n\n new_volume = storage.new_volume(name, size, folder_id)\n if new_volume is not None:\n print(\"OK - Volume '%s' created with instance ID '%s'\" % (new_volume.name, new_volume.instance_id))\n if map_to_id:\n if new_volume.map_to_server(map_to_id):\n return ReturnCode.SUCCESS\n else:\n return ReturnCode.FAILURE\n else:\n return ReturnCode.SUCCESS\n else:\n return ReturnCode.FAILURE\n\n\ndef volume_map(storage: StorageCenter, volume_id: str, server_id: str) -> int:\n \"\"\"\n Map existing volume to the server (or cluster).\n NOTE: Volume that is already mapped to server can not be mapped again without unmapping it first.\n :param storage: Storage Center where the volume is located\n :param volume_id: Instance ID of a Volume to be mapped\n :param server_id: Instance ID of a Server (or a cluster) to which volume will be mapped\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n volume = storage.volume_list().find_by_instance_id(volume_id)\n if volume is None:\n print(\"Failed to map volume with ID '%s'. No such volume\" % volume_id)\n return ReturnCode.FAILURE\n server = storage.server_list().find_by_instance_id(server_id)\n if server is None:\n print(\"failed to map volume '%s' (%s) to server with ID '%s'. No such server\" % (volume.name,\n volume.instance_id,\n server_id))\n return ReturnCode.FAILURE\n if volume.map_to_server(server_id=server_id):\n return ReturnCode.SUCCESS\n else:\n return ReturnCode.FAILURE\n\n\ndef volume_unmap(storage: StorageCenter, volume_id: str) -> int:\n \"\"\"\n Unmap volume from all servers it is currently mapped to\n :param storage: Storage Center where volume is located\n :param volume_id: Instance ID of a volume to be unampped\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n volume = storage.volume_list().find_by_instance_id(volume_id)\n if volume is None:\n print(\"Failed to unmap volume with ID '%s'. 
No such volume\" % volume_id)\n return ReturnCode.FAILURE\n if volume.unmap():\n return ReturnCode.SUCCESS\n else:\n return ReturnCode.FAILURE\n\n\ndef volume_list(storage: StorageCenter, folder_id: str = '', show_mapping: bool = False) -> int:\n \"\"\"\n Print table of Volumes present in Storage Center in specified volume folder.\n :param storage: Storage Center, from which to list volumes\n :param folder_id: Volume Folder, from which to list volumes (Defaults to root)\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n table = Texttable(max_width=120)\n all_volumes = storage.volume_list()\n if folder_id:\n all_volumes = all_volumes.find_by_parent_folder(folder_id)\n\n if show_mapping:\n table.header(['volume', 'instance_id', 'parent_folder', 'wwid', 'status', 'mapping'])\n table.set_cols_dtype(['t', 't', 't', 't','t','t'])\n else:\n table.header(['volume', 'instance_id', 'parent_folder', 'wwid', 'status'])\n table.set_cols_dtype(['t', 't', 't', 't','t'])\n\n for volume in all_volumes:\n if show_mapping:\n mapping = volume.mapping()\n mapping_name = mapping['instanceName'] if mapping else None\n table.add_row([volume.name, volume.instance_id, volume.parent_folder_id, volume.wwid, volume.status, mapping_name])\n else:\n table.add_row([volume.name, volume.instance_id, volume.parent_folder_id, volume.wwid, volume.status])\n\n print_table(table)\n\n return ReturnCode.SUCCESS\n\n\ndef volume_folder_list(storage: StorageCenter, parent_id: str = '') -> int:\n \"\"\"\n Print table of Volume Folders present in Storage Center in specified parent Volume Folder\n :param storage: Storage Center, from which to list volume folders\n :param parent_id: Parent volume folder, from which the child volume folders will be listed (Defaults to root)\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n # TODO: Adaptable table width\n table = Texttable(max_width=120)\n table.header(['folder', 'instance_id', 'parent_instance_id'])\n table.set_cols_dtype(['t', 't', 't'])\n folder_list = storage.volume_folder_list()\n if parent_id:\n folder_list = folder_list.find_by_parent_id(parent_id)\n for folder in folder_list:\n table.add_row([folder.name, folder.instance_id, folder.parent_id])\n print_table(table)\n\n return ReturnCode.SUCCESS\n\n\ndef volume_folder_create(storage: StorageCenter, folder_name: str, folder_parent_id: str = '',\n unique_name: bool = True) -> int:\n \"\"\"\n Create new volume folder in Storage Center\n :param storage: Storage Center where the new folder will be created\n :param folder_name: Name of the new folder\n :param folder_parent_id: Instance ID of a parent folder for the new volume folder\n :param unique_name: Should the volume creation fail if volume with same name already exists within parent folder\n :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a operation\n \"\"\"\n if unique_name and storage.volume_folder_list().find_by_name(folder_name):\n print(\"Volume folder with name '%s' already exists\" % folder_name)\n return ReturnCode.FAILURE\n if storage.new_volume_folder(folder_name, folder_parent_id):\n return ReturnCode.SUCCESS\n else:\n return ReturnCode.FAILURE\n\n\ndef server_list(storage: StorageCenter, object_type: str) -> int:\n \"\"\"\n Print table of Servers defined in Storage Center.\n :param storage: Storage Center from which servers will be listed\n :param object_type: Limit output only to Servers of specific\n type (e.g.: SERVER_TYPES.server or 
SERVER_TYPES.cluster)\n    :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of an operation\n    \"\"\"\n    table = Texttable(max_width=120)\n    table.header(['server', 'type', 'instance_id'])\n    table.set_cols_dtype(['t', 't', 't'])\n\n    servers = storage.server_list()\n    if object_type == SERVER_TYPES.server:\n        servers = servers.filter_physical_servers()\n    elif object_type == SERVER_TYPES.cluster:\n        servers = servers.filter_clusters()\n\n    for server in servers:\n        table.add_row([server.name,\n                       server.pretty_type(),\n                       server.instance_id])\n    print_table(table)\n    return ReturnCode.SUCCESS\n\n\ndef storage_center_list(session: DsmSession) -> int:\n    \"\"\"\n    Print table of Storage Centers connected to Dell Storage Manager\n    :param session: Authenticated session with Dell Storage Manager\n    :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of an operation\n    \"\"\"\n    storage_centers = session.storage_centers()\n    table = Texttable()\n    table.header([\"name\", \"ip\", \"instance_id\", \"serial\"])\n    table.set_cols_dtype(['t', 't', 't', 't'])\n\n    for storage_center in storage_centers:\n        table.add_row([storage_center.name,\n                       storage_center.ip_addr,\n                       storage_center.instance_id,\n                       storage_center.serial_num])\n    print_table(table)\n\n    return ReturnCode.SUCCESS\n\n\ndef _find_storage_center(session: DsmSession, instance_id: str) -> Optional[StorageCenter]:\n    \"\"\"\n    Find and return Storage Center with specified Instance ID connected to Dell Storage Manager. If no such\n    Storage Center is found, return None.\n    :param session: Authenticated session with Dell Storage Manager\n    :param instance_id: Instance ID of a Storage Center that should be returned\n    :return: Storage Center with given Instance ID or None if no such ID is found\n    \"\"\"\n    storage_center = session.storage_centers().find_by_instance_id(instance_id)\n    if storage_center is None:\n        print(\"Failed to find storage center with instance ID '%s'. 
Try listing all storage \"\n              \"centers with command 'storage_center list'\" % instance_id)\n        return None\n    else:\n        return storage_center  # type: ignore\n\n\ndef exit_cli(session: DsmSession, return_code: int) -> None:\n    \"\"\"\n    Perform Session logout and exit program\n    :param session: Session with Dell Storage Manager\n    :param return_code: Return Code to exit with\n    :return: None\n    \"\"\"\n    session.logout(silent=True)\n    exit(return_code)\n\n\ndef parse_arguments() -> argparse.Namespace:  # pylint: disable=R0914\n    \"\"\"\n    Define and parse CLI commands, subcommands and arguments using argparse module\n    :return: argparse.Namespace with parsed CLI argument values\n    \"\"\"\n    parser = argparse.ArgumentParser()\n\n    # General options\n    parser.add_argument('-H', '--host', required=True, help=\"Hostname or IP address of Dell Storage Center\")\n    parser.add_argument('-P', '--port', default=3033, help=\"Management port of Dell Storage Center\")\n    parser.add_argument('-u', '--user', help='Login username')\n    parser.add_argument('-p', '--password', help='Login password')\n    parser.add_argument('-j', '--json', action='store_true', help='Output in JSON format')\n\n    # Top level subcommands\n    command_parser = parser.add_subparsers(dest='command')\n\n    # Storage Center subcommands\n    storage_center_parser = command_parser.add_parser(CMD_CONST_STORAGE_CENTER)\n    storage_center_parser_cmd = storage_center_parser.add_subparsers(dest='storage_center_commands')\n    # List Storage centers\n    storage_center_parser_cmd.add_parser(CMD_CONST_STORAGE_CENTER_LIST)\n\n    # Volume subcommands\n    volume_parser = command_parser.add_parser(CMD_CONST_VOLUME)\n    volume_parser_cmd = volume_parser.add_subparsers(dest='volume_commands')\n    # Create volume\n    volume_create_args = volume_parser_cmd.add_parser(CMD_CONST_VOLUME_CREATE)\n    volume_create_args.add_argument('-s', '--size', required=True, help='Size of the new volume. 
Example: \"500GB\"')\n volume_create_args.add_argument('-n', '--name', required=True, help='Name of the new volume')\n volume_create_args.add_argument('-f', '--folder-id', dest='folder_id', help='Instance ID of parent folder')\n volume_create_args.add_argument('-m' '--map-to-server', dest='map_to_server',\n help='Instance ID of server to which new volume will be mapped')\n volume_create_args.add_argument('-S', '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center where the volume will be created')\n volume_create_args.add_argument('-Q', '--non-unique-name', default=False, action='store_true',\n help='If this flag is present, volume creation wont fail if there is another '\n 'volume with the same name')\n # List Volumes\n volume_list_args = volume_parser_cmd.add_parser(CMD_CONST_VOLUME_LIST)\n volume_list_args.add_argument('-S', '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center from which the volumes will be listed')\n volume_list_args.add_argument('-f', '--folder-id', dest='folder_id', help='Instance ID of folder from '\n 'which the volumes will be listed')\n volume_list_args.add_argument('-m', '--show-mapping', dest='show_mapping', action=\"store_true\",\n help='Show mapping profile (this will slow down things!)')\n\n # Map Volume\n volume_map_args = volume_parser_cmd.add_parser(CMD_CONST_VOLUME_MAP)\n volume_map_args.add_argument('-S', '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center where volume is located')\n volume_map_args.add_argument('-v', '--volume-id', dest='volume_id', help='Instance ID of volume to be mapped')\n volume_map_args.add_argument('-m' '--map-to-server', dest='map_to_server',\n help='Instance ID of server to which this volume will be mapped')\n # Unmap Volume\n volume_unmap_args = volume_parser_cmd.add_parser(CMD_CONST_VOLUME_UNMAP)\n volume_unmap_args.add_argument('-S', '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center where volume is located')\n volume_unmap_args.add_argument('-v', '--volume-id', dest='volume_id', help='Instance ID of volume to be unmapped')\n\n # Volume Folder subcommands\n volume_folder_parser = command_parser.add_parser('volume_folder')\n volume_folder_parser_cmd = volume_folder_parser.add_subparsers(dest='volume_folder_commands')\n # List volume folders\n volume_folder_list_args = volume_folder_parser_cmd.add_parser(CMD_CONST_VOLUME_FOLDER_LIST)\n volume_folder_list_args.add_argument('-S' '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center from which, volume folders will be listed')\n volume_folder_list_args.add_argument('-f', '--folder-id', dest='folder_id',\n help='Instance ID of folder from which the child volumes will be listed')\n\n # Create volume folder\n volume_folder_create_args = volume_folder_parser_cmd.add_parser(CMD_CONST_VOLUME_FOLDER_CREATE)\n volume_folder_create_args.add_argument('-n', '--name', help='Name of the new folder')\n volume_folder_create_args.add_argument('-f', '--folder-id', dest='folder_id', help='Instance ID of parent folder')\n volume_folder_create_args.add_argument('-S' '--storage-id', required=True, dest='storage_id',\n help='Instance ID of storage center from which,'\n 'volume folders will be listed')\n volume_folder_create_args.add_argument('-Q', '--non-unique-name', default=False, action='store_true',\n help='If this flag is present, folder creation wont fail if there is another'\n ' folder with the same name in specified 
parent folder')\n\n    # Server subcommands\n    server_parser = command_parser.add_parser('server')\n    server_parser_cmd = server_parser.add_subparsers(dest='server_commands')\n    # List Servers\n    server_list_args = server_parser_cmd.add_parser(CMD_CONST_SERVER_LIST)\n    server_list_args.add_argument('-t', '--type', default=SERVER_TYPES.all_keyword, choices=SERVER_TYPES.all_types,\n                                  help='Print only server objects of selected type. '\n                                       '(Default=%s)' % SERVER_TYPES.all_keyword)\n    server_list_args.add_argument('-S', '--storage-id', required=True, dest='storage_id',\n                                  help='Instance ID of storage center from which servers will be listed')\n    return parser.parse_args()\n\n\ndef execute_command(args: argparse.Namespace, session: DsmSession) -> int:  # pylint: disable=R0912,R0915\n    \"\"\"\n    Execute CLI command based on parsed arguments and parameters\n    :param args: Parsed argparse CLI arguments\n    :param session: Authenticated session with Dell Storage Manager\n    :return: ReturnCode.SUCCESS or ReturnCode.FAILURE based on the outcome of a performed command\n    \"\"\"\n    # Volume commands\n    if args.command == CMD_CONST_VOLUME:\n        # Create Volume\n        if args.volume_commands == CMD_CONST_VOLUME_CREATE:\n            unique_name = not args.non_unique_name\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                ret_code = volume_create(storage=storage_center, name=args.name, size=args.size,\n                                         unique_name=unique_name, map_to_id=args.map_to_server)\n        # List Volumes\n        elif args.volume_commands == CMD_CONST_VOLUME_LIST:\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                parent_id = args.folder_id or ''\n                show_mapping = args.show_mapping or False\n                ret_code = volume_list(storage_center, parent_id, show_mapping)\n        elif args.volume_commands == CMD_CONST_VOLUME_MAP:\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                ret_code = volume_map(storage_center, args.volume_id, args.map_to_server)\n        elif args.volume_commands == CMD_CONST_VOLUME_UNMAP:\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                ret_code = volume_unmap(storage_center, args.volume_id)\n        # Default branch\n        else:\n            ret_code = ReturnCode.FAILURE\n\n    # Storage Center commands\n    elif args.command == CMD_CONST_STORAGE_CENTER:\n        # List Storage Centers\n        if args.storage_center_commands == CMD_CONST_STORAGE_CENTER_LIST:\n            ret_code = storage_center_list(session)\n        # Default branch\n        else:\n            ret_code = ReturnCode.FAILURE\n\n    # Volume Folder commands\n    elif args.command == CMD_CONST_VOLUME_FOLDER:\n        # List Volume folders\n        if args.volume_folder_commands == CMD_CONST_VOLUME_FOLDER_LIST:\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                parent_folder_id = args.folder_id or ''\n                ret_code = volume_folder_list(storage_center, parent_folder_id)\n        # Create volume folder\n        elif args.volume_folder_commands == CMD_CONST_VOLUME_FOLDER_CREATE:\n            unique_name = not args.non_unique_name\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is None:\n                ret_code = ReturnCode.FAILURE\n            else:\n                ret_code = volume_folder_create(storage_center, args.name, args.folder_id, unique_name)\n        # Default branch\n        else:\n            ret_code = ReturnCode.FAILURE\n    # Server commands\n    
elif args.command == CMD_CONST_SERVER:\n        # List servers\n        if args.server_commands == CMD_CONST_SERVER_LIST:\n            storage_center = _find_storage_center(session, args.storage_id)\n            if storage_center is not None:\n                ret_code = server_list(storage_center, args.type)\n            else:\n                ret_code = ReturnCode.FAILURE\n        else:\n            ret_code = ReturnCode.FAILURE\n    else:\n        ret_code = ReturnCode.FAILURE\n\n    return ret_code\n\n\ndef main() -> None:\n    # parse CLI arguments\n    cli_args = parse_arguments()\n\n    # Request missing arguments via CLI dialog\n    if not cli_args.user:\n        cli_args.user = input(\"Username: \")\n\n    if not cli_args.password:\n        cli_args.password = getpass.getpass()\n\n    # Initialize Session with Storage controller\n    scm_session = DsmSession(cli_args.user, cli_args.password, cli_args.host, cli_args.port, verify_cert=False)\n    if not scm_session.login():\n        exit(ReturnCode.FAILURE)\n\n    success = execute_command(cli_args, scm_session)\n    exit_cli(scm_session, success)\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.6169469952583313, "alphanum_fraction": 0.6184279918670654, "avg_line_length": 42.96744155883789, "blob_id": "5132fe82785279d3a829aaf79dd09d7c91394e9c", "content_id": "aa59ee756b6896087da07d54f2425b0e9878c0e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9453, "license_type": "no_license", "max_line_length": 117, "num_lines": 215, "path": "/dell_storage_api/storage_center.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "\"\"\" This module contains classes that represent Storage Centers managed by Dell Storage Manager (DSM) \"\"\"\nfrom typing import Any, Dict, Optional\n\nimport requests\n\nfrom dell_storage_api.storage_object import StorageObject, StorageObjectFolder, StorageObjectCollection, \\\n    StorageObjectFolderCollection\nfrom dell_storage_api.volume import Volume, VolumeCollection, VolumeFolder\nfrom dell_storage_api.server import Server, ServerCollection\n\n\nclass StorageCenter(StorageObject):\n    \"\"\"\n    Class representing physical Storage Center managed by DSM\n    \"\"\"\n    SERVER_FOLDER_LIST_ENDPOINT = '/StorageCenter/StorageCenter/%s/ServerFolderList'\n    SERVER_LIST_ENDPOINT = '/StorageCenter/StorageCenter/%s/ServerList'\n    SERVER_FOLDER_ENDPOINT = '/StorageCenter/ScServerFolder/%s'\n\n    VOLUME_FOLDER_LIST_ENDPOINT = '/StorageCenter/StorageCenter/%s/VolumeFolderList'\n    VOLUME_LIST_ENDPOINT = '/StorageCenter/StorageCenter/%s/VolumeList'\n\n    def __init__(self, req_session: requests.Session, base_url: str, name: str,\n                 instance_id: str, serial_num: str, ip_addr: str) -> None:\n        super(StorageCenter, self).__init__(req_session, base_url, name, instance_id)\n        self.serial_num = serial_num\n        self.ip_addr = ip_addr\n\n    @property\n    def server_folder_list_url(self) -> str:\n        \"\"\"\n        Return complete URL to list all server folders in this Storage Center\n        :return: URL for Server Folder listing\n        \"\"\"\n        return self.build_url(self.SERVER_FOLDER_LIST_ENDPOINT)\n\n    @property\n    def server_list_url(self) -> str:\n        \"\"\"\n        Return complete URL to list all servers defined in this Storage Center\n        :return: URL for Server listing\n        \"\"\"\n        return self.build_url(self.SERVER_LIST_ENDPOINT)\n\n    @property\n    def volume_folder_list_url(self) -> str:\n        \"\"\"\n        Return complete URL to list all volume folders in this Storage Center\n        :return: URL for Volume Folder listing\n        \"\"\"\n        return self.build_url(self.VOLUME_FOLDER_LIST_ENDPOINT)\n\n    @property\n    def volume_list_url(self) -> str:\n        \"\"\"\n        Return 
complete URL to list all volumes present in this Storage Center\n        :return: URL for Volume listing\n        \"\"\"\n        return self.build_url(self.VOLUME_LIST_ENDPOINT)\n\n    def server_folder_list(self) -> StorageObjectFolderCollection:\n        \"\"\"\n        Return collection of all Server Folders in this Storage Center\n        :return: Collection of all Server Folders\n        \"\"\"\n        # Wrap the raw records in a folder collection, matching the declared return type\n        result = StorageObjectFolderCollection()\n        for server_folder_data in self._fetch_object_list(url=self.server_folder_list_url):\n            result.add(StorageObjectFolder.from_json(req_session=self.session,\n                                                     base_url=self.base_url,\n                                                     source_dict=server_folder_data))\n        return result\n\n    def server_list(self) -> ServerCollection:\n        \"\"\"\n        Return collection of all servers defined in this Storage Center\n        :return: Collection of all Servers\n        \"\"\"\n        result = ServerCollection()\n        for server_data in self._fetch_object_list(self.server_list_url):\n            result.add(Server.from_json(req_session=self.session,\n                                        base_url=self.base_url,\n                                        source_dict=server_data))\n        return result\n\n    def volume_folder_list(self) -> StorageObjectFolderCollection:\n        \"\"\"\n        Return collection of all Volume Folders in this Storage Center\n        :return: Collection of all Volume Folders\n        \"\"\"\n        result = StorageObjectFolderCollection()\n        for volume_folder_data in self._fetch_object_list(url=self.volume_folder_list_url):\n            result.add(VolumeFolder.from_json(req_session=self.session,\n                                              base_url=self.base_url,\n                                              source_dict=volume_folder_data))\n        return result\n\n    def volume_list(self) -> VolumeCollection:\n        \"\"\"\n        Return collection of all volumes present in this Storage Center\n        :return: Collection of all volumes\n        \"\"\"\n        result = VolumeCollection()\n        for volume_data in self._fetch_object_list(url=self.volume_list_url):\n            result.add(Volume.from_json(req_session=self.session,\n                                        base_url=self.base_url,\n                                        source_dict=volume_data))\n        return result\n\n    def _find_volume_folder_root(self) -> Optional[StorageObjectFolder]:\n        \"\"\"\n        Internal method to find root Volume Folder that contains all other Volumes and Volume Folders. Method returns\n        None in case there is a problem with data fetching or lookup.\n        :return: Root Volume Folder or None in case of failure\n        \"\"\"\n        all_folders = self.volume_folder_list()\n        if not all_folders:\n            print(\"Error: Failed to fetch volume folder list\")\n            return None\n        else:\n            root_folder = all_folders.root_folder()\n            if root_folder is None:\n                print(\"Error: Failed to lookup root volume folder in list of all folders. \"\n                      \"This really should not happen\")\n                return None\n            else:\n                return root_folder\n\n    def new_volume_folder(self, name: str, parent_folder_id: str = '') -> Optional[VolumeFolder]:\n        \"\"\"\n        Create new Volume Folder in Storage Center and return object representing this new folder. This method\n        returns None in case there is a problem with folder creation.\n        :param name: Name for the new Volume Folder\n        :param parent_folder_id: Instance ID of parent folder. 
Defaults to root folder\n        :return: new VolumeFolder object or None in case of failure\n        \"\"\"\n        if not parent_folder_id:\n            parent_folder = self._find_volume_folder_root()\n            if parent_folder is not None:\n                parent_folder_id = parent_folder.instance_id\n            else:\n                print(\"Error: Failed to create new volume folder\")\n                return None\n        if parent_folder_id is None:\n            print(\"Error: Failed to create new volume folder\")\n            return None\n        url = self.base_url + VolumeFolder.ENDPOINT\n        payload = {\"Name\": name,\n                   \"Parent\": parent_folder_id,\n                   \"StorageCenter\": self.instance_id}\n        resp = self.session.post(url, json=payload)\n        if resp.status_code == 201:\n            return VolumeFolder.from_json(self.session, self.base_url, resp.json())\n        else:\n            print(\"Error: Failed to create new volume folder.\")\n            return None\n\n    def new_volume(self, name: str, size: str, volume_folder_id: str = '') -> Optional[Volume]:\n        \"\"\"\n        Create new Volume in Storage Center and return object representing this new volume. This method returns None\n        in case there is a problem with volume creation.\n        :param name: Name of the new volume\n        :param size: Size of the new volume (e.g.: '100GB' or '1.5TB')\n        :param volume_folder_id: Instance ID of folder in which this volume will be created. Defaults to root folder\n        :return: new Volume object or None in case of failure\n        \"\"\"\n        if not volume_folder_id:\n            volume_folder = self._find_volume_folder_root()\n            if volume_folder is not None:\n                volume_folder_id = volume_folder.instance_id\n            else:\n                print(\"Error: Failed to create new volume\")\n                return None\n        if volume_folder_id is None:\n            print(\"Error: Failed to create new volume\")\n            return None\n        url = self.base_url + Volume.ENDPOINT\n        payload = {\"Name\": name,\n                   \"Size\": size,\n                   \"StorageCenter\": self.instance_id,\n                   \"VolumeFolder\": volume_folder_id}\n        resp = self.session.post(url, json=payload)\n        if resp.status_code == 201:\n            return Volume.from_json(self.session, self.base_url, resp.json())\n        else:\n            print(\"Error: Failed to create new volume. (%d) - %s\" % (resp.status_code, resp.text))\n            return None\n\n    def _fetch_object_list(self, url: str) -> Dict[Any, Any]:\n        \"\"\"\n        Internal generic method to fetch list of objects from supplied URL. This method returns raw dictionary created\n        from json in response body. This method returns an empty dictionary if there is a problem with data fetching\n        :param url: URL of API endpoint that returns (json) list of objects\n        :return: raw dictionary of objects returned by API endpoint\n        \"\"\"\n        result: Dict[Any, Any] = {}\n        resp = self.session.get(url)\n        if resp.status_code == 200:\n            result = resp.json()\n        else:\n            print(\"Error: Failed to fetch object list (%d) - %s\" % (resp.status_code, resp.text))\n        return result\n\n\nclass StorageCenterCollection(StorageObjectCollection):\n    \"\"\"\n    Collection representing multiple physical Storage Centers managed by single DSM\n    \"\"\"\n\n    def find_by_serial_num(self, serial_num: str) -> Optional[StorageCenter]:\n        \"\"\"\n        Return Storage Center object with given serial number. 
In case there is no such Storage Center, return None\n        :param serial_num: Serial number of physical Storage Center\n        :return: StorageCenter object with given serial number or None\n        \"\"\"\n        result = None\n        for storage_center in self:\n            if storage_center.serial_num == serial_num:\n                result = storage_center\n                break\n        return result\n" }, { "alpha_fraction": 0.8702290058135986, "alphanum_fraction": 0.8702290058135986, "avg_line_length": 65.5, "blob_id": "1034ecb922e4f3409e8af44a44d510fd584cc141", "content_id": "ab49b16462340df324c28b4516a9d66af5a492ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 82, "num_lines": 2, "path": "/dell_storage_api/__init__.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "from dell_storage_api.session import DsmSession\nfrom dell_storage_api.storage_center import StorageCenterCollection, StorageCenter\n" }, { "alpha_fraction": 0.6359223127365112, "alphanum_fraction": 0.6361920237541199, "avg_line_length": 37.625, "blob_id": "f9e3cf0324a8039bbcab451c9665fcf16f5fafdf", "content_id": "0dbff666ff32d69a76022c0358f2fb131594468a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3708, "license_type": "no_license", "max_line_length": 116, "num_lines": 96, "path": "/dell_storage_api/server.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "\"\"\" This module contains classes representing servers connected to the Storage Center \"\"\"\nfrom typing import Dict, Any\n\nfrom requests import Session\n\nfrom dell_storage_api.storage_object import StorageObject, StorageObjectCollection\n\n\nclass Server(StorageObject):\n    \"\"\"\n    Class representing server connected to Storage controller. Server can be of type 'Physical Server' or 'Cluster',\n    where a cluster is a logical group of 0-N physical servers.\n    Instance ID of a server can be used to map Volume to this particular server. This action makes the volume accessible\n    as a block device in this server. Mapping a volume to a 'Cluster' makes the volume accessible to all physical servers in\n    this cluster.\n    \"\"\"\n\n    TYPE_PHYSICAL_SERVER = 'ScPhysicalServer'\n    TYPE_SERVER_CLUSTER = 'ScServerCluster'\n\n    def __init__(self, req_session: Session, base_url: str, name: str,\n                 instance_id: str, object_type: str) -> None:\n        super(Server, self).__init__(req_session=req_session, base_url=base_url, name=name, instance_id=instance_id)\n        self.type = object_type\n\n    @classmethod\n    def from_json(cls, req_session: Session, base_url: str, source_dict: Dict[Any, Any]) -> 'Server':\n        \"\"\"\n        Class method that creates instance of Server class from supplied dictionary. Source dictionary is expected\n        to contain at least 'instanceId', 'name' and 'objectType' keys.\n        :param req_session: requests.Session object with stored login cookies. 
(passed down\n                            from dell_storage_api.session.DsmSession)\n        :param base_url: base URL of DSM\n        :param source_dict: Dictionary containing data about Server object\n        :return: instance of a Server class\n        \"\"\"\n        return Server(req_session=req_session,\n                      base_url=base_url,\n                      name=source_dict['name'],\n                      instance_id=source_dict['instanceId'],\n                      object_type=source_dict['objectType'])\n\n    def is_cluster(self) -> bool:\n        \"\"\"\n        Is this Server object of type 'Cluster'\n        :return: True if this Server is Cluster, otherwise False\n        \"\"\"\n        return bool(self.type == self.TYPE_SERVER_CLUSTER)\n\n    def is_physical_server(self) -> bool:\n        \"\"\"\n        Is this Server object of type 'Physical Server'\n        :return: True if this Server is Physical server, otherwise False\n        \"\"\"\n        return bool(self.type == self.TYPE_PHYSICAL_SERVER)\n\n    def pretty_type(self) -> str:\n        \"\"\"\n        Pretty print type of this server\n        :return: Human-readable name of this server's type\n        \"\"\"\n        if self.type == self.TYPE_SERVER_CLUSTER:\n            result = 'Cluster'\n        elif self.type == self.TYPE_PHYSICAL_SERVER:\n            result = 'Physical Server'\n        else:\n            result = \"Unknown Type\"\n        return result\n\n\nclass ServerCollection(StorageObjectCollection):\n    \"\"\"\n    Collection of Server objects\n    \"\"\"\n\n    def filter_physical_servers(self) -> 'ServerCollection':\n        \"\"\"\n        Return subset collection containing only servers of type 'Physical Server'\n        :return: Subset containing only physical servers\n        \"\"\"\n        physical_servers = ServerCollection()\n        for server in self:\n            if server.is_physical_server():\n                physical_servers.add(server)\n        return physical_servers\n\n    def filter_clusters(self) -> 'ServerCollection':\n        \"\"\"\n        Return subset collection containing only servers of type 'Cluster'\n        :return: Subset containing only clusters\n        \"\"\"\n        clusters = ServerCollection()\n        for server in self:\n            if server.is_cluster():\n                clusters.add(server)\n        return clusters\n" }, { "alpha_fraction": 0.6443299055099487, "alphanum_fraction": 0.6520618796348572, "avg_line_length": 15.520000457763672, "blob_id": "1727478982528e5f490daac86c478ae868e2ce0f", "content_id": "d8a07dfeaa79461dc6f619dd01149e53e9cb7bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/setup.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport setuptools\n\nname = 'dell_storage_api'\nversion = '0.1'\nauthor = 'SK-CERT'\n\nscripts = ['bin/dell-storage-client',]\nrequires = [\n    'requests',\n    'texttable',\n    'pylint',\n    'mypy',\n]\n\npackages = setuptools.find_packages()\n\nsetuptools.setup(\n    name=name,\n    version=version,\n    install_requires=requires,\n    packages=packages,\n    scripts=scripts\n)\n" }, { "alpha_fraction": 0.647972822189331, "alphanum_fraction": 0.647972822189331, "avg_line_length": 42.35789489746094, "blob_id": "6c23b70775a86a8a99cbb132841e862a5a3847cc", "content_id": "23c451afc116cf8ce2f6a5391adefbbcfa71d4c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8238, "license_type": "no_license", "max_line_length": 118, "num_lines": 190, "path": "/dell_storage_api/storage_object.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains generic classes that serve as a base for creation of more specific classes that represent objects\nin Dell Storage Manager (DSM) API.\nBase classes:\n    - StorageObject: Base for standalone objects 
(e.g.: Volumes, Servers)\n    - StorageObjectFolder: Standalone objects can be grouped into folders in DSM. This object\n                           represents such folders\n    - StorageObjectCollection: Collection of StorageObject instances\n    - StorageObjectFolderCollection: Collection of StorageObjectFolder instances\n\"\"\"\nfrom collections.abc import Iterable\nfrom typing import Any, Dict, Iterator, List, Optional\n\nfrom requests import Session\n\n\nclass StorageObject:\n    \"\"\"\n    Base class for more specific classes that represent standalone objects in DSM api, such as\n    'Volume' or 'Server'.\n    \"\"\"\n\n    def __init__(self, req_session: Session, base_url: str, name: str, instance_id: str) -> None:\n        self.session = req_session\n        self.base_url = base_url\n        self.name = name\n        self.instance_id = instance_id\n\n    def __str__(self) -> str:\n        return \"%s: %s (%s)\" % (self.__class__, self.name, self.instance_id)\n\n    def build_url(self, endpoint_url: str) -> str:\n        \"\"\"\n        Build complete URL to the API endpoint by replacing single formatting character (%s) in endpoint_url with\n        instance_id of the object and combining it with base url.\n        :param endpoint_url: URL of the api endpoint (e.g.: '/StorageCenter/ScVolume/%s/MapToServer')\n        :return: Full URL to the api endpoint containing base_url, endpoint_url and instance ID of the storage object\n        \"\"\"\n        return self.base_url + endpoint_url % self.instance_id\n\n    @classmethod\n    def from_json(cls, req_session: Session, base_url: str, source_dict: Dict[Any, Any]) -> 'StorageObject':\n        \"\"\"\n        Class method that creates instance of StorageObject from supplied dictionary. Source dictionary is expected\n        to contain at least 'instanceId' and 'name' keys.\n        :param req_session: requests.Session object with stored login cookies. (passed down\n                            from dell_storage_api.session.DsmSession)\n        :param base_url: base URL of DSM\n        :param source_dict: Dictionary containing data about storage object\n        :return: instance of a StorageObject class\n        \"\"\"\n        return StorageObject(req_session=req_session,\n                             base_url=base_url,\n                             instance_id=source_dict['instanceId'],\n                             name=source_dict['name'])\n\n\nclass StorageObjectFolder(StorageObject):\n    \"\"\"\n    Base class for more specific classes that represent folders for objects in DSM api, such as\n    'VolumeFolder' or 'ServerFolder'.\n    \"\"\"\n\n    def __init__(self, req_session: Session, base_url: str, name: str,\n                 instance_id: str, parent_id: Optional[str]) -> None:\n        super().__init__(req_session, base_url, name, instance_id)\n        self.parent_id = parent_id\n\n    @property\n    def is_root(self) -> bool:\n        \"\"\"\n        Is this folder a root in folder hierarchy?\n        :return: True if this folder is root, otherwise False\n        \"\"\"\n        return self.parent_id is None\n\n    @classmethod\n    def from_json(cls, req_session: Session, base_url: str, source_dict: Dict[Any, Any]) -> 'StorageObjectFolder':\n        \"\"\"\n        Class method that creates instance of StorageObjectFolder from supplied dictionary. Source dictionary is\n        expected to contain at least 'instanceId', 'name' and 'parent' keys. 'parent' key is optional (it can be\n        missing in case the folder is root) but if it's present, it should be dictionary containing at least\n        'instanceId' key.\n        :param req_session: requests.Session object with stored login cookies. 
(passed down\n                            from dell_storage_api.session.DsmSession)\n        :param base_url: base URL of DSM\n        :param source_dict: Dictionary containing data about storage object folder\n        :return: instance of a StorageObjectFolder class\n        \"\"\"\n        return StorageObjectFolder(req_session=req_session,\n                                   base_url=base_url,\n                                   instance_id=source_dict['instanceId'],\n                                   name=source_dict['name'],\n                                   parent_id=source_dict.get('parent', {}).get('instanceId', None)\n                                   )\n\n\nclass StorageObjectCollection(Iterable):\n    \"\"\"\n    Base class representing collection of StorageObject-s. Internally, StorageObjects are stored as dictionary\n    indexed by object's instance ID.\n    This class provides methods for object searching by attributes that are common for every object type in\n    DSM and those are 'name' and 'instanceId'.\n    \"\"\"\n\n    def __iter__(self) -> Iterator:\n        return self._store.values().__iter__()\n\n    def __bool__(self) -> bool:\n        return bool(self._store)\n\n    def __len__(self) -> int:\n        return len(self._store)\n\n    def __init__(self) -> None:\n        self._store: Dict[str, StorageObject] = {}\n\n    def add(self, storage_object: StorageObject) -> None:\n        \"\"\"\n        Add storage object to this collection\n        :param storage_object: StorageObject to be added\n        :return: None\n        \"\"\"\n        self._store[storage_object.instance_id] = storage_object\n\n    def find_by_instance_id(self, instance_id: str) -> Optional[StorageObject]:\n        \"\"\"\n        Find StorageObject with specified instance_id in this collection. If no object is found, None is returned\n        :param instance_id: Instance ID of a storage object to search for\n        :return: StorageObject or None\n        \"\"\"\n        return self._store.get(instance_id, None)\n\n    def find_by_name(self, name: str) -> 'StorageObjectCollection':\n        \"\"\"\n        Find StorageObject with specified name in this collection. Since names do not have to be unique, this method\n        will always return StorageObjectCollection containing all the StorageObjects with given name. In case no\n        StorageObject is found, returned collection is empty\n        :param name: Name of the StorageObject to search for\n        :return: StorageObjectCollection containing all the objects with given name\n        \"\"\"\n        result = StorageObjectCollection()\n        for storage_object in self:\n            if storage_object.name == name:\n                result.add(storage_object)\n        return result\n\n    def all_objects(self) -> List[StorageObject]:\n        \"\"\"\n        Return all StorageObjects in this collection as a list\n        :return: List of all storage objects in this collection\n        \"\"\"\n        return list(self._store.values())\n\n\nclass StorageObjectFolderCollection(StorageObjectCollection):\n    \"\"\"\n    Base class representing collection of StorageObjectFolder-s. Internally, StorageObjectFolders are stored as\n    dictionary indexed by folder's instance ID.\n    This class provides methods to search for root folders and to find folders that belong to the specific\n    parent folder.\n    \"\"\"\n\n    def root_folder(self) -> Optional[StorageObjectFolder]:\n        \"\"\"\n        Find and return StorageObjectFolder that is a root in folder hierarchy. 
If this collection does not contain\n        a root folder, return None\n        :return: Root folder from this collection or None\n        \"\"\"\n        for folder in self:\n            if folder.is_root:\n                root = folder\n                break\n        else:\n            root = None\n        return root\n\n    def find_by_parent_id(self, parent_id: str) -> 'StorageObjectFolderCollection':\n        \"\"\"\n        Create new StorageObjectFolderCollection that is subset of current collection and contains only\n        StorageObjectFolders whose direct parent is folder with given parent_id (instance ID of a folder)\n        :param parent_id: Instance ID of a parent folder\n        :return: Folder collection containing folders with specific parent folder\n        \"\"\"\n        result = StorageObjectFolderCollection()\n        for server_folder in self:\n            if server_folder.parent_id == parent_id:\n                result.add(server_folder)\n        return result\n" }, { "alpha_fraction": 0.5912607312202454, "alphanum_fraction": 0.5938395261764526, "avg_line_length": 39.58139419555664, "blob_id": "e36b03ac1d0f2ab35e091d3ad6203db480dbeb02", "content_id": "f8497d221e6dbed29ac409c8f74d7657d5375cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6980, "license_type": "no_license", "max_line_length": 119, "num_lines": 172, "path": "/dell_storage_api/session.py", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "\"\"\" This module contains Session for communication with Dell Storage Manager (DSM) API. \"\"\"\nfrom typing import Optional\n\nimport urllib3\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom requests.structures import CaseInsensitiveDict\n\nfrom dell_storage_api.storage_center import StorageCenter, StorageCenterCollection\n\n\nclass DsmSession:\n    \"\"\"\n    This class represents HTTP Session with Dell Storage Manager (DSM). After successful login, underlying\n    requests.Session object holds login cookie used to authorize all further requests to DSM API until its expiration.\n    DsmSession object holds two important properties, 'base_url' and 'session' which are passed down to child objects\n    like Storage Centers, Servers or Volumes. 
These child elements can then perform their own specific API calls to DSM\n    by combining base_url, specific API endpoint and their unique Instance ID to create complete API endpoint URL and\n    send requests to this complete endpoint using authenticated session.\n    \"\"\"\n    API_VERSION_HEADER = 'x-dell-api-version'\n    LOGIN_ENDPOINT = '/ApiConnection/Login'\n    LOGOUT_ENDPOINT = '/ApiConnection/Logout'\n    STORAGE_CENTER_LIST_ENDPOINT = '/ApiConnection/ApiConnection/%s/StorageCenterList'\n\n    def __init__(self, username: str, password: str, host: str, port: int = 3033,\n                 api_version: str = '3.0', verify_cert: bool = True) -> None:\n        self._host = host\n        self._port = port\n        self._username = username\n        self._auth = HTTPBasicAuth(username, password)\n        self._api_version = api_version\n        self.base_url = 'https://%s:%s/api/rest' % (host, port)\n        self.session = requests.Session()\n        self.session.headers = CaseInsensitiveDict({'Content-Type': 'application/json',\n                                                    'Accept': 'application/json',\n                                                    self.API_VERSION_HEADER: self._api_version})\n        self.session.verify = verify_cert\n        if not verify_cert:\n            # Silence warning about untrusted certificates if 'verify_cert' is False\n            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n        self.conn_instance_id = None\n\n    @property\n    def api_version(self) -> str:\n        \"\"\"\n        Return API version used by DSM\n        :return: DSM API version\n        \"\"\"\n        return self._api_version\n\n    @api_version.setter\n    def api_version(self, value: str) -> None:\n        \"\"\"\n        Set API version used by DSM\n        :param value: New DSM API version\n        :return: None\n        \"\"\"\n        self._api_version = value\n        self.session.headers[self.API_VERSION_HEADER] = value\n\n    @property\n    def username(self) -> str:\n        \"\"\"\n        Return username used to authenticate this session with DSM\n        :return: Username used for authentication with DSM\n        \"\"\"\n        return self._username\n\n    @username.setter\n    def username(self, value: str) -> None:\n        \"\"\"\n        Set username used to authenticate this session with DSM\n        :param value: New Username\n        :return: None\n        \"\"\"\n        self._username = value\n        self._auth.username = value\n\n    @property\n    def login_url(self) -> str:\n        \"\"\"\n        Return complete URL to API login endpoint\n        :return: URL for login to DSM\n        \"\"\"\n        return self.base_url + self.LOGIN_ENDPOINT\n\n    @property\n    def logout_url(self) -> str:\n        \"\"\"\n        Return complete URL for API logout endpoint\n        :return: URL for logout from DSM\n        \"\"\"\n        return self.base_url + self.LOGOUT_ENDPOINT\n\n    @property\n    def sc_list_url(self) -> Optional[str]:\n        \"\"\"\n        Return complete URL for listing Storage Centers managed by this DSM\n        :return: URL for Storage Center listing\n        \"\"\"\n        endpoint = self.STORAGE_CENTER_LIST_ENDPOINT % self.conn_instance_id if self.conn_instance_id else ''\n        return (self.base_url + endpoint) if endpoint else None\n\n    def set_password(self, password: str) -> None:\n        \"\"\"\n        Set new password for authentication with DSM\n        :param password: New password value\n        :return: None\n        \"\"\"\n        self._auth.password = password\n\n    def login(self) -> bool:\n        \"\"\"\n        Perform call to API login endpoint. 
If authentication is successful, this method returns boolean value based\n        on result of login call.\n        :return: True if authentication completed successfully, otherwise False\n        \"\"\"\n        success = False\n        resp = self.session.post(url=self.login_url, auth=self._auth)\n        if resp.status_code == 200:\n            reported_api_version = resp.json().get('apiVersion', None)\n            if reported_api_version:\n                self.api_version = reported_api_version\n            try:\n                self.conn_instance_id = resp.json()['instanceId']\n            except KeyError:\n                print(\"ERROR: SCM API did not report connection instance ID\")\n            else:\n                success = True\n        else:\n            print(\"ERROR: Login failed (%d) - %s\" % (resp.status_code, resp.text))\n        return success\n\n    def logout(self, silent: bool = False) -> None:\n        \"\"\"\n        Performs call to API logout endpoint. Status messages about result of logout operation can be silenced by\n        specifying silent = True\n        :param silent: Whether this method should print result of the logout operation\n        :return: None\n        \"\"\"\n        resp = self.session.post(url=self.logout_url)\n        if resp.status_code == 204:\n            if not silent:\n                print(\"Logout - OK\")\n        else:\n            if not silent:\n                print(\"WARNING: Logout failed (%d) - %s\" % (resp.status_code, resp.text))\n\n    def storage_centers(self) -> StorageCenterCollection:\n        \"\"\"\n        Return collection of storage centers managed by this DSM\n        :return: Collection of Storage Centers connected to this DSM\n        \"\"\"\n        url = self.sc_list_url\n        storage_centers = StorageCenterCollection()\n        if url is None:\n            print(\"ERROR: Missing Connection ID, try logging in first\")\n        else:\n            resp = self.session.get(url=url)\n            if resp.status_code == 200:\n                for storage_center in resp.json():\n                    storage_centers.add(StorageCenter(req_session=self.session,\n                                                      base_url=self.base_url,\n                                                      name=storage_center['name'],\n                                                      instance_id=storage_center['instanceId'],\n                                                      serial_num=storage_center['scSerialNumber'],\n                                                      ip_addr=storage_center['hostOrIpAddress']))\n\n            else:\n                print(\"ERROR: Failed to load Storage Center list (%d) - %s\" % (resp.status_code, resp.text))\n        return storage_centers\n" }, { "alpha_fraction": 0.7161290049552917, "alphanum_fraction": 0.7354838848114014, "avg_line_length": 17.22222137451172, "blob_id": "157de3ffabc6916f762c6effe424659be4fb4ca2", "content_id": "15d693eea00399238d32b24242d4398002e28b6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 155, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/mypy.ini", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "[mypy]\npython_version = 3.6\ndisallow_untyped_defs = True\n\n[mypy-urllib3.*]\nignore_missing_imports = True\n\n[mypy-texttable.*]\nignore_missing_imports = True\n" }, { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.7909091114997864, "avg_line_length": 13.75, "blob_id": "20da65ffc3e5420e88a5e815c139c30b70fef0eb", "content_id": "c8f3d0c535bc141e6bd1d27d3f78e50635f8f31d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 110, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/.pylintrc", "repo_name": "SK-CERT/dell-storage-client", "src_encoding": "UTF-8", "text": "[MESSAGES CONTROL]\ndisable=R1705,R0913,R0902\n\n[FORMAT]\nmax-line-length=120\n\n[REPORTS]\noutput-format=colorized\n" } ]
10
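As an orientation aid for the dell-storage-client record above, here is a minimal end-to-end sketch assembled only from the classes those files define (DsmSession, StorageCenterCollection, VolumeCollection). The host, credentials and the Storage Center instance ID are placeholder values, not real ones, and error handling is reduced to the bare minimum:

    # Hedged usage sketch for the dell_storage_api package shown above.
    # 'dsm.example.org', the credentials and '12345.1' are placeholders.
    from dell_storage_api import DsmSession

    session = DsmSession('admin', 'secret', 'dsm.example.org', port=3033, verify_cert=False)
    if not session.login():
        raise SystemExit('login failed')

    # Collections are iterable and searchable by instance ID, as in the CLI code.
    storage = session.storage_centers().find_by_instance_id('12345.1')
    if storage is not None:
        for volume in storage.volume_list():
            print(volume.name, volume.instance_id)

    session.logout(silent=True)

This mirrors the flow the CLI itself follows in main()/execute_command(): authenticate once, resolve a Storage Center from the collection, then operate on its child objects through the shared session and base_url.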
eduardotrein/leonesto
https://github.com/eduardotrein/leonesto
669d24c3e71c6b827efec3d61cac06149c5ab390
ffb26d61a4d2218598246e6b81e881e41591b6f6
b8e8c5aff0219b800aab6b7538b57fb627ba489f
refs/heads/master
2020-09-15T16:39:42.837649
2020-02-15T14:09:01
2020-02-15T14:09:01
223,505,509
0
0
null
2019-11-22T23:57:01
2019-11-23T00:14:22
2019-11-23T00:25:35
null
[ { "alpha_fraction": 0.7779527306556702, "alphanum_fraction": 0.7779527306556702, "avg_line_length": 27.863636016845703, "blob_id": "8da6e6caeb7b19f8923ef08a99bb0f18470dd435", "content_id": "201c13aac0c3039643052db8d9bf1543734b924c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 637, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/README.md", "repo_name": "eduardotrein/leonesto", "src_encoding": "UTF-8", "text": "# leonesto\nInstruções\n\ncd /home/eduardo/Documentos/Estudo/git\n\ngit add README.md \ngit commit -m \"first commit\"\ngit config --global user.email \"[email protected]\"\ngit config --global user.name \"eduardotrein\"\ngit commit -m \"first commit\"\ngit remote add origin https://github.com/eduardotrein/leonesto.git\ngit push -u origin master\ngit add README.md \ngit commit -m \"Segundo Commit, com uma linha a mais\"\ngit remote add origin https://github.com/eduardotrein/leonesto.git\ngit push -u origin teste\ngit push -u origin master\n\n\n# Salvar senha para um repositorio\ngit config credential.helper store\ngit push https://github.com/eduardotrein/leonesto.git\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 34, "blob_id": "9d627d7c7e9b6025ccaa38bd1576b8b5b074a2d8", "content_id": "019279105d9bb565409b5b14d413fb25c87100ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/python/hello_world.py", "repo_name": "eduardotrein/leonesto", "src_encoding": "UTF-8", "text": "print( \"hello world!!!\",\"teste 5\")\n" } ]
2
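The leonesto README above walks through a first-push git workflow and the credential-store setup. Purely as an illustration, here is a hypothetical Python helper that scripts those same README commands; the function name, 'repo_dir' and the remote URL argument are assumptions for the sketch, not part of the original repository:

    # Hypothetical wrapper around the git commands listed in the README above.
    import subprocess

    def first_push(repo_dir: str, remote_url: str) -> None:
        # Stage, commit, add the remote and push, exactly as the README does by hand.
        for cmd in (["git", "add", "README.md"],
                    ["git", "commit", "-m", "first commit"],
                    ["git", "remote", "add", "origin", remote_url],
                    ["git", "push", "-u", "origin", "master"]):
            subprocess.run(cmd, cwd=repo_dir, check=True)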
pranavbhatt/py-practice
https://github.com/pranavbhatt/py-practice
ae9c2112d79677731834a56447dbb899a3916ce3
17865ef8c68b633d2fc5d12adcb0d7e93750ecb3
b1be4c4be336c98c20f77ce8e3ebc64b83ebf66c
refs/heads/master
2022-07-27T18:12:43.153545
2018-04-05T22:54:55
2018-04-05T22:54:55
124,005,405
0
0
null
2018-03-06T01:59:34
2018-04-05T22:55:06
2022-07-06T19:45:21
Python
[ { "alpha_fraction": 0.5813953280448914, "alphanum_fraction": 0.7383720874786377, "avg_line_length": 13.333333015441895, "blob_id": "60aada3f2799a6b55e4edef8f8b64ebecde273e0", "content_id": "f49fa9a21c643d897cc0aa1ff1e3e49d62faf653", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 172, "license_type": "no_license", "max_line_length": 26, "num_lines": 12, "path": "/requirements.txt", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.6.0\ncoreapi==2.3.3\nDjango==2.0.3\ndjangorestframework==3.7.7\nlxml==4.1.1\nrequests==2.18.4\nurllib3==1.22\npylint==1.7.6\npytest\npytest-django\nipdb\npytest-cov\n" }, { "alpha_fraction": 0.8289473652839661, "alphanum_fraction": 0.8289473652839661, "avg_line_length": 24.33333396911621, "blob_id": "8b00524456745f36640ef884093bdd1ec33bc2bc", "content_id": "86bf64d84afdf31355fd1f4bb027aaae6b3f4c4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 63, "num_lines": 3, "path": "/scrabble-play/README.md", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Scrabble\n\nusing sowpod scrabble dictionary, to solve interesting problems\n" }, { "alpha_fraction": 0.4442289173603058, "alphanum_fraction": 0.44616878032684326, "avg_line_length": 25.461538314819336, "blob_id": "5352bc622caa2305a88354f19bcbb25a8c99bc30", "content_id": "69652b36a87409f1507db0c7feb86c1b2af87c5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 55, "num_lines": 39, "path": "/problem_solving/binary_tree_level_order_traversal.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def levelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n result = []\n if not root:\n return result\n \n from collections import deque\n queue = deque()\n queue.append(root)\n \n while(len(queue) != 0):\n level_result = []\n next_level_queue = deque()\n \n while(len(queue) != 0):\n node = queue.popleft()\n level_result.append(node.val)\n \n if node.left:\n next_level_queue.append(node.left)\n \n if node.right:\n next_level_queue.append(node.right)\n \n result.append(level_result)\n queue = next_level_queue\n \n return result" }, { "alpha_fraction": 0.7410714030265808, "alphanum_fraction": 0.7410714030265808, "avg_line_length": 36.33333206176758, "blob_id": "c033e76aa054de6fd6c9fbdde2bf8472c119b44d", "content_id": "afdfdbbca7b4743f4823a62ea5cac9f9f34b6530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 112, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/tdd_example/pytest.ini", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "[pytest]\nDJANGO_SETTINGS_MODULE = tdd_example.test_settings \naddopts = --nomigrations --cov=. 
--cov-report=html\n" }, { "alpha_fraction": 0.4469964802265167, "alphanum_fraction": 0.4469964802265167, "avg_line_length": 34.4375, "blob_id": "6eb4b91428c94e4e461391800eabdcbdcd3beb38", "content_id": "8e45c53fde56b0e8a65ae1f92375f4c933e994ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/problem_solving/subdomain_visits.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "class Solution:\n    def subdomainVisits(self, cpdomains):\n        \"\"\"\n        :type cpdomains: List[str]\n        :rtype: List[str]\n        \"\"\"\n        map = {}\n        for domain in cpdomains:\n            count, *domains_arr = domain.replace(\" \",\".\").split(\".\")\n            for i in range(len(domains_arr)):\n                item = \".\".join(domains_arr[i:])\n                if not item in map: \n                    map[item] = int(count)\n                else: \n                    map[item] += int(count)\n        return [\" \".join((str(v), k)) for k, v in map.items()]" }, { "alpha_fraction": 0.6356356143951416, "alphanum_fraction": 0.6376376152038574, "avg_line_length": 22.761905670166016, "blob_id": "a21b60db7324aa03f9f3775aeed4fe3471bba09c", "content_id": "adc06493946193ead879c15249f1a689405a9fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 999, "license_type": "no_license", "max_line_length": 75, "num_lines": 42, "path": "/scrabble-play/solver.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "import scrabble\nimport string\n\n# print all words containing \"uu\"\nfor word in scrabble.word_list:\n    if \"uu\" in word:\n        print(word)\n\n# print all letters that never appear doubled\nfor letter in string.ascii_lowercase:\n    exists = False\n    for word in scrabble.word_list:\n        if letter * 2 in word:\n            exists = True\n            break\n    if not exists:\n        print(\"There are no English words with a double {}\".format(letter))\n\n# find all words which contain all vowels\nVOWELS = \"aeiou\"\n\n\ndef has_all_vowels(word):\n    for vowel in VOWELS:\n        if vowel not in word:\n            return False\n    return True\n\n\nfor word in scrabble.word_list:\n    if has_all_vowels(word):\n        print(\"Word with all vowels is : {}\".format(word))\n\n# find the longest palindrome\nlongest_word = \"\"\n\nfor word in scrabble.word_list:\n    if word == word[::-1]:\n        if len(word) > len(longest_word):\n            longest_word = word\n\nprint(\"{} is the longest palindrome\".format(longest_word))\n\n" }, { "alpha_fraction": 0.7780678868293762, "alphanum_fraction": 0.7911227345466614, "avg_line_length": 94.75, "blob_id": "71331e7225e578e73537f1965abf574a2a6d4b09", "content_id": "fbdd9eee17bbbbbec203237f6b025cbcac72990b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 130, "num_lines": 12, "path": "/problem_solving/README.md", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Data Structure and Algorithms\n\n1. LRU Cache Implementation [(Description)](https://leetcode.com/problems/lru-cache/description/)\n2. Trie Implementation [(Description)](https://leetcode.com/problems/implement-trie-prefix-tree/description/)\n3. Maximum Subarray [(Description)](https://leetcode.com/problems/maximum-subarray/description/)\n4. Preorder Traversal (Iterative) [(Description)](https://leetcode.com/problems/binary-tree-preorder-traversal/description/)\n5. 
Inorder Traversal (Iterative) [(Description)](https://leetcode.com/problems/binary-tree-inorder-traversal/description/)\n6. Postorder Traversal (Iterative) [(Description)](https://leetcode.com/problems/binary-tree-postorder-traversal/description/)\n7. Search 2D Matrix [(Description)](https://leetcode.com/problems/search-a-2d-matrix/description/)\n8. Search 2D Matrix II [(Description)](https://leetcode.com/problems/search-a-2d-matrix-ii/description/)\n9. Binary Tree Level Order Traversal [(Description)](https://leetcode.com/problems/binary-tree-level-order-traversal/description/)\n10. Subdomain Visits [(Description)](https://leetcode.com/problems/subdomain-visit-count/description/)\n" }, { "alpha_fraction": 0.7570621371269226, "alphanum_fraction": 0.7570621371269226, "avg_line_length": 43, "blob_id": "26aac61eb6153b8de13f537f5e5692db134851d4", "content_id": "968b1273872eaac18df58af47e436a0517800838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 177, "license_type": "no_license", "max_line_length": 101, "num_lines": 4, "path": "/apod_scraper/README.md", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Simple Scraper to download images \n\n* Scrape images from [Astronomy Picture of the Day - NASA](http://apod.nasa.gov/apod/archivepix.html)\n* Store images in a local directory\n\n" }, { "alpha_fraction": 0.6952117681503296, "alphanum_fraction": 0.6970534324645996, "avg_line_length": 30.941177368164062, "blob_id": "a64193cea174dcb835596a02dfbd2e6e6a312be", "content_id": "5b421f0360465235b323faf1c997025650bc2149", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 83, "num_lines": 34, "path": "/apod_scraper/scraper.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "import os\nimport pathlib\nimport shutil\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.compat import urljoin\n\n# apod images directory name\nAPOD_IMAGES_DIRECTORY = \"apod_images\"\nBASE_URL = \"http://apod.nasa.gov/apod/archivepix.html\"\n\n# create a directory to store images, if it does not exist already\npathlib.Path(APOD_IMAGES_DIRECTORY).mkdir(exist_ok=True)\n\n# Downloading the index page\napod_base_page = requests.get(BASE_URL).text\n\nfor link in BeautifulSoup(apod_base_page, \"lxml\").find_all('a'):\n    print(\"Following link:\", link)\n    href = urljoin(BASE_URL, link[\"href\"])\n\n    # Follow the link and pull down the image on that link page\n    content = requests.get(href).text\n\n    for img in BeautifulSoup(content, \"lxml\").find_all('img'):\n        img_href = urljoin(href, img[\"src\"])\n\n        print(\"Downloading image:\", img_href)\n        img_name = img_href.split(\"/\")[-1]\n\n        response = requests.get(img_href, stream=True)\n        with open(os.path.join(APOD_IMAGES_DIRECTORY, img_name), 'wb') as out_file:\n            shutil.copyfileobj(response.raw, out_file)\n" }, { "alpha_fraction": 0.7831325531005859, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 40.5, "blob_id": "f067a65e5780c6e8801c88677c7339a1440f2687", "content_id": "58dcc18ccbe22330699db5cc4bac69b6f6ae50a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/drf_example/README.md", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Django Rest Framework Sample APP\n##### CRUD 
operations for Programming Languages\n" }, { "alpha_fraction": 0.4376237690448761, "alphanum_fraction": 0.4455445408821106, "avg_line_length": 25.25, "blob_id": "fb70b6df1d51590341a19cc972bb5d49a4f8fe0e", "content_id": "816eeb874953b70e80374c4526a839897d92b16d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/problem_solving/max_subarray.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "import sys\n\nclass Solution:\n    def maxSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        max_so_far = -sys.maxsize - 1\n        max_ending_here = 0\n        \n        for n in nums:\n            max_ending_here = max_ending_here + n\n            if max_so_far < max_ending_here:\n                max_so_far = max_ending_here\n            \n            if max_ending_here < 0:\n                max_ending_here = 0\n        \n        return max_so_far\n" }, { "alpha_fraction": 0.40407469868659973, "alphanum_fraction": 0.4159592390060425, "avg_line_length": 23.117647171020508, "blob_id": "798d7fe27c5c976d0106d7eb503528393f3c3063", "content_id": "10b4b938933b5f23bd0a2335ebf8c10f089ba3bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 46, "num_lines": 51, "path": "/problem_solving/search_2d_matrix.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# Approach 1: binary search over the matrix treated as one sorted list\nclass Solution:\n    def searchMatrix(self, matrix, target):\n        if len(matrix) == 0:\n            return False\n        \"\"\"\n        :type matrix: List[List[int]]\n        :type target: int\n        :rtype: bool\n        \"\"\"\n        m = len(matrix)\n        n = len(matrix[0]);\n        \n        start = 0;\n        end = m*n-1;\n        \n        while start<=end:\n            mid = int(start + (end - start)/2)\n            midX = int(mid/n)\n            midY = int(mid%n)\n            \n            if matrix[midX][midY]==target:\n                return True\n            \n            if matrix[midX][midY]<target:\n                start=mid+1\n            else: \n                end=mid-1\n        \n        return False\n\n\n# Approach 2: staircase search from the top-right corner (Search a 2D Matrix II)\nclass Solution:\n    def searchMatrix(self, matrix, target):\n        if len(matrix) == 0:\n            return False\n        \"\"\"\n        :type matrix: List[List[int]]\n        :type target: int\n        :rtype: bool\n        \"\"\"\n        i = 0\n        j = len(matrix[0]) - 1\n        \n        while i < len(matrix) and j >= 0 :\n            if target == matrix[i][j]:\n                return True\n            elif target > matrix[i][j]:\n                i += 1\n            else:\n                j -= 1\n        return False" }, { "alpha_fraction": 0.7463768124580383, "alphanum_fraction": 0.7536231875419617, "avg_line_length": 22, "blob_id": "ab477be9f7eb9f5ff080d57b404089c8864a334a", "content_id": "8b593d9109aa33066b3ea84a04814c4e85ef8f82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 138, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/README.md", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "# py-practice\nCode to ramp up my Python level\n\n## setup python using virtualenv\n* virtualenv --python=python3.x env\n* source env/bin/activate\n" }, { "alpha_fraction": 0.3109452724456787, "alphanum_fraction": 0.3805970251560211, "avg_line_length": 39.20000076293945, "blob_id": "bf917fd2370817bbaf641a7d2b0c32afc6a0b112", "content_id": "79b88e286bd2e7d1a0ba155bbe6178f61cdcf12d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/scrabble-play/scrabble.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "WORD_LIST = 
\"sowpods_letters.txt\"\nword_list = open(WORD_LIST).readlines()\nword_list = [word.lower().strip() for word in word_list]\n\nscores = {\"a\": 1, \"c\": 3, \"b\": 3, \"e\": 1, \"d\": 2,\n \"f\": 4, \"g\": 2, \"h\": 4, \"i\": 1, \"j\": 8,\n \"k\": 5, \"l\": 1, \"m\": 3, \"n\": 3, \"o\": 1,\n \"p\": 3, \"q\": 10, \"r\": 1, \"s\": 1, \"t\": 1,\n \"u\": 1, \"v\": 4, \"w\": 3, \"x\": 8, \"y\": 4,\n \"z\": 10}\n" }, { "alpha_fraction": 0.4772946834564209, "alphanum_fraction": 0.47777777910232544, "avg_line_length": 24.887500762939453, "blob_id": "e9b9625195d56ce42b2f824b0342cba2ab4d2e9e", "content_id": "8f2e3d467444d4e048a94b97ad0d7045f10cf4ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2070, "license_type": "no_license", "max_line_length": 64, "num_lines": 80, "path": "/problem_solving/lru_cache.py", "repo_name": "pranavbhatt/py-practice", "src_encoding": "UTF-8", "text": "class LRUNode:\n def __init__(self, key=None, val=None):\n self.value = val\n self.key = key\n self.next = None\n self.prev = None\n\nclass LRUList:\n def __init__(self): \n self.head = LRUNode()\n self.tail = LRUNode()\n self.head.next = self.tail\n self.tail.prev = self.head\n \n def insert(self, node):\n #node is always added to front\n node.next = self.head.next\n node.prev = self.head\n self.head.next.prev = node\n self.head.next = node\n \n def delete(self, node):\n node.prev.next = node.next\n node.next.prev = node.prev\n node.next = None\n node.prev = None\n \n def pop(self):\n node = self.tail.prev\n #cache is empty\n if node == self.head:\n return\n self.delete(node)\n return node\n \n def make_recent(self, node):\n self.delete(node)\n self.insert(node)\n\n \nclass LRUCache:\n def __init__(self, capacity):\n \"\"\"\n :type capacity: int\n \"\"\"\n self.lru_list = LRUList()\n self.lru_map = {}\n self.capacity = capacity \n \n def get(self, key):\n \"\"\"\n :type key: int\n :rtype: int\n \"\"\"\n if key in self.lru_map:\n node = self.lru_map[key]\n self.lru_list.make_recent(node)\n return node.value\n return -1\n \n\n def put(self, key, value):\n \"\"\"\n :type key: int\n :type value: int\n :rtype: void\n \"\"\"\n # cache hit\n if key in self.lru_map:\n node = self.lru_map[key]\n node.value = value\n self.lru_list.make_recent(node)\n else:\n # cache full? pop lru item\n if len(self.lru_map) == self.capacity:\n node = self.lru_list.pop()\n del self.lru_map[node.key]\n node = LRUNode(key=key, val=value)\n self.lru_list.insert(node)\n self.lru_map[key] = node" } ]
15
diwadd/PathImageSegmentation
https://github.com/diwadd/PathImageSegmentation
f487a114f600449c6a1538ee3f1eabe2ee6f8bb3
2c9efb7f69e5cfe8de21d04a16e676cc6686fc7c
77f53a619bed85e9d91d508d6d39fc4dbf328087
refs/heads/master
2021-01-02T22:51:54.661577
2017-08-07T19:51:24
2017-08-07T19:52:01
99,406,410
0
0
null
2017-08-05T07:18:26
2017-08-05T07:19:03
2017-08-05T07:21:45
Python
[ { "alpha_fraction": 0.5162613987922668, "alphanum_fraction": 0.5376899838447571, "avg_line_length": 27.236051559448242, "blob_id": "f4f5e6d5ae43ac3cdc357185b87dd2acf5131652", "content_id": "54ba787e17929fc6556bfb7c5fb5507a2faeb89d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6580, "license_type": "permissive", "max_line_length": 80, "num_lines": 233, "path": "/main_training.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport glob\n\nimport numpy as np\n\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.applications.vgg16 import VGG16\nfrom keras import optimizers\n\nimport tensorflow as tf\n\nimport data_handling as dh\nimport deep_models as dm\n\nrandom.seed(111)\n\n\n\ndef data_generator(file_name_list, noli=20):\n \n \"\"\"\n noli - number of loaded images per yield\n \n \"\"\"\n\n while True:\n n = len(file_name_list)\n number_of_image_loads = round(n / noli)\n ptr = 0\n # print(\"n: \" + str(n))\n # print(\"number_of_image_loads: \" + str(number_of_image_loads))\n\n for i in range(number_of_image_loads):\n # print(\"We are a i: \" + str(i))\n # create numpy arrays of input data\n # and labels, from each line in the file\n mini_batch_fnl = file_name_list[ptr:(ptr + noli)]\n ptr = ptr + noli\n\n x_data, y_data = dh.load_data_from_npz(mini_batch_fnl)\n yield (x_data, y_data)\n\n\ndef evaluate(model, file_name_list, noli=20):\n \n \"\"\"\n noli - number of loaded images per yield\n \n \"\"\"\n\n n = len(file_name_list)\n number_of_image_loads = round(n / noli)\n ptr = 0\n # print(\"n: \" + str(n))\n # print(\"number_of_image_loads: \" + str(number_of_image_loads))\n\n mean_loss = 0.0\n mean_acc = 0.0\n for i in range(number_of_image_loads):\n # print(\"We are a i: \" + str(i))\n # create numpy arrays of input data\n # and labels, from each line in the file\n mini_batch_fnl = file_name_list[ptr:(ptr + noli)]\n ptr = ptr + noli\n\n x_data, y_data = dh.load_data_from_npz(mini_batch_fnl)\n score = model.evaluate(x_data, y_data, verbose=0)\n\n #print(\"score: \" + str(score))\n\n local_loss = score[0]\n local_acc = score[1]\n\n mean_loss += local_loss\n mean_acc += local_acc\n mean_loss /= number_of_image_loads\n mean_acc /= number_of_image_loads\n print(\"Mean loss: \" + str(mean_loss) + \" - mean acc: \" + str(mean_acc))\n \n\n\nif __name__ == \"__main__\":\n print(\"Starting main.\")\n\n # ------------------------------------------------------------------------\n # Load data\n # ------------------------------------------------------------------------\n\n data_file_names = glob.glob(\"augmented_images/*npz\")\n\n n_files = len(data_file_names)\n\n loaded_data = np.load(data_file_names[0])\n image = loaded_data[\"image\"]\n label = loaded_data[\"label\"]\n\n iw, ih, ic = image.shape\n nw, nh, nc= label.shape\n\n random.shuffle(data_file_names)\n train_fraction = 0.6\n valid_fraction = 1.0 - train_fraction\n test_fraction = 0.5\n\n x_train_fnl = data_file_names[0:int(train_fraction*n_files)]\n temp = data_file_names[int(train_fraction*n_files):]\n x_valid_fnl = temp[0:int(len(temp)/2)]\n x_test_fnl = temp[int(len(temp)/2):]\n\n print(\"First train file: \" + str(x_train_fnl[0]))\n\n print(\"Number of train data files: \" + str(len(x_train_fnl)))\n print(\"Number of valid data files: \" + str(len(x_valid_fnl)))\n print(\"Number of test data files: \" + str(len(x_test_fnl)))\n\n K.get_session()\n\n\n new_model = False\n if 
(new_model == True):\n        print(\"Creating a new model!\")\n\n\n        #model = FCN(input_shape=(iw, ih, ic), classes=2, \n        #            weights='imagenet', trainable_encoder=True)\n\n        model = dm.vgg16_16s_fcn(iw, \n                                 ih, \n                                 ic,\n                                 dropout=0.5,\n                                 alpha=0.0001,\n                                 classes=2)\n\n        opt = optimizers.SGD(lr=0.0001, momentum=0.9, clipvalue=0.5)\n        opt = optimizers.Adam(1e-4, clipvalue=0.5)\n        model.compile(loss=\"categorical_crossentropy\",\n                      optimizer=opt,\n                      metrics=[\"accuracy\"])\n\n\n        vgg16_model = VGG16(weights='imagenet', include_top=False)\n        vgg16_model.compile(loss=\"categorical_crossentropy\",\n                            optimizer=\"adadelta\")\n\n        index = 12\n        print(\"Our model config: \")\n        print(model.layers[index].get_config())\n\n        print(\"VGG16 model config: \")\n        print(vgg16_model.layers[index].get_config())\n\n        print(\"Our model:\")\n        layer1 = model.layers[index].get_weights()\n        print(layer1[0].shape)\n        print(layer1[0][0,0,0,1:5])\n\n        print(\"VGG16 model:\")\n        layer1 = vgg16_model.layers[index].get_weights()\n        print(layer1[0].shape)\n        print(layer1[0][0,0,0,1:5])\n\n\n        for i in range(len(vgg16_model.layers)):\n\n            print(\"\\n\\n\\n Layer number: \" + str(i))\n\n            layer_config = vgg16_model.layers[i].get_config()\n            print(\"Setting name: \" + str(layer_config['name']))\n            if layer_config['name'] == 'block5_pool':\n                break\n\n            layer_weights = vgg16_model.layers[i].get_weights()\n\n            if len(layer_weights) != 0:\n                print(layer_weights[0].shape)\n                model.layers[i].set_weights(vgg16_model.layers[i].get_weights())\n            else:\n                print(\"No weights to set!\")\n\n        print(\"After setting the weights...\")\n\n        print(\"Our model:\")\n        layer1 = model.layers[index].get_weights()\n        print(layer1[0].shape)\n        print(layer1[0][0,0,0,1:5])\n\n        print(\"VGG16 model:\")\n        layer1 = vgg16_model.layers[index].get_weights()\n        print(layer1[0].shape)\n        print(layer1[0][0,0,0,1:5])\n\n\n\n        #model.compile(optimizer='adadelta',\n        #              loss='categorical_crossentropy',\n        #              metrics=['accuracy'])\n\n        model.summary()\n\n\n    else:\n        model = load_model(\"model.h5\")\n        print(\"Model loaded!\")\n        model.summary()\n\n    noli = 4\n    n = len(x_train_fnl)\n    number_of_image_loads = round(n / noli)\n    print(\"Number of image loads: \" + str(number_of_image_loads))\n    n_epochs = 100 \n    n_sub_epochs = 5\n\n    print(\"Pre training evaluation:\")\n    evaluate(model, x_valid_fnl, noli=4)\n\n    for i in range(n_epochs):\n        print(\"Global epoch: \" + str(i) + \"/\" + str(n_epochs))\n        model.fit_generator(data_generator(x_train_fnl, noli=noli),\n                            steps_per_epoch=number_of_image_loads,\n                            epochs=n_sub_epochs)\n        print(\"Validation data mean loss: \")\n        evaluate(model, x_valid_fnl, noli=4)\n\n\n        model.save(\"model.h5\")\n\n    print(\"Test data mean loss: \")\n    evaluate(model, x_test_fnl, noli=64)\n\n\n    K.clear_session()\n\n" }, { "alpha_fraction": 0.3254545331001282, "alphanum_fraction": 0.4509090781211853, "avg_line_length": 18.33333396911621, "blob_id": "d43756e65b061b6515c1c210768cfda3c2a3006e", "content_id": "e96cd63a8c5fb40927d1ca53e57575a5d8d16330", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "permissive", "max_line_length": 74, "num_lines": 30, "path": "/feeling_binary_crossentropy.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef bc(t, o):\n    return -np.mean(t*np.log(o) + (1-t)*np.log(1-o))\n\n\nt = np.array([   0,    0,    0,    1,    1,    1,    0,    0,    0,    1])\no = np.array([0.10, 0.10, 0.10, 0.95, 0.90, 0.90, 0.10, 0.10, 0.10, 0.90])\n\nprint(bc(t, o))\n\n\nn = 10000\nt = 
np.random.randint(2, size=n)\no = np.array([0.01 for i in range(n)])\n\nfor i in range(n):\n    if t[i] == 1:\n        o[i] = 0.99\n\nfor i in range(int(0.013*n)):\n    if t[i] == 1:\n        o[i] = 0.001\n    elif t[i] == 0:\n        o[i] = 0.999\n    else:\n        pass\n\nprint(bc(t, o))\n" }, { "alpha_fraction": 0.5835299491882324, "alphanum_fraction": 0.6379121541976929, "avg_line_length": 47.25833511352539, "blob_id": "4d97324a8ad7a9c93d3b2aac198fdd0e3cabdfb1", "content_id": "4dcefe5660c67311c4f3a21fb3bb58b36f0c330d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17377, "license_type": "permissive", "max_line_length": 194, "num_lines": 360, "path": "/deep_models.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import keras\n\nfrom keras.models import Model\nfrom keras.models import Sequential\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Input\nfrom keras.layers import Conv2D\nfrom keras.layers import Conv2DTranspose\nfrom keras.layers import Cropping2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.layers import Activation\nfrom keras.layers import GlobalAveragePooling2D\n\n\nfrom keras.layers import Dropout \nfrom keras.layers import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\n\nfrom keras import regularizers\nfrom keras import backend as K\n\ndef basic_model(iw=500, # Input width\n                ih=500, # Input height \n                ic=3,\n                ow=100, # Output width\n                oh=100, # Output height\n                dropout=0.9,\n                alpha=0.0):\n\n    input_image = Input((iw, ih, ic))\n\n    x = Conv2D(16, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_1_layer_1\")(input_image)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(16, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_1_layer_2\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_1_pooling\")(x)\n\n    x = Conv2D(32, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_2_layer_1\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(32, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_2_layer_2\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_2_pooling\")(x)\n\n    x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_1\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_2\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_3\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_3_pooling\")(x)\n\n    x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_1\")(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_2\")(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_3\")(x)\n    x = LeakyReLU(alpha)(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_4_pooling\")(x)\n\n    x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_1\")(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha)(x)\n    x = Conv2D(128, 
(3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_3\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_5_pooling\")(x)\n\n x = Conv2D(256, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_7_layer_1\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(256, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_7_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(256, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_7_layer_3\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_7_pooling\")(x)\n\n x = Flatten(name=\"flatten\")(x)\n x = Dense(4096, activation=\"linear\", name=\"full_connected_1\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Dropout(dropout)(x)\n\n x = Dense(4096, activation=\"linear\", name=\"full_connected_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Dropout(dropout)(x)\n\n x = Dense(ow*oh, activation=\"sigmoid\", name=\"predictions\")(x)\n\n model = Model(input_image, x, name=\"vgg16_based\")\n\n model.compile(loss=\"binary_crossentropy\",\n optimizer=\"adadelta\")\n\n print(\"\\n ---> Model summary <--- \\n\")\n model.summary()\n\n return model\n\ndef basic_model_pooling(iw=500, # Input width\n ih=500, # Input height \n ic=3,\n ow=100, # Output width\n oh=100, # Output heigth\n dropout=0.9,\n alpha=0.001):\n\n input_image = Input((iw, ih, ic))\n\n x = Conv2D(16, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_1_layer_1\")(input_image)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(16, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_1_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_1_pooling\")(x)\n\n x = Conv2D(32, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_2_layer_1\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(32, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_2_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_2_pooling\")(x)\n\n x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_1\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_3_layer_3\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_3_pooling\")(x)\n\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_1\")(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_2\")(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_4_layer_3\")(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_4_pooling\")(x)\n\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_1\")(x)\n x = 
BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_5_layer_3\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name=\"block_5_pooling\")(x)\n\n x = Conv2D(256, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_6_layer_1\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(512, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_6_layer_2\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n x = Conv2D(ow*oh, (3, 3), activation=\"linear\", padding=\"same\", name=\"block_6_layer_3\")(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha)(x)\n\n x = GlobalAveragePooling2D()(x)\n\n model = Model(input_image, x, name=\"vgg16_based\")\n\n model.compile(loss=\"binary_crossentropy\",\n optimizer=\"adadelta\")\n\n print(\"\\n ---> Model summary <--- \\n\")\n model.summary()\n\n return model\n\n\ndef vgg16_32s_fcn(iw=500, # Input width\n ih=500, # Input height \n ic=3,\n dropout=0.5,\n alpha=0.001,\n classes=2):\n # Based on:\n # Fully Convolutional Models for Semantic Segmentation\n # Evan Shelhamer*, Jonathan Long*, Trevor Darrell\n # PAMI 2016\n # arXiv:1605.06211\n\n reg_fun = regularizers.l2(alpha)\n\n input_image = Input((iw, ih, ic))\n\n # Conv 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv1')(input_image)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv2')(x)\n pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Conv 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv1')(pool1)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv2')(x)\n pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Conv 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv1')(pool2)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv3')(x)\n pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Conv 4\n x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv1')(pool3)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv3')(x)\n pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Conv 5\n x = 
Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv1')(pool4)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv3')(x)\n pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n # Fully Conv fc6\n fc6 = Conv2D(4096, (7, 7), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc6')(pool5)\n drop6 = Dropout(rate=dropout)(fc6)\n\n # Fully Conv fc7\n fc7 = Conv2D(4096, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc7')(drop6)\n drop7 = Dropout(rate=dropout)(fc7)\n\n score_fr = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_fr')(drop7)\n\n upscore = Conv2DTranspose(classes, kernel_size=(64, 64), strides=(32, 32), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore')(score_fr)\n\n\n _, uw, uh, uc = upscore._keras_shape\n cw = (uw - iw)//2\n ch = (uh - ih)//2\n print(\"cw: \" + str(cw))\n print(\"ch: \" + str(ch))\n\n score = Cropping2D(cropping=(cw, ch))(upscore)\n output = Activation('softmax')(score)\n\n model = Model(input_image, output, name=\"vgg16_based\")\n\n return model\n\n\ndef vgg16_16s_fcn(iw=500, # Input width\n ih=500, # Input height \n ic=3,\n dropout=0.5,\n alpha=0.001,\n classes=2):\n # Based on:\n # Fully Convolutional Models for Semantic Segmentation\n # Evan Shelhamer*, Jonathan Long*, Trevor Darrell\n # PAMI 2016\n # arXiv:1605.06211\n\n reg_fun = regularizers.l2(alpha)\n\n input_image = Input((iw, ih, ic))\n\n # Conv 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv1')(input_image)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv2')(x)\n pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Conv 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv1')(pool1)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv2')(x)\n pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Conv 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv1')(pool2)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv3')(x)\n pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Conv 4\n x = Conv2D(512, (3, 3), 
activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv1')(pool3)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv2')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv3')(x)\n    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n    # Conv 5\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv1')(pool4)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv2')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv3')(x)\n    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n    # Fully Conv fc6\n    fc6 = Conv2D(4096, (7, 7), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc6')(pool5)\n    drop6 = Dropout(rate=dropout)(fc6)\n\n    # Fully Conv fc7\n    fc7 = Conv2D(4096, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc7')(drop6)\n    drop7 = Dropout(rate=dropout)(fc7)\n\n    score_fr = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_fr')(drop7)\n    upscore2 = Conv2DTranspose(classes, kernel_size=(4, 4), strides=(2, 2), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore2')(score_fr)\n\n\n    score_pool4 = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_pool4')(pool4)\n\n    _, uw, uh, uc = upscore2._keras_shape\n    _, sw, sh, sc = score_pool4._keras_shape\n\n    if ((uw - sw) == 1) or ((uh - sh) == 1):\n        cw1 = 1\n        ch1 = 1\n        cropping = ((cw1, 0),(ch1, 0))\n    else:\n        cw1 = (uw - sw)//2\n        ch1 = (uh - sh)//2\n        cropping = (cw1, ch1)\n\n    print(\"cw1: \" + str(cw1))\n    print(\"ch1: \" + str(ch1))\n\n    print(\"upscore2._keras_shape \" + str(upscore2._keras_shape))\n    print(\"score_pool4._keras_shape \" + str(score_pool4._keras_shape))\n\n    # Technically score_pool4 should have a larger size than upscore2.\n    # At least that is what follows from crop(n.score_pool4, n.upscore2).\n    # This is, however, not the case and we need to crop upscore2.\n\n    score_pool4c = Cropping2D(cropping=cropping)(upscore2)    \n    fuse_pool4 = keras.layers.Add()([score_pool4c, score_pool4])\n\n    upscore16 = Conv2DTranspose(classes, kernel_size=(32, 32), strides=(16, 16), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore16')(fuse_pool4)\n\n    _, uw, uh, uc = upscore16._keras_shape\n    cw2 = (uw - iw)//2\n    ch2 = (uh - ih)//2\n    #print(\"cw2: \" + str(cw2))\n    #print(\"ch2: \" + str(ch2))\n\n    score = Cropping2D(cropping=(cw2, ch2))(upscore16)\n    output = Activation('softmax')(score)\n\n    model = Model(input_image, output, name=\"vgg16_based\")\n\n    return model\n\n\n\n\n" }, { "alpha_fraction": 0.5284159779548645, "alphanum_fraction": 0.6118500828742981, 
"avg_line_length": 29.518518447875977, "blob_id": "b84f2279d5954ca6a997eb93f107c605266f5e34", "content_id": "4524546c634058dfdda3bc02f97a8e1542f85f12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "permissive", "max_line_length": 78, "num_lines": 27, "path": "/check_numpy_saved_array.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport data_handling as dh\n\nif __name__ == \"__main__\":\n\n #file_name = \"augmented_images/numpy_image_array_id_0_0p0_30_30_0p85.npz\"\n file_name = \"augmented_images/numpy_image_array_id_0_3p0_10_10_1p0.npz\"\n\n loaded_data = np.load(file_name)\n\n image = (255.0*loaded_data[\"image\"]).astype(np.uint8)\n label = (255.0*loaded_data[\"label\"]).astype(np.uint8)\n #label = np.reshape(label, (100, 100))\n\n print(\"label shape: \" + str(label.shape))\n\n #label = cv2.resize(label,\n # (500, 500),\n # interpolation = cv2.INTER_LINEAR)\n #mask = cv2.imread(\"augmented_images/truth_image_id_0_0p0_30_30_0p85.png\")\n\n dh.plot_three_images(image, label[:, :, 0], label[:, :, 1])\n\n print(label[100:120, 100:120])\n\n\n\n" }, { "alpha_fraction": 0.5060120224952698, "alphanum_fraction": 0.5751503109931946, "avg_line_length": 22.447059631347656, "blob_id": "fff37a204466de8854984b1f7e84c138a67c3e4d", "content_id": "5294bab4360471e30e63e97980a084e7aa3c5f95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1996, "license_type": "permissive", "max_line_length": 116, "num_lines": 85, "path": "/temp.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport glob\n\nimport cv2\nimport numpy as np\n\nimport keras\n\n#from keras import backend as K\n#from keras.models import load_model\n#from tensorflow.python.ops import math_ops\n\n#import deep_models as dm\n\n\n# model = dm.vg16_fcn()\n# model.summary()\n# model.compile(loss=\"categorical_crossentropy\",\n# optimizer=\"adadelta\")\n\n\"\"\"\n\ndef cross_entropy(t, p):\n return -np.sum(t*np.log(p))\n\n\n\n\ntrue_arr = np.array([[[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]], \n [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]],\n [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]],])\n\n\npred_arr = np.array([[[0.9, 0.1], [0.9, 0.1], [0.9, 0.1]], \n [[0.9, 0.1], [0.99, 0.01], [0.99, 0.01]],\n [[0.9, 0.1], [0.99, 0.01], [0.9, 0.01]]])\n\npred_npr = np.array([[[9.9, 1.1], [8.9, 0.1], [1.9, 6.1]], \n [[9.9, 2.1], [9.4, 9.6], [2.5, 5.5]],\n [[9.9, 1.1], [9.2, 0.8], [3.9, 4.1]]])\n\nprint(true_arr.shape)\n\n\nce = cross_entropy(true_arr, pred_arr)\nprint(\"ce: \" + str(ce))\n\n\"\"\"\n\n\n\"\"\"\nK.get_session()\n\ntrue_var = K.variable(true_arr)\npred_var = K.variable(pred_arr)\npred_npr = K.variable(pred_npr)\n\n# output = pred_var / math_ops.reduce_sum(pred_var, reduction_indices=len(pred_var.get_shape()) - 1, keep_dims=True)\n# print(K.eval(output))\n\nsx = K.softmax(pred_npr)\nen = K.categorical_crossentropy(pred_var, true_var, from_logits=False)\n\nsx_eval = K.eval(sx)\nprint(\"sx_eval\")\nprint(sx_eval)\n\nen_eval = K.eval(en)\nprint(\"en_eval\")\nprint(en_eval)\n\nK.clear_session()\n\"\"\"\n\ninput1 = keras.layers.Input(shape=(16,))\nx1 = keras.layers.Dense(8, activation='relu')(input1)\ninput2 = keras.layers.Input(shape=(32,))\nx2 = keras.layers.Dense(8, activation='relu')(input2)\nadded = keras.layers.Add()([x1, x2]) # equivalent to added = 
keras.layers.add([x1, x2])\n\nprint(\"added._keras_shape: \" + str(added._keras_shape)) \n\nout = keras.layers.Dense(4)(added)\nmodel = keras.models.Model(inputs=[input1, input2], outputs=out)\n\n\n\n" }, { "alpha_fraction": 0.5030834674835205, "alphanum_fraction": 0.5170778036117554, "avg_line_length": 28.787986755371094, "blob_id": "f3101c7144555b606110f8abc8fb73279bfa08e9", "content_id": "e4a92105596c7d56ed2c7056c03044752ad9ec20", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8432, "license_type": "permissive", "max_line_length": 118, "num_lines": 283, "path": "/data_handling.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import glob\nimport cv2\nimport math\nimport sys\nimport os\nimport shutil\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.utils.np_utils import to_categorical\n\nDEFAULT_WIDTH = 500\nDEFAULT_HEIGHT = 500\n\ndef load_images(file_names_list):\n\n n_images = len(file_names_list)\n\n images = [None for i in range(n_images)]\n for i in range(n_images):\n images[i] = (cv2.imread(file_names_list[i]))\n\n return images\n\n\ndef read_data(data_dir=\"training/images/\"): \n\n train_file_names_list = glob.glob(data_dir + \"*.tif\")\n n_train_images = len(train_file_names_list)\n\n # The truth images have the same numbers.\n # We change the path and the extensions.\n # This ensures that train image will\n # correspond to truth image.\n truth_file_names_list = [None for i in range(n_train_images)]\n for i in range(n_train_images):\n\n if data_dir == \"training/images/\":\n truth_file_names_list[i] = train_file_names_list[i].replace(\".tif\",\"_mask.png\").replace(\"images\", \"truth\")\n elif data_dir == \"augmented_images/\":\n truth_file_names_list[i] = train_file_names_list[i].replace(\".tif\",\".png\").replace(\"train\", \"truth\")\n else:\n pass\n\n n_truth_images = len(truth_file_names_list)\n\n for i in range(n_train_images):\n tri = train_file_names_list[i]\n tui = truth_file_names_list[i]\n print(tri + \" \" + tui)\n\n train_images = load_images(train_file_names_list)\n truth_images = load_images(truth_file_names_list)\n\n return train_images, truth_images\n\n\ndef transform_label(label):\n\n lw, lh, lc = label.shape\n\n flat_label = np.zeros((lw,lh))\n\n for i in range(lw):\n for j in range(lh):\n if (label[i, j, 0] >= label[i, j, 1]):\n flat_label[i, j] = 0\n else:\n flat_label[i,j] = 1\n\n return flat_label\n\n\ndef resize_image_list(image_list, n_width, n_height):\n\n n_images = len(image_list)\n\n resized_image_list = [None for i in range(n_images)]\n for i in range(n_images):\n resized_image_list[i] = cv2.resize(image_list[i], \n (n_width, n_height), \n interpolation = cv2.INTER_LINEAR)\n\n return resized_image_list\n\n\ndef convert_image_list_to_grayscale(image_list):\n \n n_images = len(image_list)\n\n gray_scale_image_list = [None for i in range(n_images)]\n for i in range(n_images):\n gray_scale_image_list[i] = cv2.cvtColor(image_list[i], cv2.COLOR_BGR2GRAY)\n\n return gray_scale_image_list\n\n\ndef plot_two_images(bgr, gs):\n\n rows = 1\n cols = 2\n f, axs = plt.subplots(rows, cols)\n\n plt.subplot(rows, cols, 1)\n if len(bgr.shape) == 3:\n plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))\n else:\n plt.imshow(bgr)\n plt.colorbar()\n\n\n plt.subplot(rows, cols, 2)\n if len(gs.shape) == 3:\n plt.imshow(cv2.cvtColor(gs, cv2.COLOR_BGR2RGB))\n else:\n plt.imshow(gs)\n plt.colorbar()\n\n plt.show()\n\n\ndef plot_three_images(bgr, gs, img):\n\n 
rows = 1\n    cols = 3\n    f, axs = plt.subplots(rows, cols)\n\n    plt.subplot(rows, cols, 1)\n    if len(bgr.shape) == 3:\n        plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))\n    else:\n        plt.imshow(bgr)\n    plt.colorbar()\n\n\n    plt.subplot(rows, cols, 2)\n    if len(gs.shape) == 3:\n        plt.imshow(cv2.cvtColor(gs, cv2.COLOR_BGR2RGB))\n    else:\n        plt.imshow(gs)\n    plt.colorbar()\n\n    plt.subplot(rows, cols, 3)\n    if len(gs.shape) == 3:\n        plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n    else:\n        plt.imshow(img)\n    plt.colorbar()\n\n    plt.show()\n\n\ndef augment_data(train_images, \n                 truth_images,\n                 nw_image=250,\n                 nh_image=250,\n                 nw_label=50,\n                 nh_label=50,\n                 augmented_images_dir=\"augmented_images/\",\n                 ap=[[0.0, 1.0], [180.0, 1.0]],\n                 save_images=True):\n\n    # ap - augment parameters\n    # ap[i][0] - rotation angle\n    # ap[i][1] - scale\n\n    is_dir = os.path.isdir(augmented_images_dir)\n    if (is_dir == True):\n        shutil.rmtree(augmented_images_dir)\n        os.makedirs(augmented_images_dir)\n    else:\n        os.makedirs(augmented_images_dir)\n\n\n    n_images = len(train_images)\n    iw, ih, ic = train_images[0].shape\n    mw, mh, _ = truth_images[0].shape\n\n\n    if ((iw != mw) or (ih != mh)):\n        sys.exit(\"ERROR: Dimension mismatch!\")\n\n    for i in range(n_images):\n        print(\"Image: \" + str(i) + \"/\" + str(n_images), end=\"\\r\")\n        for j in range(len(ap)):\n\n            rotation_angle = ap[j][0]\n            scale = ap[j][1]\n\n            R = cv2.getRotationMatrix2D((iw/2, ih/2), rotation_angle, scale)\n\n            rotated_train_image = cv2.warpAffine(train_images[i], R, (iw, ih))\n            rotated_truth_image = cv2.warpAffine(truth_images[i], R, (mw, mh))\n\n            x_shift = ap[j][2]\n            y_shift = ap[j][3]\n\n            M = np.float32([[1, 0, x_shift],[0, 1, y_shift]])\n\n            shifted_train_image = cv2.warpAffine(rotated_train_image, M, (iw, ih))\n            shifted_truth_image = cv2.warpAffine(rotated_truth_image, M, (mw, mh))\n\n            grayscale_truth_image = cv2.cvtColor(shifted_truth_image, cv2.COLOR_BGR2GRAY)\n\n            resized_train_image = cv2.resize(shifted_train_image, \n                                             (nw_image, nh_image), \n                                             interpolation = cv2.INTER_LINEAR)\n\n            resized_truth_image = cv2.resize(grayscale_truth_image, \n                                             (nw_label, nh_label), \n                                             interpolation = cv2.INTER_LINEAR)\n\n            if save_images == True:\n                cv2.imwrite(augmented_images_dir + \\\n                            \"train_image_id_\" + \\\n                            str(i) + \"_\" + \\\n                            str(rotation_angle).replace(\".\",\"p\") + \"_\" + \\\n                            str(x_shift) + \"_\" + \\\n                            str(y_shift) + \"_\" + \\\n                            str(scale).replace(\".\",\"p\") + \".tif\", \\\n                            shifted_train_image)\n\n                cv2.imwrite(augmented_images_dir + \\\n                            \"truth_image_id_\" + \\\n                            str(i) + \"_\" + \\\n                            str(rotation_angle).replace(\".\",\"p\") + \"_\" + \\\n                            str(x_shift) + \"_\" + \\\n                            str(y_shift) + \"_\" + \\\n                            str(scale).replace(\".\",\"p\") + \".png\", \\\n                            shifted_truth_image)\n\n            fn = augmented_images_dir + \\\n                 \"numpy_image_array_id_\" + \\\n                 str(i) + \"_\" + \\\n                 str(rotation_angle).replace(\".\",\"p\") + \"_\" + \\\n                 str(x_shift) + \"_\" + \\\n                 str(y_shift) + \"_\" + \\\n                 str(scale).replace(\".\",\"p\") + \".npz\"\n\n            resized_truth_image[resized_truth_image > 0] = 1\n\n            # This part is for FCN.\n            # We distinguish between good and bad tissue so\n            # we have just two classes.\n            n_classes = 2\n            resized_truth_image = to_categorical(resized_truth_image, n_classes)\n            resized_truth_image = np.reshape(resized_truth_image, (nw_label, nh_label, n_classes))\n\n            np.savez_compressed(fn, \n                                image=resized_train_image/255.0, \n                                #label=np.reshape(resized_truth_image, (nw_label*nh_label, 1)))\n                                label=resized_truth_image)\n\n\ndef load_data_from_npz(file_name_list):\n\n    n_files = len(file_name_list)\n    if 
n_files == 0:\n        sys.exit(\"ERROR: File name list empty.\")\n\n    loaded_data = np.load(file_name_list[0])\n    image = loaded_data[\"image\"].astype(np.float32)\n    label = loaded_data[\"label\"].astype(np.float32)\n\n    iw, ih, ic = image.shape\n    mw, mh, mc = label.shape\n\n    x_data = np.zeros((n_files, ih, iw, ic))\n    y_data = np.zeros((n_files, mw, mh, mc))\n\n    x_data[0, :, :, :] = image\n    y_data[0, :, :, :] = label\n\n    for i in range(1, n_files):\n        loaded_data = np.load(file_name_list[i])\n        image = loaded_data[\"image\"].astype(np.float32)\n        label = loaded_data[\"label\"].astype(np.float32)\n\n        x_data[i, :, :, :] = image\n        y_data[i, :, :, :] = label\n\n    return x_data, y_data\n\n\n" }, { "alpha_fraction": 0.4591751992702484, "alphanum_fraction": 0.48214781284332275, "avg_line_length": 30.657894134521484, "blob_id": "374121e5cff7ffc912991d74cd71ff3bfeeea051", "content_id": "5cd75ac02c858f66f2b6780c4a89ddb87814c4a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3613, "license_type": "permissive", "max_line_length": 102, "num_lines": 114, "path": "/prepare_test_data.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport glob\nimport os\nimport shutil\n\nimport cv2\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\n\nfrom keras_fcn.layers import BilinearUpSampling2D\n\nimport data_handling as dh\nimport deep_models as dm\n\ndef load_and_resize_image(image_file_name,\n                          nw_image=250,\n                          nh_image=250):\n\n    image = cv2.imread(image_file_name)\n\n    resized_image = cv2.resize(image, \n                               (nw_image, nh_image), \n                               interpolation = cv2.INTER_LINEAR)\n    return resized_image\n\n\ndef prepare_data_for_dispatch(model_file_name,\n                              test_images_list,\n                              nw_image=250,\n                              nh_image=250,\n                              nw_label=50,\n                              nh_label=50,\n                              images_dir=\"images_for_dispatch/\"):\n\n    is_dir = os.path.isdir(images_dir)\n    if (is_dir == True):\n        shutil.rmtree(images_dir)\n        os.makedirs(images_dir)\n    else:\n        os.makedirs(images_dir)\n\n    K.get_session()\n    model = load_model(model_file_name, custom_objects={\"BilinearUpSampling2D\": BilinearUpSampling2D})\n\n    n_train_images = len(test_images_list)\n    for i in range(n_train_images):\n        print(\"i: \" + str(i + 1))\n        resized_image = load_and_resize_image(test_images_list[i],\n                                              nw_image=nw_image,\n                                              nh_image=nh_image)\n\n        # The i000000 image indicator\n        # from training/images/i000000.tif\n        if test_images_list[i][0:3] == \"tra\":\n            trunk = test_images_list[i][16:-4]\n        elif test_images_list[i][0:3] == \"tes\":\n            trunk = test_images_list[i][15:-4]\n        else:\n            pass\n\n        print(trunk)\n        fn = images_dir + str(trunk) + \".npz\"\n\n        _, _, ic = resized_image.shape\n\n        resized_image = np.reshape(resized_image, (1, nw_image, nh_image, ic))/255.0\n        label = model.predict(resized_image)\n        label = np.reshape(label, (nw_label, nh_label, 2))\n\n        label = dh.transform_label(label)\n\n        label = cv2.resize(label,\n                           (dh.DEFAULT_WIDTH, dh.DEFAULT_HEIGHT),\n                           interpolation = cv2.INTER_LINEAR)\n\n        f = open(images_dir + trunk + \"_mask.txt\", \"w\")\n\n        for v in range(dh.DEFAULT_WIDTH):\n            for w in range(dh.DEFAULT_HEIGHT):\n                if (label[v][w] > 0.5):\n                    f.write(\"1\")\n                else:\n                    f.write(\"0\")\n            f.write(\"\\n\")\n\n        f.close()\n\n    K.clear_session()\n\n\nif __name__ == \"__main__\":\n    print(\"Starting main.\")\n\n    # ------------------------------------------------------------------------\n    # Load data \n    # ------------------------------------------------------------------------\n\n    
train_images_list = glob.glob(\"training/images/*.tif\")\n test_images_list = glob.glob(\"testing/images/*.tif\")\n\n image_list = train_images_list + test_images_list\n\n # model_file_name = \"vgg16_16s_fcn_model_after_global_epoch_62.h5\"\n # model_file_name = \"vgg16_32s_fcn_model.h5\" \n model_file_name = \"model.h5\" \n prepare_data_for_dispatch(model_file_name,\n image_list,\n nw_image=500,\n nh_image=500,\n nw_label=500,\n nh_label=500,\n images_dir=\"images_for_dispatch/\")\n\n\n\n\n" }, { "alpha_fraction": 0.7676056623458862, "alphanum_fraction": 0.8098591566085815, "avg_line_length": 34, "blob_id": "886cca198d27f93f29955002481ecd8110115ef3", "content_id": "d1ba44351efd1b61f94bb3d2e535c7f3f7224af7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "permissive", "max_line_length": 48, "num_lines": 4, "path": "/README.md", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "We implement the FCN VGG16 32s and 16s networks.\n\nWe use these networks for segmentation in the \nPathological Image Segmentation Challenge.\n\n\n" }, { "alpha_fraction": 0.4825242757797241, "alphanum_fraction": 0.5543689131736755, "avg_line_length": 20.35416603088379, "blob_id": "aba5e2cd9eff6a25eac205395748a145a57d15c3", "content_id": "de509ba4265b79b89ff6f9df7989583f5a8716e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "permissive", "max_line_length": 116, "num_lines": 48, "path": "/categorical_crossentropy_testing.py", "repo_name": "diwadd/PathImageSegmentation", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport glob\n\nimport cv2\nimport numpy as np\n\nfrom keras import backend as K\nfrom tensorflow.python.ops import math_ops\n\nimport deep_models as dm\n\n\ndef cross_entropy(t, p):\n return -t*np.log(p)\n\n\n\n\ntrue_arr = np.array([[[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]], \n [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]],\n [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]],])\n\n\npred_arr = np.array([[[0.9, 0.1], [0.9, 0.1], [0.9, 0.1]], \n [[0.9, 0.1], [0.4, 0.6], [0.5, 0.5]],\n [[0.9, 0.1], [0.2, 0.8], [0.9, 0.1]]])\n\nprint(true_arr.shape)\n\n\nce = cross_entropy(true_arr, pred_arr)\nprint(\"ce: \" + str(ce))\n\nK.get_session()\n\ntrue_var = K.variable(true_arr)\npred_var = K.variable(pred_arr)\n\n# output = pred_var / math_ops.reduce_sum(pred_var, reduction_indices=len(pred_var.get_shape()) - 1, keep_dims=True)\n# print(K.eval(output))\n\nen = K.categorical_crossentropy(pred_var, true_var, from_logits=False)\n\nen_eval = K.eval(en)\nprint(en_eval)\n\nK.clear_session()\n\n\n\n\n\n" } ]
9
Gaterny/ActressSearch
https://github.com/Gaterny/ActressSearch
0b5069d859543a2236061775366314c5fa8669dc
6529adba73bb4dec1ea974df922cfc51ddae6564
328cb8dd4496b553b8c27901e8faaa9a6f505177
refs/heads/master
2020-04-12T19:55:14.052735
2018-12-22T08:25:40
2018-12-22T08:25:40
162,721,411
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7684210538864136, "alphanum_fraction": 0.7684210538864136, "avg_line_length": 18, "blob_id": "a42c88c43c3ac7650292b0baa1ff2788e1bfc62a", "content_id": "c0cb931b8e9799626fc94e49dc1873c6e82882e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/Search/actressapp/apps.py", "repo_name": "Gaterny/ActressSearch", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ActressappConfig(AppConfig):\n name = 'actressapp'\n" }, { "alpha_fraction": 0.5325379371643066, "alphanum_fraction": 0.536876380443573, "avg_line_length": 37.90140914916992, "blob_id": "ea45dbea034cf341db8c6b3fd299bcf69fb17d41", "content_id": "7502c5c2cfb3697f5c07d175d7c3670e0f245790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2856, "license_type": "no_license", "max_line_length": 92, "num_lines": 71, "path": "/Actress/Actress/spiders/actress_spider.py", "repo_name": "Gaterny/ActressSearch", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom Actress.items import ActressItem\n\n\nclass ActressSpiderSpider(scrapy.Spider):\n name = 'actress_spider'\n allowed_domains = ['javbus.com']\n start_urls = [\n 'https://www.javbus.com/actresses',\n 'https://www.javbus.com/uncensored/actresses'\n ]\n\n def parse(self, response):\n hrefs = response.xpath('//a[contains(@class, \"avatar-box\")]/@href').extract()\n for href in hrefs:\n yield scrapy.Request(url=href, callback=self.parse_detail)\n\n next_page = response.xpath('//a[@id=\"next\"]/@href').extract_first()\n if next is not None:\n next_url = response.urljoin(next_page)\n yield scrapy.Request(url=next_url, callback=self.parse)\n\n def parse_detail(self, response):\n item = ActressItem()\n name = response.xpath('//span[contains(@class, \"pb10\")]/text()').extract_first()\n\n # 以下信息有误,以后再处理\n # birth = response.xpath('//div[@class=\"photo-info\"]//p[1]/text()').extract_first()\n # age = response.xpath('//div[@class=\"photo-info\"]//p[2]/text()').extract_first()\n # # 腰围\n # waist = response.xpath('//div[@class=\"photo-info\"]//p[6]/text()').extract_first()\n # # 罩杯\n # cup = response.xpath('//div[@class=\"photo-info\"]//p[4]/text()').extract_first()\n # # 胸围\n # bust = response.xpath('//div[@class=\"photo-info\"]//p[5]/text()').extract_first()\n # # 臀围\n # hips = response.xpath('//div[@class=\"photo-info\"]//p[7]/text()').extract_first()\n # # 身高\n # height = response.xpath('//div[@class=\"photo-info\"]//p[3]/text()').extract_first()\n # 演员作品名\n film_list = response.xpath(\"//a[@class='movie-box']//span/text()\").extract()\n # 作品番号\n tags = response.xpath(\"//a[@class='movie-box']//date[1]/text()\").extract()\n # 作品时间\n dates = response.xpath(\"//a[@class='movie-box']//date[2]/text()\").extract()\n\n item['name'] = name\n # item['age'] = age\n # item['birth'] = birth\n # item['waist'] = waist\n # item['hips'] = hips\n # item['cup'] = cup\n # item['bust'] = bust\n # item['height'] = height\n\n films = []\n for film in film_list:\n # 去除换行符的干扰字符\n afilm = film.strip('\\t \\n \\r '' /')\n if afilm != '':\n films.append(afilm)\n for j in range(len(films)):\n item['film'] = films[j]\n item['tag'] = tags[j]\n item['date'] = dates[j]\n yield item\n next_page = response.xpath(\"//a[@id='next']/@href\").extract_first()\n if next_page:\n next_url = response.urljoin(next_page)\n yield scrapy.Request(url=next_url, 
callback=self.parse_detail)\n\n\n\n\n" }, { "alpha_fraction": 0.6349206566810608, "alphanum_fraction": 0.637188196182251, "avg_line_length": 20.536584854125977, "blob_id": "84bd4951e68106148d5f3779e2931a206633064f", "content_id": "acdb78b9dddafd542af415028aada8862d7dfc0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 72, "num_lines": 41, "path": "/Actress/Actress/models/es_type.py", "repo_name": "Gaterny/ActressSearch", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom elasticsearch_dsl.connections import connections\nfrom elasticsearch_dsl import Document, Text, Date, Keyword, Completion\nfrom elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer\n\n# create the connection\nconnections.create_connection(hosts=['localhost'])\n\n\nclass CustomAnalyzer(_CustomAnalyzer):\n    \"\"\"\n    autocomplete\n    \"\"\"\n    def get_analysis_definition(self):\n        return {}\n\n\nik_analyzer = CustomAnalyzer('ik_max_word', filter=['lowercase'])\n\n\nclass ActressType(Document):\n    \"\"\"\n    type configuration\n    \"\"\"\n    suggest = Completion(analyzer=ik_analyzer)\n    name = Text(analyzer='ik_max_word')  # word segmentation\n    date = Date()\n    film = Text(analyzer='ik_max_word')\n    tag = Keyword()\n\n    class Index:\n        name = 'actress'\n        settings = {\n            'number_of_shards': 2,\n        }\n\n\nif __name__ == \"__main__\":\n    ActressType.init()" } ]
3
karanbudhraja/PABMD
https://github.com/karanbudhraja/PABMD
d8622289ebb1dd6fff60be22034d7633dd9bcad5
29982bef9fbd89e05404adfa1afe15c7a5ccd350
63d0e4df6913ae7c7cedd8d72b074273d934c204
refs/heads/master
2020-06-14T17:50:29.006765
2019-07-15T21:12:47
2019-07-15T21:12:47
195,070,897
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7330960631370544, "alphanum_fraction": 0.7348754405975342, "avg_line_length": 20.615385055541992, "blob_id": "5d3f87b1129a1658751e1a880d143d5ffd134a8e", "content_id": "0ba15ef118cabd0e3e620163c13f215df0612ea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 562, "license_type": "no_license", "max_line_length": 50, "num_lines": 26, "path": "/app/do_pipeline.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# go to target script directory\ncd ../src\nsource configuration.sh\n\n# copy to demonstration folder for lfd use\ncd ../data/demonstrations/$ABM\nrm -rf *\ncp -r ../../../app/matches .\n\n# move to appropriate directory to run lfd\ncd ../../../src\n\n# run the framework to suggest several alps\n# that would produce the demonstration slps\n./lfd.sh\n\n# simulate those alps\n# to check the actual slps that they correspond to\n./simulate.sh\n\n# filter from the suggested alps\n# use distance from demonstration slps\nexport USE_FILTER_DESCRIPTOR_SETTING=1\n./filter.sh\n" }, { "alpha_fraction": 0.7180395126342773, "alphanum_fraction": 0.7912715673446655, "avg_line_length": 89.23711395263672, "blob_id": "4f17937b5d1023ddb3b730250401bf2783d10d39", "content_id": "8b149f38b51d1b6a006c27a550c274e59df63929", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 8753, "license_type": "no_license", "max_line_length": 155, "num_lines": 97, "path": "/data/gifs_all/generate_gifs.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# convert demonstrations\nconvert -delay 50 -loop 0 ../demonstrations_all/flocking/simulated/1/*.png demonstration_1.gif\nconvert -delay 50 -loop 0 ../demonstrations_all/flocking/simulated/2/*.png demonstration_2.gif\nconvert -delay 50 -loop 0 ../demonstrations_all/flocking/simulated/3/*.png demonstration_3.gif\n\nconvert -delay 50 -loop 0 ../demonstrations_/flocking/1/*.png demonstration__1.gif\nconvert -delay 50 -loop 0 ../demonstrations_/flocking/2/*.png demonstration__2.gif\nconvert -delay 50 -loop 0 ../demonstrations_/flocking/3/*.png demonstration__3.gif\n\n# convert rotated images\nconvert -rotate 90 demonstration_1.gif demonstration_1_rotation.gif\n\n# convert translated images\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_200.png -background black -flatten 1108_200_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_201.png -background black -flatten 1108_201_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_202.png -background black -flatten 1108_202_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_203.png -background black -flatten 1108_203_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_204.png -background black -flatten 1108_204_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_205.png -background black -flatten 1108_205_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_206.png -background black -flatten 1108_206_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_207.png -background black -flatten 1108_207_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_208.png -background black -flatten 1108_208_translation.png\nconvert -page +100+0 
../demonstrations_all/flocking/simulated/1/1108_209.png -background black -flatten 1108_209_translation.png\nconvert -page +100+0 ../demonstrations_all/flocking/simulated/1/1108_210.png -background black -flatten 1108_210_translation.png\nconvert +repage 1108_200_translation.png 1108_200_translation.png\nconvert +repage 1108_201_translation.png 1108_201_translation.png\nconvert +repage 1108_202_translation.png 1108_202_translation.png\nconvert +repage 1108_203_translation.png 1108_203_translation.png\nconvert +repage 1108_204_translation.png 1108_204_translation.png\nconvert +repage 1108_205_translation.png 1108_205_translation.png\nconvert +repage 1108_206_translation.png 1108_206_translation.png\nconvert +repage 1108_207_translation.png 1108_207_translation.png\nconvert +repage 1108_208_translation.png 1108_208_translation.png\nconvert +repage 1108_209_translation.png 1108_209_translation.png\nconvert +repage 1108_210_translation.png 1108_210_translation.png\nconvert -delay 50 -loop 0 *_translation.png demonstration_1_translation.gif\nrm *_translation.png\n\n# convert point selection methods\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_error_weighted/flocking/1/*.png point\\ selection/error_weighted_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_error_weighted/flocking/2/*.png point\\ selection/error_weighted_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_error_weighted/flocking/3/*.png point\\ selection/error_weighted_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_kmeans/flocking/1/*.png point\\ selection/kmeans_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_kmeans/flocking/2/*.png point\\ selection/kmeans_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_kmeans/flocking/3/*.png point\\ selection/kmeans_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_mean/flocking/1/*.png point\\ selection/mean_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_mean/flocking/2/*.png point\\ selection/mean_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_mean/flocking/3/*.png point\\ selection/mean_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_min_error/flocking/1/*.png point\\ selection/min_error_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_min_error/flocking/2/*.png point\\ selection/min_error_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/point\\ selection/predictions_min_error/flocking/3/*.png point\\ selection/min_error_3.gif\n\n# convert feature construction methods\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_contour/flocking/1/*.png feature\\ construction/contour_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_contour/flocking/2/*.png feature\\ construction/contour_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_contour/flocking/3/*.png feature\\ construction/contour_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_mnist/flocking/1/*.png feature\\ construction/mnist_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_mnist/flocking/2/*.png feature\\ construction/mnist_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ 
construction/predictions_mnist/flocking/3/*.png feature\\ construction/mnist_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vae/flocking/1/*.png feature\\ construction/vae_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vae/flocking/2/*.png feature\\ construction/vae_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vae/flocking/3/*.png feature\\ construction/vae_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vgg16/flocking/1/*.png feature\\ construction/vgg16_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vgg16/flocking/2/*.png feature\\ construction/vgg16_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_vgg16/flocking/3/*.png feature\\ construction/vgg16_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_resnet50/flocking/1/*.png feature\\ construction/resnet50_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_resnet50/flocking/2/*.png feature\\ construction/resnet50_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_resnet50/flocking/3/*.png feature\\ construction/resnet50_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lstm/flocking/1/*.png feature\\ construction/lstm_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lstm/flocking/2/*.png feature\\ construction/lstm_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lstm/flocking/3/*.png feature\\ construction/lstm_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lbp/flocking/1/*.png feature\\ construction/lbp_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lbp/flocking/2/*.png feature\\ construction/lbp_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/feature\\ construction/predictions_lbp/flocking/3/*.png feature\\ construction/lbp_3.gif\n\n# convert dataset analysis methods\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/1/unguided/predictions_best/flocking/1/*.png dataset\\ analysis/unguided_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/2/unguided/predictions_best/flocking/2/*.png dataset\\ analysis/unguided_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/3/unguided/predictions_best/flocking/3/*.png dataset\\ analysis/unguided_3.gif\n\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/1/guided/predictions_best/flocking/1/*.png dataset\\ analysis/guided_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/2/guided/predictions_best/flocking/2/*.png dataset\\ analysis/guided_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/dataset\\ analysis/error_weighted/3/guided/predictions_best/flocking/3/*.png dataset\\ analysis/guided_3.gif\n\n# convert filtering method\nconvert -delay 50 -loop 0 ../predictions_all/pruning\\ od/flocking/1/*.png pruning\\ od/pruning_od_1.gif\nconvert -delay 50 -loop 0 ../predictions_all/pruning\\ od/flocking/2/*.png pruning\\ od/pruning_od_2.gif\nconvert -delay 50 -loop 0 ../predictions_all/pruning\\ od/flocking/3/*.png pruning\\ od/pruning_od_3.gif\n" }, { "alpha_fraction": 0.6934612989425659, 
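generate_gifs.sh, now complete above, repeats the same convert call for every method/demonstration pair. A hypothetical Python driver that replays one of those batches via subprocess (method names and paths copied from the script; ImageMagick's convert must be on PATH and the output directory must exist):

import glob
import subprocess

METHODS = ["error_weighted", "kmeans", "mean", "min_error"]  # point-selection variants above
for method in METHODS:
    for demo in (1, 2, 3):
        src = "../predictions_all/point selection/predictions_%s/flocking/%d/*.png" % (method, demo)
        dst = "point selection/%s_%d.gif" % (method, demo)
        frames = sorted(glob.glob(src))
        subprocess.check_call(["convert", "-delay", "50", "-loop", "0"] + frames + [dst])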
"alphanum_fraction": 0.6946610808372498, "avg_line_length": 28.76785659790039, "blob_id": "f71261bde834b7e609f89053df97ec68ea16d57a", "content_id": "d6b7077a1fe8f912a5db018ef2b0b7f084d62254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 101, "num_lines": 56, "path": "/src/simulate.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# load configuration\nsource configuration.sh\n\nexport SIMULATION_SCRIPT=\"sampling/\"$ABM\"/predict_\"$ABM$SIMULATION_EXTENSION\n\n# remove existing data\nrm -f suggested_slps.txt\nrm -rf sampling/$ABM/images_*\n\n# for line in file\nwhile read -r line\ndo\n # read the number of alps per line\n export ALPS_PER_LINE=$(echo $line | tr -cd \")\" | wc -c)\n \n # remove all spaces in line\n # assume just one configuration per line, for now\n # fix this later by adding a loop\n export line=$(echo $line | sed \"s/ //g\")\n\n # simulate using suggested alps\n export SLP_CONFIGURATIONS=$($SIMULATION_TOOL $SIMULATION_SCRIPT $line)\n\n # in case of images, slp configurations are not returned\n # compute slp configuration from images\n if [ $IMAGES == 1 ]; then\n\t# all folders\n\texport SLP_CONFIGURATIONS=\"[\"\n\t\n\t# get the last image folder\n\t# this is the most recently created one\n\t# get as many lines as the number of alps in a row of suggested_alps.txt\n\texport SLP_IMAGES_FOLDERS=$(ls \"sampling/\"$ABM | sort | grep images_ | tail -n $ALPS_PER_LINE)\n\n\tfor folder in $SLP_IMAGES_FOLDERS\n\tdo\n\t # for each folder\n\t export SLP_CONFIGURATION=$(python sampling/utils/process_folder_images.py sampling/$ABM/$folder)\n\t export CHARACTER_COUNT=$(echo $SLP_CONFIGURATIONS | wc -c)\n\n\t if [ $CHARACTER_COUNT == 2 ]; then\n\t\t# skip first occurrence\n\t\texport SEPARATOR=\"\"\n\t else\n\t\texport SEPARATOR=\", \"\n\t fi\n\t \n\t export SLP_CONFIGURATIONS=$SLP_CONFIGURATIONS$SEPARATOR$SLP_CONFIGURATION\n\tdone\n\n\t# add closing bracket\n\texport SLP_CONFIGURATIONS=$SLP_CONFIGURATIONS\"]\"\n fi\n \n echo $SLP_CONFIGURATIONS >> suggested_slps.txt\ndone < suggested_alps.txt\n" }, { "alpha_fraction": 0.6227115988731384, "alphanum_fraction": 0.6569381952285767, "avg_line_length": 34.224300384521484, "blob_id": "141089750c1e1d743522c8da4b7ca8169c5e5edd", "content_id": "30743688501ce491903eea44f95e83b89162b92f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3769, "license_type": "no_license", "max_line_length": 111, "num_lines": 107, "path": "/src/sampling/utils/keras/conv_lstm.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "\"\"\" This script demonstrates the use of a convolutional LSTM network.\nThis network is used to predict the next frame of an artificially\ngenerated movie which contains moving squares.\n\"\"\"\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution3D\nfrom keras.layers.convolutional_recurrent import ConvLSTM2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Dense, Activation\nfrom keras.layers.core import Reshape, Flatten\nimport numpy as np\nimport pylab as plt\n\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\n# We create a layer which take as input movies of shape\n# (n_frames, width, height, channels) and returns a movie\n# of identical shape.\n\nseq = Sequential()\n\nseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n 
input_shape=(None, 40, 40, 1),\n batch_input_shape=(None, 11, 40, 40, 1),\n border_mode='same', return_sequences=True))\nseq.add(BatchNormalization())\nseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\nseq.add(BatchNormalization())\n\nseq.add(Flatten())\nseq.add(Dense(128))\nseq.add(Dense(1600*11))\nseq.add(Reshape((11, 40, 40, 1)))\n\nseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\nseq.add(BatchNormalization())\nseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\nseq.add(BatchNormalization())\nseq.add(Convolution3D(nb_filter=1, kernel_dim1=1, kernel_dim2=3,\n kernel_dim3=3, activation='sigmoid',\n border_mode='same', dim_ordering='tf'))\nseq.compile(loss='binary_crossentropy', optimizer='adadelta')\n\ndef image_data(file):\n img = imread(file, as_grey=True)\n img = resize(img, (40, 40))\n return img\n\n# Artificial data generation:\n# Generate movies with 3 to 7 moving squares inside.\n# The squares are of shape 1x1 or 2x2 pixels,\n# which move linearly over time.\n# For convenience we first create movies with bigger width and height (80x80)\n# and at the end we select a 40x40 window.\n\ndef generate_movies(n_samples=1200, n_frames=11, dataFolder=None):\n row = 40\n col = 40\n \n noisy_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=np.float)\n shifted_movies = np.zeros((n_samples, 11, row, col, 1), dtype=np.float)\n\n for i in range(n_samples):\n # read custom dataset\n for t in range(n_frames):\n # read and resize to 40x40\n x = image_data(dataFolder + \"/\" + str(i) + \"_\" + str(200+t) + \".png\")\n shifted_movies[i, t, :, :, 0] = x\n \n # figure out how to add noise to this data\n noise_f = (-1)**np.random.randint(0, 2, size=(40,40))\n noisy_movies[i, t, :, :, 0] = np.dot(x, noise_f)\n \n return noisy_movies, shifted_movies\n\n# Train the network\nnoisy_movies, shifted_movies = generate_movies(n_samples=10, dataFolder=\"../../../../_swarm-lfd-data/Flocking\")\n\n# original nb_epoch = 500\n\nseq.fit(noisy_movies, shifted_movies, batch_size=10,\n nb_epoch=1, validation_split=0.05)\n\n# save model as JSON\nmodel = seq\nmodelAsJson = model.to_json()\nwith open(\"conv_lstm_model.json\", \"w\") as jsonFile:\n jsonFile.write(modelAsJson)\n# serialize weights to HDF5\nmodel.save_weights('conv_lstm_model.h5')\n\n'''\n# Testing the network on one movie\n# feed it with the first 7 positions and then\n# predict the new positions\nwhich = 1004\ntrack = noisy_movies[which][:7, ::, ::, ::]\n\nfor j in range(16):\n new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::])\n new = new_pos[::, -1, ::, ::, ::]\n track = np.concatenate((track, new), axis=0)\n'''\n" }, { "alpha_fraction": 0.6192708611488342, "alphanum_fraction": 0.6348958611488342, "avg_line_length": 36.28155517578125, "blob_id": "4d28b5aed7764cedb4a8ac825ad0c4d186e0922b", "content_id": "06d75d7b7bf87ef7eb4643f1dcd0b97d34278821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3840, "license_type": "no_license", "max_line_length": 110, "num_lines": 103, "path": "/snippets/flocking/plot_suggested_slps.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n#demonstrations = [\"a\", \"b\", \"c\"]\n#demonstrations = [\"_a\", \"_b\", \"_c\"]\n#demonstrations = [\"a_filtered\", \"b_filtered\", \"c_filtered\"]\n#demonstrations = [\"_a\"]\n#demonstrations = 
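The conv_lstm.py record above targets the Keras 1.x API (nb_filter, border_mode, dim_ordering) and routes the sequence through a Flatten/Dense bottleneck between the two ConvLSTM stacks. A minimal sketch of the same next-frame stack in current tf.keras, assuming TensorFlow 2 (the Dense bottleneck is omitted for brevity):

from tensorflow import keras
from tensorflow.keras import layers

seq = keras.Sequential([
    keras.Input(shape=(11, 40, 40, 1)),  # (frames, height, width, channels), as above
    layers.ConvLSTM2D(40, (3, 3), padding="same", return_sequences=True),
    layers.BatchNormalization(),
    layers.ConvLSTM2D(40, (3, 3), padding="same", return_sequences=True),
    layers.BatchNormalization(),
    layers.Conv3D(1, (1, 3, 3), activation="sigmoid", padding="same"),
])
seq.compile(loss="binary_crossentropy", optimizer="adadelta")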
[\"a\"]\ndemonstrations = [\"a_filtered\"]\n\n#folderName = \"error_weighted\"\nfolderName = \"min_error\"\n\nnumberOfBins = 10\n\nwith open(\"../../src/sampling/flocking/scale_data_flocking_contour.txt\", \"r\") as inFile:\n allLines = inFile.readlines()\n maxValueArea = eval(allLines[0])[-2]\n maxValuePerimeter = eval(allLines[0])[-1]\n minValueArea = eval(allLines[1])[-2]\n minValuePerimeter = eval(allLines[1])[-1]\n\nfor index, item in enumerate(demonstrations):\n allPointsX = []\n allPointsY = []\n errors = []\n\n # scatter\n \n plt.figure()\n with open(folderName + \"/suggested_slps_\" + item + \".txt\", \"r\") as inFile:\n for line in inFile:\n point = eval(line)[0]\n plt.scatter(point[0], point[1], c=\"b\", marker=\"o\", alpha=0.3)\n allPointsX.append(point[0])\n allPointsY.append(point[1])\n\n # compute error\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n demonstration = list(eval(inFile.readlines()[index]))\n demonstration[0] = (demonstration[0]-minValueArea)/(maxValueArea-minValueArea)\n demonstration[1] = (demonstration[1]-minValuePerimeter)/(maxValuePerimeter-minValuePerimeter)\n\n _point = [x for x in point]\n _point[0] = (_point[0]-minValueArea)/(maxValueArea-minValueArea)\n _point[1] = (_point[1]-minValuePerimeter)/(maxValuePerimeter-minValuePerimeter)\n \n error = (_point[0]-demonstration[0])**2 + (_point[1]-demonstration[1])**2\n errors.append(error)\n \n plt.scatter(point[0], point[1], c=\"b\", marker=\"o\", alpha=0.3, label=\"Suggested SLPs\")\n\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n point = eval(inFile.readlines()[index])\n plt.scatter(point[0], point[1], c=\"r\", marker=\"s\", alpha=1.0, label=\"Demonstration SLPs\")\n\n plt.scatter(np.mean(allPointsX), np.mean(allPointsY), c=\"g\", marker=\"s\", alpha=1.0, label=\"Mean SLPs\")\n\n print np.mean(errors)\n print np.std(errors)\n \n ax = plt.gca()\n\n ticks = ax.get_xticks()\n minValue = minValueArea\n maxValue = maxValueArea\n scaledTicks = [(value-minValue)/(maxValue-minValue) for value in ticks]\n scaledTicks = [int(value*100)/100.0 for value in scaledTicks]\n ax.set_xticklabels(scaledTicks)\n \n ticks = ax.get_yticks()\n minValue = minValuePerimeter\n maxValue = maxValuePerimeter\n scaledTicks = [(value-minValue)/(maxValue-minValue) for value in ticks]\n scaledTicks = [int(value*100)/100.0 for value in scaledTicks]\n ax.set_yticklabels(scaledTicks)\n \n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Maximum Perimeter\")\n plt.legend()\n plt.savefig(folderName + \"/suggested_slps_\" + item)\n\n # histogram\n\n plt.figure()\n allPointsX = [(x-minValueArea)/(maxValueArea-minValueArea) for x in allPointsX]\n allPointsY = [(y-minValuePerimeter)/(maxValuePerimeter-minValuePerimeter) for y in allPointsY] \n histogram, xEdges, yEdges = np.histogram2d(allPointsX, allPointsY, bins=numberOfBins, range=[[0,1],[0,1]])\n plt.imshow(histogram, interpolation='nearest', origin='low')\n plt.ylabel(\"Maximum Area\")\n plt.xlabel(\"Maximum Perimeter\")\n plt.colorbar(label=\"Number of Points\")\n ax = plt.gca()\n\n # works for normalized values\n ticks = ax.get_xticks()\n ticks = [value/numberOfBins for value in ticks]\n ax.set_xticklabels(ticks)\n ticks = ax.get_yticks()\n ticks = [value/numberOfBins for value in ticks]\n ax.set_yticklabels(ticks)\n\n plt.savefig(folderName + \"/histogram_suggested_slps_\" + item)\n" }, { "alpha_fraction": 0.7653061151504517, "alphanum_fraction": 0.7653061151504517, "avg_line_length": 22.058822631835938, "blob_id": "8a08b6001be36accd0cab4f82e0c7fcf76ae8c2b", 
"content_id": "35b1b2cf3d2faa900f67278973de6649044f2377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 392, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/src/all.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# this script does everything\n# it is for reference of order for running scripts \n\n# sample alp, slp pairs\n./sampling.sh\n\n# run the framework to suggest several alps\n# that would produce the demonstration slps\n./lfd.sh\n\n# simulate those alps\n# to check the actual slps that they correspond to\n./simulate.sh\n\n# filter from the suggested alps\n# use distance from demonstration slps\n./filter.sh\n" }, { "alpha_fraction": 0.6927609443664551, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 27.285715103149414, "blob_id": "1f367beb221899b69dff768fa432444a241ee570", "content_id": "26d57a014a12e3080863e4b1531d5912ad5cf827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 81, "num_lines": 42, "path": "/src/sampling/utils/process_folder_images.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import sys\nimport featurize_image as fi\nimport glob\nimport os\nimport numpy as np\n\n# process a single folder\ndef process_folder(folderName):\n # get environment variables\n descriptorSetting = os.environ[\"DESCRIPTOR_SETTING\"]\n numDependent = eval(os.environ[\"NUM_DEPENDENT\"])\n\n # load feature extractor if needed\n net = fi.get_network(descriptorSetting)\n \n # initialize collection of dependent values\n dependentValuesFolder = []\n\n # expect everything in this folder to be an image\n for imageName in glob.glob(folderName + \"/*.*\"):\n # read image\n print >> sys.stderr, (\"now processing \" + imageName)\n \n # define dependent values\n dependentValuesImage = fi.get_features(net, imageName, descriptorSetting)\n\n # append to list\n dependentValuesFolder.append(dependentValuesImage)\n\n dependentValuesFolder = np.mean(dependentValuesFolder, axis=0)\n\n return tuple(dependentValuesFolder)\n\ndef main():\n folderName = sys.argv[1]\n dependentValuesFolder = process_folder(folderName)\n\n print dependentValuesFolder\n \nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n main()\n" }, { "alpha_fraction": 0.594718337059021, "alphanum_fraction": 0.6133802533149719, "avg_line_length": 33.20481872558594, "blob_id": "12c0aaeaf20be00b0d3d582f52f094d74f35a25c", "content_id": "1f4673ce4f279190e5181de29c228ab9faa40146", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2840, "license_type": "no_license", "max_line_length": 109, "num_lines": 83, "path": "/data/domaindata/compute_statistics.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import numpy as np\n\n# results from batch data\nRESULTS_FILE = \"results_flocking.csv\"\nINPUT_NAME =\"\\\"Input.gifPrefix\\\"\"\n\ndef is_integer(x):\n try:\n int(x)\n return True\n except:\n return False\n\n# main code\nwith open(RESULTS_FILE) as results:\n headers = results.readline()\n\n #print headers\n \n headers = headers.split(\",\")\n inputIndex = headers.index(INPUT_NAME)\n answer1Index = inputIndex + 1\n answer2Index = inputIndex + 2\n answer3Index = inputIndex + 3\n answer4Index = inputIndex + 4\n\n # increment all by 3 for some reason\n # to get correct values in data\n 
inputIndex += 3\n answer1Index += 3\n answer2Index += 3\n answer3Index += 3\n answer4Index += 3\n \n # read result data\n resultData = {}\n for line in results:\n inputData = line.split(\",\")[inputIndex]\n answer1 = line.split(\",\")[answer1Index]\n answer2 = line.split(\",\")[answer2Index]\n answer3 = line.split(\",\")[answer3Index]\n answer4 = line.split(\",\")[answer4Index]\n\n #print line\n #print inputData\n #print answer1, answer2, answer3, answer4\n\n if(inputData not in resultData.keys()):\n resultData[inputData] = {}\n if(\"answer1\" not in resultData[inputData].keys()):\n resultData[inputData][\"answer1\"] = []\n if(\"answer2\" not in resultData[inputData].keys()):\n resultData[inputData][\"answer2\"] = []\n if(\"answer3\" not in resultData[inputData].keys()):\n resultData[inputData][\"answer3\"] = []\n\n # check control question\n if(eval(eval(answer4)) < 8):\n # control question failed\n print \"control question failed: \" + answer4\n continue\n\n # check for invalid responses\n invalidResponse = [type(eval(eval(x))) is not int for x in [answer1, answer2, answer3, answer4]]\n if(np.any(invalidResponse) == True):\n # invalid response\n print \"invalid response: \" + str([answer1, answer2, answer3, answer4])\n \n resultData[inputData][\"answer1\"].append(eval(answer1))\n resultData[inputData][\"answer2\"].append(eval(answer2))\n resultData[inputData][\"answer3\"].append(eval(answer3))\n\n # now process result data\n for inputData in resultData.keys():\n print inputData\n answerList = []\n for answer in resultData[inputData].keys():\n resultData[inputData][answer] = [eval(x) for x in resultData[inputData][answer] if is_integer(x)]\n #resultData[inputData][answer] = np.mean(resultData[inputData][answer])\n resultData[inputData][answer] = np.std(resultData[inputData][answer])\n print answer, resultData[inputData][answer]\n answerList.append(resultData[inputData][answer])\n print \"mean: \", np.mean(answerList)\n\n" }, { "alpha_fraction": 0.6615158319473267, "alphanum_fraction": 0.6857984066009521, "avg_line_length": 27.11724090576172, "blob_id": "59eda82a02810568e0283b7b641928c11df11c13", "content_id": "438233b2382da77003e30b8fdd85da3f83593bdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4077, "license_type": "no_license", "max_line_length": 89, "num_lines": 145, "path": "/src/sampling/utils/keras/variational_autoencoder_model.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "'''This script demonstrates how to build a variational autoencoder with Keras.\n\nReference: \"Auto-Encoding Variational Bayes\" https://arxiv.org/abs/1312.6114\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\nfrom keras.layers import Input, Dense, Lambda\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import objectives\nfrom keras.datasets import mnist\nfrom keras.models import model_from_json\n\nimport math\n\n#batch_size = 100\nbatch_size = 1\noriginal_dim = 784\nlatent_dim = 128\nintermediate_dim = 256\n#nb_epoch = 50\nnb_epoch = 1\nepsilon_std = 0.01\n\nx = Input(batch_shape=(batch_size, original_dim))\nh = Dense(intermediate_dim, activation='relu')(x)\nz_mean = Dense(latent_dim)(h)\nz_log_var = Dense(latent_dim)(h)\n\n\ndef sampling(args):\n z_mean, z_log_var = args\n\n # karan\n #batch_size = 100\n #latent_dim = 128\n #epsilon_std = 0.01\n \n epsilon = K.random_normal(shape=(batch_size, 
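compute_statistics.py above double-evals quoted CSV cells, drops assignments whose control answer is below 8, and flags non-integer responses. A safer version of that screen without eval (the column layout is assumed as in the script: three answers followed by one control question):

def parse_answer(cell):
    try:
        return int(str(cell).strip().strip('"'))
    except ValueError:
        return None  # non-integer response

def keep_assignment(answers, control_min=8):
    values = [parse_answer(a) for a in answers]
    if None in values or values[-1] < control_min:
        return None  # invalid response or failed control question
    return values[:-1]  # keep answer1..answer3, drop the control answer

print(keep_assignment(['"3"', '"5"', '"2"', '"9"']))  # -> [3, 5, 2]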
latent_dim), mean=0.,\n std=epsilon_std)\n return z_mean + K.exp(z_log_var / 2) * epsilon\n\n# note that \"output_shape\" isn't necessary with the TensorFlow backend\nz = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])\n\n# we instantiate these layers separately so as to reuse them later\ndecoder_h = Dense(intermediate_dim, activation='relu')\ndecoder_mean = Dense(original_dim, activation='sigmoid')\nh_decoded = decoder_h(z)\nx_decoded_mean = decoder_mean(h_decoded)\n\n\ndef vae_loss(x, x_decoded_mean):\n xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)\n kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return xent_loss + kl_loss\n\nvae = Model(x, x_decoded_mean)\nvae.compile(optimizer='rmsprop', loss=vae_loss)\n\n# train the VAE on MNIST digits\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# load all images in folder\n\ndef image_data(file):\n\timg = imread(file, as_grey=True)\n img = resize(img, (28, 28))\n img = img.flatten()\n return img\n\ndef folder_data(folder):\n\tfiles = glob.glob(folder + \"/*.png\")\n\tprint(\"Calculating descriptors. Number of images is\", len(files))\n\treturn [image_data(file) for file in files]\n\nfolder = \"images/1108\"\n#folder = \"../../../../_swarm-lfd-data/Flocking\"\nx_folder = folder_data(folder)\nsplitPoint = int(math.ceil(len(x_folder)*2.0/3.0))\nx_train = x_folder[:splitPoint]\nx_test = x_folder[splitPoint:]\n\n# artificially increase size of lists\n#x_train *= 100\n#x_test *= 100\n\nx_train = np.array(x_train)\nx_test = np.array(x_test)\n\nprint(len(x_train))\nprint(len(x_test))\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n\nprint(x_train.shape)\nprint(x_train.shape[1:])\n\nvae.fit(x_train, x_train,\n shuffle=True,\n nb_epoch=nb_epoch,\n batch_size=batch_size,\n validation_data=(x_test, x_test))\n\n'''\n# save model as JSON\nmodel = vae\nmodelAsJson = model.to_json()\nwith open(\"variational_autoencoder_model.json\", \"w\") as jsonFile:\n jsonFile.write(modelAsJson)\n# serialize weights to HDF5\nmodel.save_weights('variational_autoencoder_model.h5')\n'''\n\n'''\n# load json and create model\njson_file = open('variational_autoencoder_model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"variational_autoencoder_model.h5\")\n\n# get output for specific layer\n# with a Sequential model\nmodel = loaded_model\nprint(len(model.layers))\n\nimage = image_data(\"images/1108/1108_200.png\")\nimage = image.astype('float32') / 255.\nimage = image.reshape((1, np.prod(x_train.shape[1:])))\n\nget_layer_output = K.function([model.layers[0].input, K.learning_phase()],\n [model.layers[3].output])\n\nlayer_output = get_layer_output([image, 0])[0]\nprint(len(layer_output[0]))\n'''\n" }, { "alpha_fraction": 0.7346625924110413, "alphanum_fraction": 0.7346625924110413, "avg_line_length": 30.047618865966797, "blob_id": "ccf0ccb753afb63d35c23cf504ea9943b2c7a1ad", "content_id": "f4408024e0b759e7488b6f73af44748bd904bdd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/src/plot_feedback.py", "repo_name": "karanbudhraja/PABMD", 
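The variational autoencoder record above uses the standard reparameterisation trick and KL penalty. The same two pieces in plain NumPy, with epsilon_std = 0.01 as in the script:

import numpy as np

def sample_z(z_mean, z_log_var, epsilon_std=0.01):
    eps = np.random.normal(0.0, epsilon_std, size=z_mean.shape)
    return z_mean + np.exp(z_log_var / 2.0) * eps  # z = mu + sigma * eps

def kl_term(z_mean, z_log_var):
    # -0.5 * sum(1 + log var - mu^2 - var), summed over latent dimensions
    return -0.5 * np.sum(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var), axis=-1)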
"src_encoding": "UTF-8", "text": "import sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwith open(\"feedback_output_difference.txt\") as inFile:\n difference = inFile.readlines()\n difference = [eval(x) for x in difference]\n print >> sys.stderr, np.sum(difference)\nplt.figure()\nplt.plot(difference, label=\"difference\")\nplt.legend()\nplt.savefig(\"feedback_output_difference.png\")\n\nwith open(\"feedback_input_difference.txt\") as inFile:\n difference = inFile.readlines()\n difference = [eval(x) for x in difference]\n print >> sys.stderr, np.sum(difference)\nplt.figure()\nplt.plot(difference, label=\"difference\")\nplt.legend()\nplt.savefig(\"feedback_input_difference.png\")\n" }, { "alpha_fraction": 0.6097992658615112, "alphanum_fraction": 0.6257379055023193, "avg_line_length": 20.71794891357422, "blob_id": "2f509e1321a4c951de5d0934a940ef1d429544c7", "content_id": "beea8a739a0d74e34470bdd642aa59286529886b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1694, "license_type": "no_license", "max_line_length": 66, "num_lines": 78, "path": "/src/sampling/schelling/predict_schelling.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import SchellingModel\nimport random\nimport sys\nimport numpy as np\n\n# only use three of the several ALPs\n# density (0,1)\n# minority_pc (0,1)\n\n# use one model-level SLP\n# happy\n\n# constant ALPs\nHEIGHT = 20\nWIDTH = 20\nHOMOPHILY = 4\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 1\nSAMPLES = 10\nTIME_LAPSE = 1000\n\ndef main_schelling(density, minorityPc):\n # instantiate and run model\n model = SchellingModel(\n height=HEIGHT,\n width=WIDTH,\n density=density,\n minority_pc=minorityPc,\n homophily=HOMOPHILY)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n try:\n # step\n model.step()\n except:\n # no empty cells\n pass\n \n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n try:\n # step\n model.step()\n except:\n # saturated\n # no empty cells\n pass\n\n # read data\n data = model.datacollector.get_model_vars_dataframe() \n dependentValues.append(np.mean(list(data.happy)[-SAMPLES-1:]))\n\n return dependentValues\n \ndef main():\n # simulate using specific ALPs\n alpConfigurationList = eval(sys.argv[1])\n\n # store corresponding slps\n slpConfigurationList = []\n \n for alpConfiguration in alpConfigurationList:\n [density, minorityPc] = alpConfiguration\n \n # run model using those ALPs\n dependentValues = main_schelling(density, minorityPc)\n slpConfigurationList.append(dependentValues)\n\n print(slpConfigurationList)\n\nmain()\n" }, { "alpha_fraction": 0.6643073558807373, "alphanum_fraction": 0.6723963618278503, "avg_line_length": 28.08823585510254, "blob_id": "c60013c803d1d6cc6161f9ad0b394b6365866421", "content_id": "08ff94a342466b67f7faef2c5f2157a1a3a6fca0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/data/domaindata/reduce.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.decomposition import KernelPCA\nimport cPickle\n\nnumDependent = 50\n\n# read input\nwith open(\"processed_turbulence_vgg.txt\", \"r\") as inFile:\n lines = inFile.readlines()[1:]\n lines = [[eval(x) for x in line.split(\" \")] for line in 
lines]\n\nlines = np.array(lines)\nalps = lines[:,:2]\nslps = lines[:,2:]\nkpca = KernelPCA(n_components=numDependent, kernel=\"rbf\")\nmodel = kpca.fit(slps)\nslpsKpca = model.transform(slps)\nxKpca = np.concatenate((alps, slpsKpca), axis=1)\n\nrows, columns = xKpca.shape\n\n# write output\nwith open(\"processed_turbulence_vgg_reduced.txt\", \"w\") as outFile:\n outFile.write(\"i\"*2 + \"d\"*numDependent)\n\n for rowIndex in range(rows):\n string = \"\\n\"\n for columnIndex in range(columns):\n string += str(xKpca[rowIndex][columnIndex]) + \" \"\n string = string[:-1]\n outFile.write(string)\n\n# save transform\ncPickle.dump(model, open(\"../../src/sampling/utils/transforms/turbulence_reduced_model.p\", \"wb\"))\n" }, { "alpha_fraction": 0.6873920559883118, "alphanum_fraction": 0.6925734281539917, "avg_line_length": 24.130434036254883, "blob_id": "957e66a619fa6e0f75520edde500d22267dd878c", "content_id": "f62f510793c3d6900bfd2cad563db98bf7e558cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 579, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/src/do_feedback.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# clean\nrm -f feedback_*.txt\nrm -f suggested_*.txt\nrm -f queried_*.txt\n\n# run multiple times to generate multiple sets of images\nexport ITERATIONS=100\n\nfor i in $(seq \"$ITERATIONS\")\ndo\n # store the current iteration value\n echo $i > feedback_iteration.txt\n \n # run experiment\n # simulate results\n ./lfd.sh && ./simulate.sh\n\n # observe data for slps\n # append to file\n cat suggested_slps.txt >> feedback_suggested_slps.txt\n cat queried_slps.txt >> feedback_queried_slps.txt\n cat regression_parameter.txt >> feedback_regression_parameter.txt\ndone\n\n" }, { "alpha_fraction": 0.5935162305831909, "alphanum_fraction": 0.6084787845611572, "avg_line_length": 32.072166442871094, "blob_id": "a433d1c05e443d18949c083047c0db92919fd502", "content_id": "23ac65e121d2a200e5cf53c09a4ae04d5e00b15f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3208, "license_type": "no_license", "max_line_length": 106, "num_lines": 97, "path": "/snippets/eum/plot_suggested_slps.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n#demonstrations = [\"a\", \"b\", \"c\"]\n#demonstrations = [\"_a\", \"_b\", \"_c\"]\ndemonstrations = [\"a\"]\n#demonstrations = [\"_a\"]\n#demonstrations = [\"a_filtered\"]\n\nfolderName = \"error_weighted\"\n#folderName = \"min_error\"\n\nnumberOfBins = 10\n\nwith open(\"../../src/sampling/eum/scale_data_eum.txt\", \"r\") as inFile:\n allLines = inFile.readlines()\n maxValueMean = eval(allLines[0])[-2]\n maxValueMedian = eval(allLines[0])[-1]\n minValueMean = eval(allLines[1])[-2]\n minValueMedian = eval(allLines[1])[-1]\n\nfor index, item in enumerate(demonstrations):\n allPointsX = []\n allPointsY = []\n errors = []\n \n # scatter\n \n plt.figure()\n with open(folderName + \"/suggested_slps_\" + item + \".txt\", \"r\") as inFile:\n for line in inFile:\n try:\n point = eval(line)[0]\n plt.scatter(point[0], point[1], c=\"b\", marker=\"o\", alpha=0.3)\n allPointsX.append(point[0])\n allPointsY.append(point[1])\n\n # compute error\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n demonstration = eval(inFile.readlines()[index])\n error = (point[0]-demonstration[0])**2 + (point[1]-demonstration[1])**2\n 
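reduce.py, completed above, compresses high-dimensional SLP descriptors with RBF kernel PCA, prepends the ALP columns, and pickles the fitted transform for reuse at query time. The core steps (standard pickle in place of Python 2's cPickle; alps and slps are 2-D arrays):

import pickle
import numpy as np
from sklearn.decomposition import KernelPCA

def reduce_slps(alps, slps, n_components=50, out_path="reduced_model.p"):
    kpca = KernelPCA(n_components=n_components, kernel="rbf")
    slps_low = kpca.fit_transform(slps)              # fit and project in one call
    with open(out_path, "wb") as f:
        pickle.dump(kpca, f)                         # same projection reusable later
    return np.concatenate((alps, slps_low), axis=1)  # rows of [ALP | reduced SLP]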
errors.append(error)\n \n except:\n continue\n \n plt.scatter(point[0], point[1], c=\"b\", marker=\"o\", alpha=0.3, label=\"Suggested SLPs\")\n\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n point = eval(inFile.readlines()[index])\n plt.scatter(point[0], point[1], c=\"r\", marker=\"s\", alpha=1.0, label=\"Demonstration SLPs\")\n \n plt.scatter(np.mean(allPointsX), np.mean(allPointsY), c=\"g\", marker=\"s\", alpha=1.0, label=\"Mean SLPs\")\n\n print np.mean(errors)\n print np.std(errors)\n \n ax = plt.gca()\n\n ticks = ax.get_xticks()\n minValue = minValueMean\n maxValue = maxValueMean\n scaledTicks = [(value-minValue)/(maxValue-minValue) for value in ticks]\n scaledTicks = [int(value*100)/100.0 for value in scaledTicks]\n ax.set_xticklabels(scaledTicks)\n \n ticks = ax.get_yticks()\n minValue = minValueMedian\n maxValue = maxValueMedian\n scaledTicks = [(value-minValue)/(maxValue-minValue) for value in ticks]\n scaledTicks = [int(value*100)/100.0 for value in scaledTicks]\n ax.set_yticklabels(scaledTicks)\n\n plt.ylabel(\"Median\")\n plt.xlabel(\"Mean\")\n plt.legend()\n plt.savefig(folderName + \"/suggested_slps_\" + item)\n\n # histogram\n \n plt.figure()\n histogram, xEdges, yEdges = np.histogram2d(allPointsX, allPointsY, bins=numberOfBins)\n plt.imshow(histogram, interpolation='nearest', origin='low')\n plt.xlabel(\"Mean Final Position\")\n plt.ylabel(\"Median Final Position\")\n plt.colorbar(label=\"Number of Points\")\n ax = plt.gca()\n\n # works for normalized values\n ticks = ax.get_xticks()\n ticks = [value/numberOfBins for value in ticks]\n ax.set_xticklabels(ticks)\n ticks = ax.get_yticks()\n ticks = [value/numberOfBins for value in ticks]\n ax.set_yticklabels(ticks)\n\n plt.savefig(folderName + \"/histogram_suggested_slps_\" + item)\n" }, { "alpha_fraction": 0.6872605085372925, "alphanum_fraction": 0.7131226062774658, "avg_line_length": 30.1641788482666, "blob_id": "b8c6d0c896548676b6ee1e7450744ec14a4bc566", "content_id": "318d23cb9b11601480f20c3fd535fa0566bf5212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2088, "license_type": "no_license", "max_line_length": 74, "num_lines": 67, "path": "/src/sampling/utils/keras/mnist_cnn_model.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "'''Trains a simple convnet on the MNIST dataset.\n\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import model_from_json\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\n# load json and create model\njson_file = open('mnist_cnn_model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"mnist_cnn_model.h5\")\nprint(\"Loaded model from disk\")\n\n# the data, shuffled and split between train and test sets\n# input image dimensions\nnb_classes = 10\nimg_rows, img_cols = 28, 28\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\nX_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\nX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train 
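Both plot_suggested_slps.py records (flocking earlier, eum just completed above) bin the suggested SLPs into a 10x10 density map with np.histogram2d. That view in a few lines, assuming the points are already scaled to [0, 1]:

import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

def density_map(xs, ys, bins=10, out_path="histogram.png"):
    hist, _, _ = np.histogram2d(xs, ys, bins=bins, range=[[0, 1], [0, 1]])
    plt.figure()
    plt.imshow(hist, interpolation="nearest", origin="lower")  # 'lower' is the modern spelling of 'low'
    plt.colorbar(label="Number of Points")
    plt.savefig(out_path)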
samples')\nprint(X_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\n# test model\nmodel = loaded_model\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n# import image and resize\nimage = imread(\"409_200.png\", as_grey=True)\nimage = resize(image, (28, 28))\n\nfrom skimage.io import imshow\n\nimage = image.reshape(1, 1, 28, 28)\nprint(image.shape)\nclasses = model.predict_classes(image)\nprint(classes)\n\n# get output for specific layer\nfrom keras import backend as K\n# with a Sequential model\nprint(len(model.layers))\nget_layer_output = K.function([model.layers[0].input, K.learning_phase()],\n [model.layers[7].output])\nlayer_output = get_layer_output([image, 0])[0]\nprint(layer_output)\n" }, { "alpha_fraction": 0.7064123153686523, "alphanum_fraction": 0.7095919251441956, "avg_line_length": 34.60377502441406, "blob_id": "f5c21909f4bdf333f86c6c98da59f185db0f0011", "content_id": "2a7d3206d3758b6fad54c07d67e94b4f212fcf1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 116, "num_lines": 53, "path": "/src/sampling/sampling_process_raw.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# copy raw data file if needed\nif [ $IMAGES == 1 ]; then\n # raw data file is present in non-repository directory\n cp ../../../_swarm-lfd-data/$ABM/$ABM.txt .\nfi\n\n# separate head from rest of data\nhead -n 1 $ABM.txt > header.txt\ntail -n +2 $ABM.txt > $ABM\"_.txt\"\nmv $ABM\"_.txt\" $ABM.txt\n\n# process images if any\n# check if this abm depends on images\nif [ $IMAGES == 1 ]; then\n # images and raw data file are present in non-repository directory\n mv ../../../_swarm-lfd-data/$ABM/images $ABM/\n python utils/process_training_images.py\n mv $ABM/images ../../../_swarm-lfd-data/$ABM/\nfi\n\n# extract independent component\n# generate dependent component\n# combine results\n# overwrite header file\nexport INDEPENDENT_HEADER=\"i\"\nexport DEPENDENT_HEADER=\"d\"\nexport INDEPENDENT_HEADER_STRING=$(sed \"s/[^\"$INDEPENDENT_HEADER\"']//g\" <<< cat header.txt)\nexport DEPENDENT_HEADER_STRING=$(printf $DEPENDENT_HEADER {1..$NUM_DEPENDENT})\necho $INDEPENDENT_HEADER_STRING$DEPENDENT_HEADER_STRING > header.txt\n\n# prepare data for amf input\necho \"==>preparing data for amf\"\npython ../map.py $ABM.txt $ABM > processed_$ABM.txt\ncat header.txt processed_$ABM.txt > temp.txt\nmv temp.txt processed_$ABM.txt\nrm header.txt\nmv processed_$ABM.txt ../../data/domaindata\n\n# if image processing was used, append descriptor setting to processed filename\nif [ $IMAGES == 1 ]; then\n # generate lower case descriptor name\n export DESCRIPTOR_SETTING_LOWER=$(echo \"$DESCRIPTOR_SETTING\" | tr '[:upper:]' '[:lower:]')\n mv ../../data/domaindata/processed_$ABM.txt ../../data/domaindata/processed_$ABM\"_\"$DESCRIPTOR_SETTING_LOWER.txt\nfi\n\n# this file is no longer needed for computation\n# the forward and reverse mappings are built from the processed file\necho \"==>cleaning\"\nrm $ABM.txt\n\n# move scale data to the abm folder\n# assume only one scale data file exists here right now\nmv scale_data_$ABM*.txt $ABM\n" }, { "alpha_fraction": 0.6077530384063721, "alphanum_fraction": 0.6122422218322754, "avg_line_length": 44.515625, "blob_id": "e38b994312427bd1fb97cca60eade85e1263f253", "content_id": 
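mnist_cnn_model.py above reloads a trained model from JSON plus HDF5 weights and taps layer 7 with K.function to reuse its activations as image features. In current tf.keras the same tap is usually expressed as a sub-model (layer index kept from the script; TensorFlow 2 assumed):

from tensorflow import keras

def feature_extractor(json_path, weights_path, layer_index=7):
    with open(json_path) as f:
        model = keras.models.model_from_json(f.read())
    model.load_weights(weights_path)
    # sub-model that outputs the chosen layer's activations
    return keras.Model(inputs=model.input, outputs=model.layers[layer_index].output)

# features = feature_extractor("mnist_cnn_model.json", "mnist_cnn_model.h5").predict(batch)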
"13a7dbdd0afcb62d7bc875cb3eafbe47016f7074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37869, "license_type": "no_license", "max_line_length": 285, "num_lines": 832, "path": "/src/lfd.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# import libraries\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport sys\nimport amf\nimport amf.misc as misc\n\nfrom sklearn import svm\nfrom sklearn.cluster import KMeans\nimport scipy.io as sio\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nimport os\nimport time\nimport random\n\nimport cPickle\nimport gzip\nimport numpy as np\nfrom sets import Set\n\nimport sampling.utils.featurize_image as fi\nimport sampling.utils.outlier_detection as od\n\n# constants\nPLOT_COLOR = {0: \"r\", 1: \"b\", 2: \"g\", 3: \"c\", 4: \"m\", 5: \"y\", 6: \"k\"}\nPLOT_SHAPE = {0: \"o\", 1: \"^\", 2: \"s\", 3: \"d\", 4: \"+\", 5: \"v\", 6: \"*\"}\nPLOT_DEMONSTRATIONS = {0: \"a\", 1: \"b\", 2: \"c\", 3: \"d\", 4: \"e\", 5: \"f\", 6: \"g\"}\n\nRUNS_PER_SLPS = 1\n\nIMAGE_FEATURE_PATH_PREFIX = \"sampling/\"\n\n# experiment class\nclass Experiment(object):\n\n # initialize experiment parameters based on file\n def __init__(self):\n # read configuration\n configurationFileName = os.environ[\"CONFIGURATION_FILE\"]\n configuration = open(configurationFileName).read()\n execd = {}\n exec(configuration, execd)\n\n self.trainingSize = execd['TRAINING_SIZE']\n self.validationSize = execd['VALIDATION_SIZE']\n self.rmGranularity = execd['RM_GRANULARITY']\n self.numIndependent = execd['NUM_INDEPENDENT']\n self.regression = execd['REGRESSION']\n\n # changes for feedback\n if(eval(os.environ[\"FEEDBACK\"]) == 1):\n # randomness causes too much noise for feedback\n self.validationSize = 0\n\n # modify regression parameter\n if(os.path.isfile(\"regression_parameter.txt\") == True):\n with open(\"regression_parameter.txt\", \"r\") as inFile:\n regressionParameter = inFile.readlines()[0]\n #self.regression = self.regression.replace(\"()\", \"(alpha=np.exp((-1*\" + regressionParameter + \")))\")\n self.regression = self.regression.replace(\"()\", \"(n_neighbors=int(np.floor(\" + regressionParameter + \")))\")\n \n # get environment variables\n self.abm = os.environ[\"ABM\"]\n self.demonstrationFolder = os.environ[\"DEMONSTRATION_FOLDER\"]\n self.descriptorSetting = os.environ[\"DESCRIPTOR_SETTING\"]\n self.numDependent = eval(os.environ[\"NUM_DEPENDENT\"])\n self.dataFileName = os.environ[\"DATA_FILE\"]\n self.scaleDataFileName = os.environ[\"SCALE_DATA_FILE\"]\n self.images = eval(os.environ[\"IMAGES\"])\n self.writeToDisk = eval(os.environ[\"WRITE_TO_DISK\"])\n \n # reverse mapping configuration: ranges of agent level parameters\n self.rmConfiguration = tuple()\n for i in range(self.numIndependent):\n self.rmConfiguration += ((0.0, 1.0), )\n self.rmConfiguration = list(self.rmConfiguration)\n\n # for plotting\n self.rmQueryCount = 0\n \n # mapping models\n self.fm = None\n self.rm = None\n \n # retrieve reverse mapping\n def get_mappings(self, slpsList, saveData=False):\n # parse slps list\n demonstrationDescriptors = [depValues for depValues, plotColor, plotShape in slpsList]\n \n # load data\n print >> sys.stderr, \"->Loading data set \" + self.dataFileName\n fullData = amf.data.load(self.dataFileName)\n \n # forward mapping using largest training size\n print >> sys.stderr, \"->Trainining FM\"\n trainingSize = self.trainingSize\n dataset, validation = 
amf.data.random_subsets(fullData, [trainingSize, self.validationSize], demonstrationDescriptors)\n fm = amf.ForwardMapping(self.regression, self.numDependent)\n fm.train(dataset)\n \n if(saveData == True):\n # gather ALPs and SLPs\n # plot them in space to observe distribution\n alpPoints = [item[0] for item in dataset]\n slpPoints = [item[1] for item in dataset]\n c = \"r\"\n m = \"o\"\n \n # save ALP dataset\n with open(\"alp_dataset_\" + str(int(time.time())) + \".txt\", \"w\") as alpDatasetFile:\n for alpPoint in alpPoints:\n alpDatasetFile.write(str(alpPoint) + \"\\n\")\n \n # save SLP dataset\n with open(\"slp_dataset_\" + str(int(time.time())) + \".txt\", \"w\") as slpDatasetFile:\n for slpPoint in slpPoints:\n slpDatasetFile.write(str(slpPoint) + \"\\n\")\n\n # plot ALPs\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plotName = \"alp_points_\" + str(int(time.time())) + \".png\"\n alpPoints = zip(*alpPoints)\n [xs, ys, zs] = alpPoints\n ax.scatter(xs, ys, zs, c=c, marker=m)\n ax.set_xlabel(\"Max-align-turn\")\n ax.set_ylabel(\"Max-cohere-turn\")\n ax.set_zlabel(\"Max-separate-turn\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"alp_points_dim1_\" + str(int(time.time())) + \".png\"\n plt.plot(xs, xs, c+m)\n plt.xlabel(\"ALP Dimension 1\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"alp_points_dim2_\" + str(int(time.time())) + \".png\"\n plt.plot(ys, ys, c+m)\n plt.xlabel(\"ALP Dimension 2\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"alp_points_dim3_\" + str(int(time.time())) + \".png\"\n plt.plot(zs, zs, c+m)\n plt.xlabel(\"ALP Dimension 3\")\n plt.savefig(plotName)\n plt.close(fig)\n \n # plot SLPs\n fig = plt.figure()\n plotName = \"slp_points_\" + str(int(time.time())) + \".png\"\n slpPoints = zip(*slpPoints)\n [xs, ys] = slpPoints\n plt.plot(xs, ys, c+m)\n plt.xlabel(\"SLP Dimension 1\")\n plt.ylabel(\"SLP Dimension 2\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"slp_points_dim1_\" + str(int(time.time())) + \".png\"\n plt.plot(xs, xs, c+m)\n plt.xlabel(\"SLP Dimension 1\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"slp_points_dim2_\" + str(int(time.time())) + \".png\"\n plt.plot(ys, ys, c+m)\n plt.xlabel(\"SLP Dimension 2\")\n plt.savefig(plotName)\n plt.close(fig)\n\n # reverse mapping using largest granularity\n print >> sys.stderr, \"->Trainining RM\"\n granularity = self.rmGranularity\n rm = amf.ReverseMapping(fm, self.rmConfiguration, granularity, slpsList)\n\n return [fm, rm]\n\n # query reverse mapping for configurations\n def query_rm(self, fm, rm, slps, ax, c, m, plotConfigurations=False):\n intersections = rm.all_intersections(slps)\n allConfigurations = []\n\n #\n # filter from all query results\n # \n \n simplexNumbers = []\n edgeNumbers = []\n _allConfigurations = []\n\n # check for special case\n # if wiggle happened, then only one configuration is returned\n if(len(intersections[0]) == 1):\n # configuration corresponds to simplex number 0 and edge number 0\n # an intersection is stored as [simplexNumber][edgeNumber][configuration]\n if(type(intersections[0][0][0]) is tuple):\n # no simplex number is returned in this case\n wiggledConfiguration = intersections[0][0][0]\n else:\n # different formats of return\n # in this case, a default simplex and edge number is attached\n # not sure why\n wiggledConfiguration = intersections[0][0][1][0][1]\n \n allConfigurations = 
[list(wiggledConfiguration)] \n return allConfigurations\n \n for intersection in intersections:\n currentEdgeNumbers = {}\n _configurations = {}\n simplexNumbers.append([simplexNumber for (simplexNumber, configurations) in intersection])\n \n for simplexNumber, configurations in intersection:\n currentEdgeNumbers[simplexNumber] = [edgeNumber for (edgeNumber, configuration) in configurations]\n _configurations[simplexNumber] = {}\n for edgeNumber, configuration in configurations:\n allConfigurations.append(configuration)\n _configurations[simplexNumber][edgeNumber] = configuration\n \n edgeNumbers.append(currentEdgeNumbers)\n _allConfigurations.append(_configurations)\n \n # get common simplexes\n simplexNumbers = [Set(numbers) for numbers in simplexNumbers]\n commonSimplexes = simplexNumbers[0]\n for index in range(1, len(simplexNumbers)):\n currentSimplexes = simplexNumbers[index]\n commonSimplexes = commonSimplexes.intersection(currentSimplexes)\n\n # get common edges\n commonEdges = {}\n for key in commonSimplexes:\n currentCommonEdges = Set(edgeNumbers[0][key])\n for index in range(1, len(edgeNumbers)):\n currentEdges = Set(edgeNumbers[index][key])\n currentCommonEdges = currentCommonEdges.intersection(currentEdges)\n commonEdges[key] = currentCommonEdges\n overlapCount = [len(values) for values in list(commonEdges.values())]\n \n # filter common edges\n #overlapAmount = [len(commonEdges[key]) for key in commonEdges]\n #print >> sys.stderr, overlapAmount\n #plt.figure()\n #plt.hist(overlapAmount)\n #plt.savefig(\"overlapAmount_before_filtering.png\")\n\n if(len(overlapCount) > 0):\n #plt.figure()\n #plt.hist(overlapCount)\n #plt.show()\n #sys.exit()\n\n overlapMean = np.mean(overlapCount)\n commonEdges = {key : commonEdges[key] for key in commonEdges if len(commonEdges[key]) >= overlapMean}\n commonSimplexes = list(commonEdges.keys())\n\n #overlapAmount = [len(commonEdges[key]) for key in commonEdges]\n #print >> sys.stderr, overlapAmount\n #plt.figure()\n #plt.hist(overlapAmount)\n #plt.savefig(\"overlapAmount_after_filtering.png\")\n #sys.exit()\n \n # now we need to cycle through the configurations again\n # only pick ones corresponding to filtered edges\n # TODO: we may want to combine configurations across SLP intersections for optimality. 
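query_rm above keeps only the simplices (and then edges) that every demonstrated SLP dimension intersects; the reduction is a chain of set intersections. In isolation:

def common_ids(groups):
    # groups: one iterable of simplex (or edge) ids per SLP dimension
    common = set(groups[0])
    for g in groups[1:]:
        common &= set(g)
    return common

print(common_ids([[1, 2, 3], [2, 3, 4], [3, 2]]))  # -> {2, 3}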
can we?\n allConfigurationsCommon = []\n for commonSimplex in commonSimplexes:\n currentCommonEdges = commonEdges[commonSimplex]\n for commonEdge in currentCommonEdges:\n for alpIndex in range(len(simplexNumbers)):\n currentConfiguration = _allConfigurations[alpIndex][commonSimplex][commonEdge]\n #print >> sys.stderr, currentConfiguration\n allConfigurationsCommon.append(currentConfiguration)\n\n # filter configurations based on common simplexes\n if(eval(os.environ[\"CONFIGURATIONS_PRUNING\"]) == 1):\n allConfigurations = allConfigurationsCommon\n else:\n # no overlapping edges found\n # pruning cannot be used in this case\n pass\n\n # outlier detection code\n # outlier detection based on hubness\n # approximate labeling based on minority clustering\n\n if(eval(os.environ[\"CONFIGURATIONS_OUTLIER_DETECTION\"]) == 1):\n # outlier detection only valid for a dataset of minimum size\n # meanshift clustering sometimes fails if it somehow finds nans\n try:\n # do outlier detection\n [odPoints, labels] = od.get_approximate_labels(allConfigurations)\n [odPoints, labels] = od.do_lpod(odPoints, labels)\n \n allConfigurationsOd = []\n for configurationIndex in range(len(allConfigurations)):\n currentConfiguration = odPoints[configurationIndex] \n if(labels[configurationIndex] == 0):\n # not an outlier\n allConfigurationsOd.append(list(currentConfiguration))\n \n allConfigurations = allConfigurationsOd\n except:\n # outlier detection had an error\n # it is not used in this case\n pass\n \n # add solution to scatter plot\n if(plotConfigurations == True):\n print >> sys.stderr, \"->Plotting configurations\" \n allXs = []\n allYs = []\n allZs = []\n allErrors = []\n configurationsToPlot = []\n \n for configuration in allConfigurations:\n error = misc.list_sub(slps, fm.predict(configuration))\n error = sum([x**2 for x in error])\n configurationsToPlot.append((error, configuration))\n configurationsToPlot.sort() \n\n # filter configurations here\n # checking threshold effect\n allConfigurationsToPlot = [x for x in configurationsToPlot]\n histogramData = [error for error, configuration in allConfigurationsToPlot]\n mu = np.mean(histogramData)\n sigma = np.std(histogramData)\n #configurationsToPlot = [x for x in configurationsToPlot if (x[0] < (mu + 0.1*sigma) and x[0] > (mu - 0.1*sigma))]\n \n for error, configuration in configurationsToPlot:\n xs, ys, zs = configuration\n allXs.append(xs)\n allYs.append(ys)\n allZs.append(zs)\n allErrors.append(error)\n\n odPointsAllXs = [x[0] for x in odPoints]\n odPointsAllYs = [x[1] for x in odPoints]\n odPointsAllZs = [x[2] for x in odPoints]\n \n # plot point errors in 3d space \n plot = ax.scatter(allXs, allYs, allZs, c=allErrors, marker=m, cmap=\"cool\")\n plt.colorbar(plot, label=\"FM Prediction Error\")\n ax.set_xlabel(\"Max-align-turn\")\n ax.set_ylabel(\"Max-cohere-turn\")\n ax.set_zlabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space.png\")\n\n # plot point labels in 3d space\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n cmapClusters = matplotlib.cm.get_cmap(\"cool\", 2)\n plot = ax.scatter(odPointsAllXs, odPointsAllYs, odPointsAllZs, c=labels, marker=m, cmap=cmapClusters)\n plt.colorbar(plot, label=\"Cluster\").set_ticks([0, 1])\n ax.set_xlabel(\"Max-align-turn\")\n ax.set_ylabel(\"Max-cohere-turn\")\n ax.set_zlabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space_clusters.png\")\n \n # error histogram\n plt.figure()\n plt.hist(histogramData)\n plt.xlabel(\"FM Prediction Error\")\n plt.title(\"mu = \" + str(mu) + \", sigma = 
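The outlier pass above calls the repo's own hubness-based routines (od.get_approximate_labels / od.do_lpod, defined elsewhere in sampling/utils). As a stand-in with the same keep/drop shape, scikit-learn's LocalOutlierFactor can filter the candidate configurations (it needs more candidates than n_neighbors):

from sklearn.neighbors import LocalOutlierFactor

def drop_outliers(configurations, n_neighbors=20):
    # fit_predict returns 1 for inliers and -1 for outliers
    labels = LocalOutlierFactor(n_neighbors=n_neighbors).fit_predict(configurations)
    return [c for c, label in zip(configurations, labels) if label == 1]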
\" + str(sigma))\n plt.savefig(\"error_histogram\")\n \n projectDimensions = True\n if(projectDimensions == True):\n # plotting for single demonstration SLP configuration\n\n # plot projections with error\n plt.figure()\n plt.scatter(allXs, allYs, c=allErrors, marker=m, cmap=\"cool\")\n plt.colorbar(label=\"FM Prediction Error\")\n plt.gca().set_xlabel(\"Max-align-turn\")\n plt.gca().set_ylabel(\"Max-cohere-turn\")\n plt.savefig(\"solution_space_xy.png\")\n plt.figure()\n plt.scatter(allXs, allZs, c=allErrors, marker=m, cmap=\"cool\")\n plt.colorbar(label=\"FM Prediction Error\")\n plt.gca().set_xlabel(\"Max-align-turn\")\n plt.gca().set_ylabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space_xz.png\")\n plt.figure()\n plt.scatter(allYs, allZs, c=allErrors, marker=m, cmap=\"cool\")\n plt.colorbar(label=\"FM Prediction Error\")\n plt.gca().set_xlabel(\"Max-cohere-turn\")\n plt.gca().set_ylabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space_yz.png\")\n\n # plot projections with labels\n plt.figure()\n plt.scatter(odPointsAllXs, odPointsAllYs, c=labels, marker=m, cmap=cmapClusters)\n plt.colorbar(label=\"Cluster\").set_ticks([0, 1])\n plt.gca().set_xlabel(\"Max-align-turn\")\n plt.gca().set_ylabel(\"Max-cohere-turn\")\n plt.savefig(\"solution_space_xy_clusters.png\")\n plt.figure()\n plt.scatter(odPointsAllXs, odPointsAllZs, c=labels, marker=m, cmap=cmapClusters)\n plt.colorbar(label=\"Cluster\").set_ticks([0, 1])\n plt.gca().set_xlabel(\"Max-align-turn\")\n plt.gca().set_ylabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space_xz_clusters.png\")\n plt.figure()\n plt.scatter(odPointsAllYs, odPointsAllZs, c=labels, marker=m, cmap=cmapClusters)\n plt.colorbar(label=\"Cluster\").set_ticks([0, 1])\n plt.gca().set_xlabel(\"Max-cohere-turn\")\n plt.gca().set_ylabel(\"Max-separate-turn\")\n plt.savefig(\"solution_space_yz_clusters.png\")\n\n # clear memory\n del allXs, allYs, allZs, allErrors, configurationsToPlot, allConfigurationsToPlot\n\n return allConfigurations\n\n # format configurations for classifiaction\n def format_data_xy(self, allConfigurationsList):\n X = []\n Y = []\n for index in range(len(allConfigurationsList)):\n X += allConfigurationsList[index]\n Y += [index]*len(allConfigurationsList[index])\n\n return [X, Y]\n\n # compute fm errors and select configuration for minimum\n def get_min_error_configurations(self, fm, allConfigurationsList, slps):\n minErrorConfigurations = []\n for allConfigurations in allConfigurationsList:\n # error[0] contains error value\n # error[1] contains configuration\n errors = [(misc.list_sub(slps, fm.predict(configuration)), configuration) for configuration in allConfigurations]\n errors = [(sum([x**2 for x in error[0]]), error[1]) for error in errors]\n errors.sort()\n minErrorConfigurations.append(errors[0][1])\n\n return minErrorConfigurations\n\n # compute average configuration\n def get_mean_configurations(self, allConfigurationsList):\n meanConfigurations = []\n for allConfigurations in allConfigurationsList:\n meanConfiguration = tuple(misc.col_average(allConfigurations))\n meanConfigurations.append(meanConfiguration)\n\n return meanConfigurations\n\n def process_error_weighted_configuration(self, configuration, fm, slps, errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations):\n prediction = fm.predict(configuration)\n error = 0\n\n for index in range(len(slps)):\n error += (slps[index] - prediction[index])**2\n \n if(error > 0):\n if(zeroErrorConfigurations == 0):\n # no zero error configuration found yet\n 
normalizationFactor += 1/error\n for index in range(len(errorWeightedConfiguration)):\n errorWeightedConfiguration[index] += configuration[index]/error\n else:\n # error is zero\n if(zeroErrorConfigurations == 0):\n # first time we found a zero error configuration\n for index in range(len(errorWeightedConfiguration)):\n # wipe old data\n errorWeightedConfiguration[index] = 0\n zeroErrorConfigurations += 1\n \n # add new data\n for index in range(len(errorWeightedConfiguration)):\n errorWeightedConfiguration[index] += configuration[index]\n\n return [errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations]\n \n # compute error weighted average configuration\n def get_error_weighted_configurations(self, fm, allConfigurationsList, slps):\n errorWeightedConfigurations = []\n listIndex = -1\n \n for allConfigurations in allConfigurationsList:\n errorWeightedConfiguration = [0]*self.numIndependent\n normalizationFactor = 0\n zeroErrorConfigurations = 0\n\n if(self.writeToDisk == 1):\n # read from disk\n listIndex += 1\n with gzip.open(\"configuration_\" + str(listIndex+1) + \".pgz\", 'rb') as configurationFile:\n while 1:\n try:\n # read configuration\n configuration = cPickle.load(configurationFile)[listIndex]\n [errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations] = self.process_error_weighted_configuration(configuration, fm, slps, errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations)\n except EOFError:\n break\n else:\n # read from ram\n for configuration in allConfigurations:\n [errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations] = self.process_error_weighted_configuration(configuration, fm, slps, errorWeightedConfiguration, normalizationFactor, zeroErrorConfigurations)\n \n # nomalization\n for index in range(len(errorWeightedConfiguration)):\n if(zeroErrorConfigurations == 0):\n errorWeightedConfiguration[index] /= float(normalizationFactor)\n else:\n # we found some zero error configurations\n # we will only consider those configurations when computing a representative point\n errorWeightedConfiguration[index] /= float(zeroErrorConfigurations)\n \n errorWeightedConfigurations.append(tuple(errorWeightedConfiguration))\n\n return errorWeightedConfigurations\n\n # compute classifier based optimal configuration\n def get_svm_configurations(self, allConfigurationsList):\n # train svm classifier on configurations\n classifier = svm.SVC(probability=True)\n [X, Y] = self.format_data_xy(allConfigurationsList)\n classifier.fit(X, Y)\n\n # select configurations which maximize probability\n svmConfigurations = []\n for index in range(len(allConfigurationsList)):\n # compute score for each probability and select maximum\n configurationProbabilities = classifier.predict_proba(allConfigurationsList[index])\n configurationProbabilities = [(configurationProbabilities[i][index], allConfigurationsList[index][i]) for i in range(len(configurationProbabilities))]\n configurationProbabilities.sort(reverse=True)\n svmConfiguration = tuple(configurationProbabilities[0][1])\n svmConfigurations.append(svmConfiguration)\n\n return svmConfigurations\n\n # compute based on k means clustering\n def get_kmeans_configurations(self, allConfigurationsList):\n # train svm classifier on configurations\n nClusters = len(allConfigurationsList)\n kmeans = KMeans(init='k-means++', n_clusters=nClusters)\n [X, Y] = self.format_data_xy(allConfigurationsList)\n kmeans.fit(X, Y)\n\n # select centroids\n centroids = kmeans.cluster_centers_\n \n return 
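get_error_weighted_configurations above averages candidates with 1/error weights, except that once any zero-error candidate exists only those are averaged. The same rule, compactly, with NumPy's np.average carrying the weights:

import numpy as np

def error_weighted_configuration(fm, configurations, slps):
    errors = [sum((s - p) ** 2 for s, p in zip(slps, fm.predict(c)))
              for c in configurations]
    exact = [c for c, e in zip(configurations, errors) if e == 0]
    if exact:
        return tuple(np.mean(exact, axis=0))   # zero-error candidates take over
    weights = [1.0 / e for e in errors]
    return tuple(np.average(configurations, axis=0, weights=weights))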
centroids\n\n    # run experiment\n    def run(self, slpsList, plotConfigurations=False, saveError=False):\n        [fm, rm] = self.get_mappings(slpsList)\n\n        # append to model list\n        self.fm = fm\n        self.rm = rm\n\n        # plot solution space\n        fig = plt.figure()\n        ax = fig.add_subplot(111, projection='3d')\n\n        print >> sys.stderr, \"->Querying RM for configurations\"\n\n        startTime = time.time()\n        allConfigurationsList = []\n        proxyPlots = []\n        labels = []\n        for slps,c,m in slpsList:\n            allConfigurations = self.query_rm(fm, rm, slps, ax, c, m, plotConfigurations)\n            allConfigurationsList.append(allConfigurations)\n\n            proxyPlot = matplotlib.lines.Line2D([0],[0], linestyle=\"none\", c=c, marker=m)\n            proxyPlots.append(proxyPlot)\n            labels.append(\"ALPs \" + PLOT_DEMONSTRATIONS[self.rmQueryCount])\n            ax.legend(proxyPlots, labels, numpoints = 1)\n            self.rmQueryCount += 1\n        stopTime = time.time()\n\n        plt.close(fig)\n\n        # how do we pick one?\n        print >> sys.stderr, \"configurations retrieved: \" + str([len(allConfigurations) for allConfigurations in allConfigurationsList]) + \" [in \" + str(stopTime-startTime) + \" seconds]\"\n        print >> sys.stderr, \"->Selecting Configurations\"\n\n        # downsample by num dep\n        # required to make higher dimension feature vectors tractable\n        # can be removed later when computation is manageable\n        downsampledAllConfigurationsList = []\n        for allConfigurations in allConfigurationsList:\n            if(len(allConfigurations) > 1):\n                downsampledAllConfigurationsList.append(random.sample(allConfigurations, len(allConfigurations)/(self.numDependent**self.numIndependent)))\n            else:\n                downsampledAllConfigurationsList.append([allConfigurations[0]])\n        allConfigurationsList = downsampledAllConfigurationsList\n\n        # check forward mapping error for each configuration returned\n        # check location in point cloud\n        startTime = time.time()\n        #configurationList = self.get_mean_configurations(allConfigurationsList)\n        #configurationList = self.get_min_error_configurations(fm, allConfigurationsList, slps)\n        configurationList = self.get_error_weighted_configurations(fm, allConfigurationsList, slps)\n        #configurationList = self.get_svm_configurations(allConfigurationsList)\n        #configurationList = self.get_kmeans_configurations(allConfigurationsList)\n        stopTime = time.time()\n\n        # experiment complete\n        print >> sys.stderr, \"->Done\" + \" [in \" + str(stopTime-startTime) + \" seconds]\"\n\n        # compute error for each configuration\n        # print error\n        demonstrationIndex = 0\n        for slps,c,m in slpsList:\n            configuration = configurationList[demonstrationIndex]\n            prediction = fm.predict(configuration)\n            error = 0\n            for index in range(len(slps)):\n                error += (slps[index] - prediction[index])**2\n            if(saveError == True):\n                with open(\"suggested_alps_fm_prediction_error.txt\", \"a\") as predictionErrorFile:\n                    predictionErrorFile.write(str(error) + \"\\n\")\n            demonstrationIndex += 1\n\n        return [allConfigurationsList, configurationList]\n\n# learning from visual demonstrations\ndef lfd(plotConfigurations=False, saveError=False):\n    # expects configuration file as input\n    experiment = Experiment()\n\n    # load network if needed\n    net = fi.get_network(experiment.descriptorSetting, IMAGE_FEATURE_PATH_PREFIX)\n\n    # traverse all demonstration data\n    plotIndex = 0\n    slpsList = []\n\n    # process directories in sorted order\n    sortedPaths = []\n    for dirName, subdirList, fileList in os.walk(experiment.demonstrationFolder):\n        for sortedSubdirName in sorted(subdirList):\n            sortedPaths.append(dirName + \"/\" + sortedSubdirName)\n\n    # get slps for demonstrations\n    
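# each demonstration directory below is reduced to one SLP vector: every file is featurized\n    # (or parsed as text) and the per-file descriptors are column-averaged\n    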
for sortedPath in sortedPaths:\n        for dirName, subdirList, fileList in os.walk(sortedPath):\n            # only use non empty subdirectories\n            if(len(fileList) > 0):\n                # traverse all files in this directory\n                if(bool(experiment.images) == True):\n                    # read dependent values as images\n                    dependentValues = [tuple(fi.get_features(net, dirName + \"/\" + fileName, experiment.descriptorSetting)) for fileName in fileList]\n                else:\n                    # read dependent values as text\n                    dependentValues = [tuple([eval(x) for x in open(dirName + \"/\" + fileName).readlines()[0].replace(\"\\n\", \"\").split(\" \")]) for fileName in fileList]\n\n                dependentValues = tuple(misc.col_average(dependentValues))\n                #print >> sys.stderr, \"dependent values: \" + str(dependentValues)\n\n                # edit scale data file name based on descriptor setting\n                scaleDataFileNamePrefix = experiment.scaleDataFileName.split(\".\")[0]\n                scaleDataFileNameExtension = experiment.scaleDataFileName.split(\".\")[1]\n                scaleDataFileName = scaleDataFileNamePrefix + \"_\" + experiment.abm\n                if(bool(experiment.images) == True):\n                    scaleDataFileName += \"_\" + experiment.descriptorSetting.lower()\n                scaleDataFileName += \".\" + scaleDataFileNameExtension\n\n                # scale demonstration data\n                with open(\"sampling/\" + experiment.abm + \"/\" + scaleDataFileName, \"r\") as scaleDataFile:\n                    # read in order of writing in map.py\n                    maxs = eval(scaleDataFile.readline().strip(\"\\n\"))\n                    mins = eval(scaleDataFile.readline().strip(\"\\n\"))\n\n                dependentMaxs = maxs[experiment.numIndependent:]\n                dependentMins = mins[experiment.numIndependent:]\n                dependentValuesMax = {i: dependentMaxs[i] for i in range(len(dependentMaxs))}\n                dependentValuesMin = {i: dependentMins[i] for i in range(len(dependentMins))}\n                dependentValues = tuple((dependentValues[index] - dependentValuesMin[index])/(dependentValuesMax[index] - dependentValuesMin[index]) if (dependentValuesMax[index] > dependentValuesMin[index]) else dependentValuesMax[index] for index in range(len(dependentValues)))\n\n                # reduce dependent values if necessary\n                if(eval(os.environ[\"REDUCED\"]) == 1):\n                    model = cPickle.load(open(\"sampling/utils/transforms/\" + experiment.abm + \"_reduced_model.p\", \"rb\"))\n                    dependentValues = model.transform(np.array(dependentValues).reshape(1, -1))\n                    dependentValues = tuple(dependentValues[0])\n\n                # if feedback is on\n                # use feedback to modify queried slps\n                if((eval(os.environ[\"FEEDBACK\"]) == 1) and (os.path.isfile(\"queried_slps.txt\") == True)):\n                    with open(\"queried_slps.txt\", \"r\") as inFile:\n                        queriedSlps = eval(inFile.readlines()[0])\n                    slpsList.append((queriedSlps, PLOT_COLOR[plotIndex], PLOT_SHAPE[plotIndex]))\n                else:\n                    slpsList.append((dependentValues, PLOT_COLOR[plotIndex], PLOT_SHAPE[plotIndex]))\n\n                plotIndex += 1\n\n                # default value for queried slps\n                # same as current demonstration slps\n                queriedSlps = dependentValues\n\n    #\n    # feedback code\n    #\n\n    # random initialization if feedback is not yet applicable\n    if(eval(os.environ[\"FEEDBACK\"]) == 1):\n        with open(\"feedback_iteration.txt\", \"r\") as inFile:\n            iteration = eval(inFile.readlines()[0])\n        if(iteration <= 2):\n            with open(\"regression_parameter.txt\", \"w\") as outFile:\n                regressionParameterLowerLimit = eval(os.environ[\"REGRESSION_PARAMETER_LOWER_LIMIT\"])\n                regressionParameterUpperLimit = eval(os.environ[\"REGRESSION_PARAMETER_UPPER_LIMIT\"])\n                regressionParameter = regressionParameterLowerLimit + (regressionParameterUpperLimit-regressionParameterLowerLimit)*random.random()\n                outFile.write(str(regressionParameter) + \"\\n\")\n\n    # 
TODO: currently only works for a single slp suggestion\n    # check if file exists\n    if((eval(os.environ[\"FEEDBACK\"]) == 1) and (os.path.isfile(\"queried_slps.txt\") == True)):\n        # use feedback to modify queried slps\n        with open(\"queried_slps.txt\", \"r\") as inFile:\n            queriedSlps = eval(inFile.readlines()[0])\n\n        # check if file exists\n        if(os.path.isfile(\"suggested_slps.txt\") == True):\n            with open(\"suggested_slps.txt\", \"r\") as inFile:\n                suggestedSlps = eval(inFile.readlines()[0])[0]\n\n            # check consistency of suggested slps with current configuration\n            if(len(suggestedSlps) == len(dependentValues)):\n                #\n                # fm dataset optimization\n                #\n\n                # scale suggested slps\n                # TODO: make this a common function since it's used everywhere\n                suggestedSlps = tuple((suggestedSlps[index] - dependentValuesMin[index])/(dependentValuesMax[index] - dependentValuesMin[index]) if (dependentValuesMax[index] > dependentValuesMin[index]) else dependentValuesMax[index] for index in range(len(suggestedSlps)))\n\n                # the differential accounts for the distance from the demonstration slps\n                # demonstration slps are currently stored as dependentValues\n                with open(\"feedback_iteration.txt\", \"r\") as inFile:\n                    iteration = inFile.readlines()[0].strip(\"\\n\")\n                alpha = eval(os.environ[\"LEARNING_RATE_FM_DATASET\"])\n                alpha /= eval(iteration)\n                queriedSlps = tuple([queriedSlps[index] + alpha*(dependentValues[index] - suggestedSlps[index]) for index in range(len(dependentValues))])\n\n                # write data\n                with open(\"feedback_input_difference.txt\", \"a\") as outFile:\n                    outFile.write(str(np.abs(dependentValues[0] - queriedSlps[0])) + \"\\n\")\n                with open(\"feedback_output_difference.txt\", \"a\") as outFile:\n                    outFile.write(str(dependentValues[0] - suggestedSlps[0]) + \"\\n\")\n\n    #\n    # regression parameter optimization initialization\n    #\n\n    # the first two iterations move in a random direction (handled above); after that we use feedback\n    with open(\"feedback_iteration.txt\", \"r\") as inFile:\n        iteration = eval(inFile.readlines()[0])\n    if(iteration > 2):\n        # now we do feedback stuff\n        with open(\"feedback_regression_parameter.txt\", \"r\") as feedbackRegressionParameterFile:\n            with open(\"feedback_output_difference.txt\", \"r\") as feedbackOutputDifferenceFile:\n                regressionParameterLines = feedbackRegressionParameterFile.readlines()\n                outputDifferenceLines = feedbackOutputDifferenceFile.readlines()\n                currentRegressionParameter = eval(regressionParameterLines[-1])\n                previousRegressionParameter = eval(regressionParameterLines[-2])\n                regressionParameterChange = currentRegressionParameter - previousRegressionParameter\n                currentOutputDifference = eval(outputDifferenceLines[-1])\n                previousOutputDifference = eval(outputDifferenceLines[-2])\n                outputDifferenceChange = currentOutputDifference - previousOutputDifference\n\n                alpha = eval(os.environ[\"LEARNING_RATE_REGRESSION_PARAMETER\"])\n                alpha /= iteration\n\n                # if output difference is negative, we move in direction to change\n                nextRegressionParameter = currentRegressionParameter - regressionParameterChange*outputDifferenceChange*alpha\n                with open(\"regression_parameter.txt\", \"w\") as outFile:\n                    outFile.write(str(nextRegressionParameter) + \"\\n\")\n\n    # TODO: currently assumes only a single demonstration at a time. 
fix for multiple demonstrations\n with open(\"queried_slps.txt\", \"w\") as outFile:\n outFile.write(str(queriedSlps) + \"\\n\")\n \n configurationListPerRun = []\n\n for run in range(RUNS_PER_SLPS):\n # run and query slps\n [allConfigurationsList, configurationList] = experiment.run(slpsList, plotConfigurations=plotConfigurations, saveError=saveError)\n\n # scale configurations back from normalized space\n independentMaxs = maxs[:experiment.numIndependent]\n independentMins = mins[:experiment.numIndependent]\n independentValuesMax = {i: independentMaxs[i] for i in range(len(independentMaxs))}\n independentValuesMin = {i: independentMins[i] for i in range(len(independentMins))}\n normalizedConfigurationList = []\n for configuration in configurationList:\n normalizedConfigurationList.append(tuple([independentValuesMin[cIndex] + (independentValuesMax[cIndex]-independentValuesMin[cIndex])*configuration[cIndex] for cIndex in range(len(configuration))]))\n configurationList = tuple(normalizedConfigurationList)\n configurationListPerRun.append(configurationList)\n \n # return configurations\n print >> sys.stderr, \"configurationList: \" + str(configurationList)\n\n # compute average over runs\n averageConfigurationList = []\n for slpsIndex in range(len(slpsList)):\n slpsConfigurations = tuple([configurationListOneRun[slpsIndex] for configurationListOneRun in configurationListPerRun])\n averageConfigurationList.append(tuple(misc.col_average(slpsConfigurations)))\n\n print >> sys.stderr, \"averageConfigurationList: \" + str(averageConfigurationList)\n sys.stdout.write(str(averageConfigurationList))\n\n if(\"smt\" in experiment.regression):\n # circumvent excessive prints from smt\n with open(\"temp_average_configuration_list.txt\", \"w\") as outFile:\n outFile.write(str(averageConfigurationList) + \"\\n\")\n\n# main function\ndef main():\n lfd(plotConfigurations=False, saveError=True)\n\n# execute main\nmain()\n" }, { "alpha_fraction": 0.80923992395401, "alphanum_fraction": 0.8159463405609131, "avg_line_length": 48.703704833984375, "blob_id": "867655188aa23578bcdd69bc7ce77b29870b3d76", "content_id": "4c735cf47aaed6e293b4cb5b9959c5e2a5fa13f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 117, "num_lines": 27, "path": "/data/dataset_analysis/dataset_analysis.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# demonstration specification\nexport DEMONSTRATION_ID=1\n\n# steps across dimensions\nexport DIM1_STEP=0.1\nexport DIM2_STEP=0.1\n\n# clean old datasets\nrm -f $DEMONSTRATION_ID/predictions_best/slp_dataset.txt\nrm -f $DEMONSTRATION_ID/predictions_second_best/slp_dataset.txt\nrm -f $DEMONSTRATION_ID/predictions_third_best/slp_dataset.txt\nrm -f $DEMONSTRATION_ID/predictions_worst/slp_dataset.txt\nrm -f $DEMONSTRATION_ID/predictions_second_worst/slp_dataset.txt\nrm -f $DEMONSTRATION_ID/predictions_third_worst/slp_dataset.txt\n\n# make a copy of the datasets\n# uniform name for ease of use\n\ncp $DEMONSTRATION_ID/predictions_best/slp_dataset* $DEMONSTRATION_ID/predictions_best/slp_dataset.txt\ncp $DEMONSTRATION_ID/predictions_second_best/slp_dataset* $DEMONSTRATION_ID/predictions_second_best/slp_dataset.txt\ncp $DEMONSTRATION_ID/predictions_third_best/slp_dataset* $DEMONSTRATION_ID/predictions_third_best/slp_dataset.txt\ncp $DEMONSTRATION_ID/predictions_worst/slp_dataset* $DEMONSTRATION_ID/predictions_worst/slp_dataset.txt\ncp 
$DEMONSTRATION_ID/predictions_second_worst/slp_dataset* $DEMONSTRATION_ID/predictions_second_worst/slp_dataset.txt\ncp $DEMONSTRATION_ID/predictions_third_worst/slp_dataset* $DEMONSTRATION_ID/predictions_third_worst/slp_dataset.txt\n\n# invoke python script\npython dataset_analysis.py $DEMONSTRATION_ID $DIM1_STEP $DIM2_STEP\n" }, { "alpha_fraction": 0.5875629782676697, "alphanum_fraction": 0.5924505591392517, "avg_line_length": 35.4640007019043, "blob_id": "978978042f347b82c80ef1a467ac77c89710c4c0", "content_id": "0dd2c28a3cbaf433e292feaf53f2e43974e9e05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13299, "license_type": "no_license", "max_line_length": 181, "num_lines": 375, "path": "/src/amf/rm.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# rm.py contains the ReverseMapping\n\nimport misc\nimport numpy\nimport sys\nimport multiprocessing as mp\nimport time\nimport itertools\nimport random\nimport os\nimport gzip\nimport cPickle\n\n# parallel processing constants\nBATCH_SIZE = 8\n\n# hack to avoid rounding error\ndef _round(n):\n    return numpy.round(n, 5)\n\nclass RMSimplex(object):\n    # Simplexes are kind of immutable...\n\n    # The LAST variable of each corner vector is the dependent variable\n    # (this is important)\n\n    def __init__(self, corners, slpsList):\n        \"\"\"\n        A simplex.\n        corners should have the form ((ind0, ind1, ...), (dep1, dep2, dep3))\n        \"\"\"\n        # We already know which corners are adjacent to which\n        # (each corner shares an edge with every other corner)\n\n        self._corners = tuple(corners)\n        self._inds = tuple( corner[0] for corner in corners )\n        self._deps = tuple( corner[1] for corner in corners )\n        self._min = misc.col_min(self._deps)\n        self._max = misc.col_max(self._deps)\n        self._edges = []\n\n        # initialize edges only if needed\n        # only if this simplex contains the query slp\n        # otherwise the slp has no utility\n        isContains = False\n\n        for querySlps,c,m in slpsList:\n            for index in range(len(querySlps)):\n                isContains = isContains or self.contains(index, querySlps[index])\n\n        if(isContains == True):\n            for cidx in range(len(self._corners)):\n                corner = self._corners[cidx]\n                for oidx in range(cidx + 1, len(self._corners)):\n                    other_corner = self._corners[oidx]\n                    self._edges.append((corner, other_corner))\n            self._edges = tuple(self._edges)\n\n    @property\n    def corners(self):\n        return self._corners\n\n    @property\n    def edges(self):\n        return self._edges\n\n    def contains(self, slp_num, value):\n        \"\"\"Determines if the value for slp_num idx lives inside this simplex.\"\"\"\n        return ((value >= self._min[slp_num]) and (value <= self._max[slp_num]))\n\n    def intersections(self, slp_num, value):\n        \"\"\"Returns the approximate places that the value plane intersects with this simplex\"\"\"\n        inters = []\n        for edgeNumber, (scorner, bcorner) in enumerate(self.edges):\n            # maintain order of scorner <= bcorner\n            if(scorner[1][slp_num] > bcorner[1][slp_num]):\n                tempCorner = bcorner\n                bcorner = scorner\n                scorner = tempCorner\n\n            # check to see that this value is inside these edges\n            if not ((value[slp_num] >= scorner[1][slp_num]) and (value[slp_num] <= bcorner[1][slp_num])):\n                continue\n\n            # this is the distance between the 2 corner values\n            distance = bcorner[1][slp_num] - scorner[1][slp_num]\n\n            if(distance == 0):\n                # return any end point value\n                inter = tuple( (1.0)*s for s, b in zip(scorner[0], bcorner[0]) )\n            else:\n                # this is the distance of 'value' from the smaller corner\n                # if value is close 
to smaller, this ratio will be close to zero\n # if value is closer to bigger, this ratio will be close to one\n ratio = (value[slp_num] - scorner[1][slp_num]) / distance\n inter = tuple( (1.0-ratio)*s + ratio * b for s, b in zip(scorner[0], bcorner[0]) )\n \n inters.append([edgeNumber, inter])\n\n return tuple(inters)\n\n def __repr__(self):\n return 'simplex:' + repr(self._edges)\n\nclass ReverseMapping(object):\n def __init__(self, trained_fm, ranges, granularity, slpsList):\n\n self.fm = trained_fm\n self.granularity = granularity\n self.ranges = ranges\n self.steps = tuple( (ma - mi) / float(self.granularity) for mi, ma in self.ranges )\n self.slpsList = slpsList\n\n # disk write stuff\n self.writeToDisk = eval(os.environ[\"WRITE_TO_DISK\"])\n self.allIntersectionsCalls = 0\n \n # the dimensionality of the configuration space\n self.dim = len(ranges)\n\n # the dimensionality of the SLP space\n self.slp_dim = None\n\n self.__bin__ = misc.binary(self.dim)\n\n self.knots = {}\n self.simplexes = []\n print >> sys.stderr, 'building knots'\n startTime = time.time()\n self.build_knots()\n stopTime = time.time()\n print >> sys.stderr, 'done building knots, now building simplexes' + \" [\" + str(len(self.knots)) + \" in \" + str(stopTime-startTime) + \" seconds]\"\n startTime = time.time() \n self.build_simplexes()\n stopTime = time.time()\n print >> sys.stderr, 'done building simplexes' + \" [in \" + str(stopTime-startTime) + \" seconds]\"\n\n def build_knots(self):\n self.build_knots_inds()\n self.build_knots_deps()\n\n def build_knots_inds(self):\n values = [list(numpy.arange(self.ranges[curDim][0], self.ranges[curDim][1], (self.ranges[curDim][1] - self.ranges[curDim][0])/self.granularity)) for curDim in range(self.dim)]\n indexes = [range(len(values[i])) for i in range(len(values))]\n valuesPoints = [x for x in itertools.product(*values)]\n indexesPoints = [x for x in itertools.product(*indexes)]\n\n # sort items for correspondence\n valuesPoints.sort()\n indexesPoints.sort()\n\n # assign key-value pairs\n self.knots = dict(zip(indexesPoints, valuesPoints))\n\n def process_build_knots_deps(self, q, new_idx):\n new_vector = self.knots[new_idx]\n predicted = self.fm.predict(new_vector)\n q.put([new_idx, predicted])\n\n def build_knots_deps_parallel(self):\n # setup a list of processes that we want to run\n q = mp.Queue()\n processes = [mp.Process(target=self.process_build_knots_deps, args=(q, new_idx)) for new_idx in self.knots] \n batchSize = BATCH_SIZE\n batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n for batch in batches:\n # run processes\n for p in batch:\n p.start() \n for p in batch:\n [new_idx, predicted] = q.get()\n self.knots[new_idx] = predicted\n for p in batch:\n p.join() \n\n # use last predicted point in dependent parameter space for length\n # we can use any key for this\n self.slp_dim = len(self.knots.values()[0])\n\n def build_knots_deps(self):\n # predict for each point in independent parameter space\n for new_idx in self.knots:\n new_vector = self.knots[new_idx]\n predicted = self.fm.predict(new_vector)\n self.knots[new_idx] = predicted\n \n # use last predicted point in dependent parameter space for length\n self.slp_dim = len(predicted)\n \n def build_simplexes(self, col=None):\n for key in sorted(self.knots.keys()):\n # check to see if this root is on the fringe and thus not actually a root\n for idx in key:\n if idx == (self.granularity - 1):\n break\n else:\n # extract simplexes\n # only keep those simplexes which are actually 
useful\n                sims = self.simplexes_from_cube(key)\n                sims = [x for x in sims if(len(x._edges) > 0)]\n\n                if(self.writeToDisk == 1):\n                    # save to compressed file\n                    with gzip.open(\"sims.pgz\", 'ab') as simsFile:\n                        cPickle.dump(sims, simsFile, -1)\n                else:\n                    # save to ram\n                    self.simplexes.extend(sims)\n\n        '''\n        # karan: testing downsampling\n        sampleAmount = 100000\n\n        if(len(self.simplexes) > sampleAmount):\n            # downsample\n            # for computational tractability\n            sampledSimplexes = dict(random.sample(self.simplexes.items(), sampleAmount))\n            self.simplexes = sampledSimplexes\n        '''\n\n    def wiggle(self, root):\n        \"\"\" Returns the closest point to this root that is an actual root. \"\"\"\n        distance, knot, knotKey = min((misc.distance(root, self.knots[knotKey]), self.knots[knotKey], knotKey) for knotKey in self.knots.keys())\n        print >> sys.stderr, \"BADNESS: Wiggling point %s to knot %s\" % (repr(root), repr(knot))\n\n        return [knot, knotKey]\n\n    def cube_at_root(self, root):\n        \"\"\" Returns the members of this cube with this point as its root. \"\"\"\n        corners = []\n\n        for perm in self.__bin__:\n            corner = []\n            for idx in xrange(self.dim):\n                corner.append(perm[idx] + root[idx])\n            corner = tuple(corner)\n            val = self.knots[corner]\n            corners.append((self.translate_idx(corner), val))\n\n        return tuple(corners)\n\n    def translate_idx(self, vec_idx):\n        \"\"\" Translates an index to a configuration\"\"\"\n        corner = []\n        for idx in xrange(self.dim):\n            corner.append(vec_idx[idx] * self.steps[idx] + self.ranges[idx][0])\n\n        return tuple(corner)\n\n    def simplexes_from_cube(self, root):\n        cube_corners = self.cube_at_root(root)\n        # cube_at_root always has the first right corner root as [0]\n        # at [-1] we have the other right corner root\n\n        # special case for 1 dimensional configuration space\n        if self.dim == 1:\n            return (RMSimplex(cube_corners, self.slpsList),)\n        else:\n            return RMSimplex(cube_corners[:-1], self.slpsList), RMSimplex(cube_corners[1:], self.slpsList)\n\n    def process_intersections(self, q, simplex, param_num, value):\n        out = []\n        inter = simplex.intersections(param_num, value)\n        if len(inter) > 0:\n            out.append(inter)\n\n        q.put(out)\n\n    def intersections_parallel(self, param_num, value):\n        out = []\n\n        # setup a list of processes that we want to run\n        q = mp.Queue()\n        processes = [mp.Process(target=self.process_intersections, args=(q, simplex, param_num, value)) for simplex in self.simplexes]\n        batchSize = BATCH_SIZE\n        batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n        for batch in batches:\n            # run processes\n            for p in batch:\n                p.start()\n            for p in batch:\n                out += q.get()\n            for p in batch:\n                p.join()\n\n        return tuple(out)\n\n    def intersections(self, param_num, value):\n        out = []\n\n        if(self.writeToDisk == 1):\n            # read from disk\n            with gzip.open(\"sims.pgz\", 'rb') as simsFile:\n                with gzip.open(\"configuration_\" + str(self.allIntersectionsCalls) + \".pgz\", 'ab') as configurationFile:\n                    while 1:\n                        try:\n                            sims = cPickle.load(simsFile)\n                            for simplex in sims:\n                                inter = simplex.intersections(param_num, value)\n                                if len(inter) > 0:\n                                    for configurations in inter:\n                                        for configuration in configurations:\n                                            cPickle.dump(inter, configurationFile, -1)\n                        except EOFError:\n                            break\n        else:\n            # read from ram\n            for simplexNumber, simplex in enumerate(self.simplexes):\n                inter = simplex.intersections(param_num, value)\n                if len(inter) > 0:\n                    out.append([simplexNumber, inter])\n\n        return tuple(out)\n\n    def process_all_intersections(self, q, d, value):\n        q.put(self.intersections(d, value))\n    
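# note: the parallel variants collect q.get() results in completion order, so the ordering\n    # of intersections across simplexes and SLP dimensions is not guaranteed\n    # usage sketch (names illustrative): rm = ReverseMapping(trained_fm, ranges, granularity, slpsList)\n    # followed by candidates = rm.all_intersections(value) for a normalized SLP vector 'value'\n    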
\n def all_intersections_parallel(self, value):\n allIntersections = []\n \n # setup a list of processes that we want to run\n q = mp.Queue()\n processes = [mp.Process(target=self.process_all_intersections, args=(q, d, value)) for d in range(self.slp_dim)] \n batchSize = BATCH_SIZE\n batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n for batch in batches:\n # run processes\n for p in batch:\n p.start() \n for p in batch:\n allIntersections.append(q.get())\n for p in batch:\n p.join() \n\n if(len(allIntersections[0]) > 0):\n return tuple(allIntersections)\n else:\n # empty intersection set. need to wiggle\n print >> sys.stderr, \"BADNESS: PARALLELISM NOT USEFUL BECAUSE OF WIGGLE!\"\n [wiggledValue, wiggledKey] = self.wiggle(value)\n return [[[tuple([x/float(self.granularity) for x in wiggledKey])]]]\n \n def all_intersections(self, value):\n # increment call counter\n self.allIntersectionsCalls += 1\n \n \"\"\" Finds intersections with each provided SLP \"\"\"\n allIntersections = [self.intersections(d, value) for d in range(self.slp_dim)]\n\n if(len(allIntersections[0]) > 0):\n return tuple(allIntersections)\n else:\n # empty intersection set. need to wiggle\n [wiggledValue, wiggledKey] = self.wiggle(value)\n return [[[tuple([x/float(self.granularity) for x in wiggledKey])]]]\n \n def distance_to(self, param_num, config, value):\n # i'm going to cheat for now and just pick the closest intersecting corner\n ints = self.intersections(param_num, value)\n\n dists = []\n for simplex in ints:\n for i in simplex:\n dists.append( (misc.distance(i, config)) )\n \n if len(dists) == 0:\n print >> sys.stderr, \"BADNESS: no intersections found for %s\" % repr(value)\n dist = None\n else:\n dist = min(dists)\n\n return dist\n" }, { "alpha_fraction": 0.7288135886192322, "alphanum_fraction": 0.7288135886192322, "avg_line_length": 13.75, "blob_id": "70e2bd8c28ffa2d4d8281f4a0f7c39228fb33b5b", "content_id": "0a7fa60de76adc20c8a52e56fcce98102fdeb963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 118, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/src/sampling.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# load configuration\nsource configuration.sh\n\n# sample data\necho \"=>sampling data\"\ncd sampling\n./sampling.sh\ncd ../..\n" }, { "alpha_fraction": 0.6308630108833313, "alphanum_fraction": 0.6374296545982361, "avg_line_length": 26.50967788696289, "blob_id": "d5e3cd2f68cc6adb325d89933359324154c70220", "content_id": "26f32adeb7f1598db01cad8611ba640f9415ecfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4264, "license_type": "no_license", "max_line_length": 140, "num_lines": 155, "path": "/src/sampling/eum/sample_eum_parallel.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\nimport multiprocessing as mp\n\nfrom negotiation_model import *\nfrom bdm_agent import *\n\n# parallel processing constants\nBATCH_SIZE = 8\n\n# seed this for fixed environment\n# for better replication of results\nrandom.seed(0)\n\n# constants\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 2\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 200\n\n# process single run data\ndef process_run(q, run, qValue, tValue, bookData, salienceValues):\n try:\n line = main_eum(qValue, tValue, bookData, salienceValues)\n except:\n line = \"\"\n \n # add newline 
for clean output\n line = \"\\n\" + line\n \n # write to queue\n q.put(line)\n\ndef get_salience_values(timeLapse, numberOfAgents):\n salienceValues = np.zeros((timeLapse, numberOfAgents))\n\n for stepNumber in range(timeLapse):\n for agentNumber in range(numberOfAgents):\n salienceValues[stepNumber][agentNumber] = random.random()\n\n return salienceValues\n\n# Defining the model objects\nclass BDMActor(NegotiationActor):\n DecisionClass = BDM_Agent\n\nclass NegotiationModel_(NegotiationModel):\n # Variables for median caching\n median_computed_last = -1\n median = -1\n \n def find_median(self):\n if self.median_computed_last != self.schedule.steps:\n self.median = super().find_median()\n self.median_computed_last = self.schedule.steps\n return self.median\n\nclass ModelOutput:\n def __init__(self, model):\n '''\n Store data from model run.\n '''\n self.agent_vars = model.datacollector.get_agent_vars_dataframe()\n self.model_vars = model.datacollector.get_model_vars_dataframe()\n self.log = model.log\n\ndef load_data():\n # Load data \n bookData = pd.read_csv(\"BDM_ColdWar.csv\")\n bookData.Position = (bookData.Position + 100)/200\n\n return bookData\n\ndef main_eum(qValue, tValue, bookData, salienceValues):\n # define agents\n agents = []\n\n for i, row in bookData.iterrows():\n newAgent = BDMActor(row.Country, row.Capability, row.Position, 1)\n newAgent.decision_model.Q = qValue\n newAgent.decision_model.T = tValue\n newAgent.salience = salienceValues[i]\n agents.append(newAgent)\n\n # instantiate model\n model = NegotiationModel_(agents)\n\n # run model\n for stepNumber in range(TIME_LAPSE):\n agentNumber = 0\n for agent in model.agents:\n #agent.salience = random.random()\n agent.salience = salienceValues[stepNumber][agentNumber]\n agentNumber += 1 \n model.step()\n\n # collect data for next steps\n dependentValues = []\n modelOutput = ModelOutput(model)\n dependentValues.append(list(modelOutput.model_vars[\"Median\"][-SAMPLES-1:]))\n dependentValues.append(list(modelOutput.model_vars[\"Mean\"][-SAMPLES-1:]))\n \n # print line corresponding to this execution\n line = str(qValue) + \" \" + str(tValue)\n for dependentValue in dependentValues:\n line += \" \" + str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n\n return line\n\ndef main():\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n # read data\n bookData = load_data()\n\n # get salience values\n [rows, columns] = bookData.shape\n salienceValues = get_salience_values(TIME_LAPSE, rows)\n\n qValues = []\n tValues = []\n \n for run in range(RUNS):\n # sample random ALPs\n qValue = random.random()\n tValue = random.random()\n\n qValues.append(qValue)\n tValues.append(tValue)\n \n # setup a list of processes that we want to run\n q = mp.Queue()\n processes = [mp.Process(target=process_run, args=(q, run, qValues[run], tValues[run], bookData, salienceValues)) for run in range(RUNS)]\n batchSize = BATCH_SIZE\n batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n outFile = open(\"output.txt\", \"a\")\n \n for batch in batches:\n # run processes\n for p in batch:\n p.start()\n for p in batch:\n line = q.get()\n outFile.write(line)\n # exit the completed processes\n for p in batch:\n p.join()\n\n outFile.close()\n\nmain()\n" }, { "alpha_fraction": 0.5772079825401306, "alphanum_fraction": 0.5948718190193176, "avg_line_length": 21.5, "blob_id": "172f17fb56c1f893cf528f867f348c507707bc76", "content_id": 
"5a0207a4e6641ab024fb9863a512420f9f05a4ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1755, "license_type": "no_license", "max_line_length": 92, "num_lines": 78, "path": "/src/sampling/schelling/sample_schelling.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import SchellingModel\nimport random\n\n# only use three of the several ALPs\n# density (0,1)\n# minority_pc (0,1)\n\n# use one model-level SLP\n# happy\n\n# constant ALPs\nHEIGHT = 20\nWIDTH = 20\nHOMOPHILY = 4\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 1\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 1000\n\ndef main_schelling(density, minorityPc):\n # instantiate and run model\n model = SchellingModel(\n height=HEIGHT,\n width=WIDTH,\n density=density,\n minority_pc=minorityPc,\n homophily=HOMOPHILY)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n try:\n # step\n model.step()\n except:\n # no empty cells\n pass\n \n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n try:\n # step\n model.step()\n except:\n # saturated\n # no empty cells\n pass\n\n # read data\n data = model.datacollector.get_model_vars_dataframe() \n dependentValues.append(list(data.happy)[-SAMPLES-1:])\n\n # print line corresponding to this execution\n line = str(density) + \" \" + str(minorityPc)\n for dependentValue in dependentValues:\n line += \" \" + str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n print(line)\n\n \ndef main():\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n for run in range(RUNS):\n # sample random ALPs\n density = random.random() \n minorityPc = random.random() \n \n # run model using those ALPs\n main_schelling(density, minorityPc)\n\nmain()\n" }, { "alpha_fraction": 0.6653721928596497, "alphanum_fraction": 0.6737864017486572, "avg_line_length": 29.294116973876953, "blob_id": "106a3afa59283bbc5813e8bc9048073d87b51f60", "content_id": "f7c189ef2ce03983c7a3db9a1fa8665624f9f79d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 111, "num_lines": 51, "path": "/app/es_server.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from elasticsearch import Elasticsearch\nfrom image_match.elasticsearch_driver import SignatureES\nimport glob\nfrom flask import Flask, render_template, request\nimport os\nimport time\n\napp = Flask(__name__)\napp.config[\"INPUT_FILE\"] = \"sketches/demonstration.png\"\napp.config[\"ABM\"] = \"flocking\"\napp.config[\"SES\"] = SignatureES(Elasticsearch(), distance_cutoff=1.0)\n\[email protected]('/initialize', methods=[\"GET\"])\ndef initialize():\n # clear existing indexes\n os.system(\"curl -XDELETE 'http://localhost:9200/_all'\")\n\n # sleep to be sure of synchronization\n time.sleep(60)\n \n # index items\n folderName = \"/home/karan/storage/workspaces/bitbucket/_swarm-lfd-data/\" + app.config[\"ABM\"] + \"/images_10\"\n #folderName = \"/home/karan/storage/workspaces/bitbucket/_swarm-lfd-data/\" + app.config[\"ABM\"] + \"/images\"\n allFileNames = glob.glob(folderName + \"/*.*\")\n\n for imageName in sorted(allFileNames):\n print('elastic search initialize: ' + imageName)\n app.config[\"SES\"].add_image(imageName)\n\n # sleep to be sure of synchronization\n time.sleep(60)\n \n 
return \"initialization complete\"\n\n\[email protected]('/match', methods=[\"GET\"])\ndef match():\n results = app.config[\"SES\"].search_image(app.config[\"INPUT_FILE\"])\n\n distanceResults = []\n for result in results:\n distanceResults.append((result['dist'], result['path']))\n\n # get top result\n [distance, path] = sorted(distanceResults)[0]\n os.system('cp ' + path + ' matches/')\n \n return \"matched\"\n\n# code run at app start\ninitialize()\n" }, { "alpha_fraction": 0.6937590837478638, "alphanum_fraction": 0.698113203048706, "avg_line_length": 25.461538314819336, "blob_id": "e035ad7ba763933b646f7bbf7f9c261f52b7789c", "content_id": "dc860b5c8baa82f6f21ccb359bcc3ff3eafbc5b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 689, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/src/lfd.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# load configuration\nsource configuration.sh\n\n# run multiple times to generate multiple sets of images\nexport RUNS_PER_INPUT=1\n\n# remove existing data\nrm -f suggested_alps.txt\nrm -f suggested_alps_fm_prediction_error.txt\n\nfor i in $(seq \"$RUNS_PER_INPUT\")\ndo\n # run experiment\n echo \"=>running experiment\"\n export ALP_CONFIGURATIONS=$(python lfd.py)\n echo $ALP_CONFIGURATIONS >> suggested_alps.txt\n \n # circumvent excessive prints from smt \n mv temp_average_configuration_list.txt suggested_alps.txt 2>/dev/null\n \n if [ $WRITE_TO_DISK == 1 ]; then\n # remove compressed pickle files generated for this run\n rm *.pgz\n fi\ndone\n\n" }, { "alpha_fraction": 0.6831682920455933, "alphanum_fraction": 0.7623762488365173, "avg_line_length": 24.25, "blob_id": "a54ecdd6f4685d3e1936514766d0a201aa8a708e", "content_id": "a509b2a730edd34ff284c45c582bca0af6df31d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 101, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/app/run_es_server.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "sudo service elasticsearch start\n\nexport FLASK_APP=es_server.py\nflask run --host=0.0.0.0 --port=5001\n" }, { "alpha_fraction": 0.5775167942047119, "alphanum_fraction": 0.5892617702484131, "avg_line_length": 28.799999237060547, "blob_id": "6dd53eb41161c6b2279e68b5d703786da0993126", "content_id": "0a54a5e6f680756fcb7a659d7b5332ebb0e23535", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2980, "license_type": "no_license", "max_line_length": 115, "num_lines": 100, "path": "/src/sampling/wolf_sheep/predict_wolf_sheep.java", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "/* package required for calls from parent folder */\npackage sampling.wolf_sheep;\n\n/* libraries included */\nimport org.nlogo.headless.HeadlessWorkspace;\nimport java.io.*;\nimport java.util.*;\n\npublic class predict_wolf_sheep {\n public static void main(String[] argv) {\n HeadlessWorkspace workspace =\n HeadlessWorkspace.newInstance() ;\n try {\n\t// evaluate to list\n\tString alpConfigurationListString = argv[0];\n\tList<String> alpConfigurationList = new ArrayList<String>(Arrays.asList(alpConfigurationListString.split(\"\\\\(\")));\n\n\n\t// skip first entry, which is empty\n\tfor (int i = 1; i < alpConfigurationList.size(); i++) {\n\t\tString alpConfiguration = alpConfigurationList.get(i);\n\n\t\t// clean 
string\n\t\talpConfiguration = alpConfiguration.replace(\")\", \"\").replace(\"]\", \"\");\n\n\t\t// extract variables\n\t\tArrayList<String> alpValues = new ArrayList<String>(Arrays.asList(alpConfiguration.split(\",\")));\n\n workspace.open(\"sampling/wolf_sheep/Wolf Sheep Predation.nlogo\");\n\n \tworkspace.command(\"set grass? true\");\n workspace.command(\"set initial-number-sheep 100\");\n workspace.command(\"set initial-number-wolves 50\"); \n \n \t// wolf-reproduce\n \tString k1 = alpValues.get(0);\n \t\n \t// sheep-reproduce\n\t\t\t\tString k2 = alpValues.get(1);\n\t\t\t\t\n\t\t\t\t//wolf-gain-from-food\n\t\t\t\tString k3 = alpValues.get(2);\n\t\t\t\t\n\t\t\t\t//sheep-gain-from-food\n\t\t\t\tString k4 = alpValues.get(3);\n\t\t\t\t\n\t\t\t\t//grass-regrowth-time\n\t\t\t\tString k5 = alpValues.get(4);\n\n\n\t\t\t\tworkspace.command(\"set wolf-reproduce \" + k1);\n\t\t\t\tworkspace.command(\"set sheep-reproduce \" + k2);\n\t\t\t\tworkspace.command(\"set wolf-gain-from-food \" + k3);\n\t\t\t\tworkspace.command(\"set sheep-gain-from-food \" + k4);\n\t\t\t\tworkspace.command(\"set grass-regrowth-time \" + k5);\n\n \tworkspace.command(\"setup\");\n \tworkspace.command(\"repeat 500 [ go ]\");\n \t\n \tString wolves = \"\" + workspace.report(\"count wolves\");\n \tString sheep = \"\" + workspace.report(\"count sheep\");\n \t\n \tfor(int j = 0; j < 750; j++) {\n \t\tworkspace.command(\"go\");\n \t\twolves += \",\" + workspace.report(\"count wolves\");\n \t\tsheep += \",\" + workspace.report(\"count sheep\");\n \t}\n \t\n\t\tString[] wolvesValues = wolves.split(\",\");\n\t\tfloat sum = 0;\n\t\tfloat count = 0;\n\t\tfor(String value: wolvesValues){\n\t\t\tsum += Float.parseFloat(value);\n\t\t\tcount += 1;\n\t\t}\n\t\tfloat wolvesAverage = sum/count;\n\n\t\tString[] sheepValues = sheep.split(\",\");\n\t\tsum = 0;\n\t\tcount = 0;\n\t\tfor(String value: sheepValues){\n\t\t\tsum += Float.parseFloat(value);\n\t\t\tcount += 1;\n\t\t}\n\t\tfloat sheepAverage = sum/count;\n\n \tSystem.out.print(\"[(\" + String.valueOf(wolvesAverage) + \", \" + String.valueOf(sheepAverage) + \")]\\n\");\n\n }\n \n \tworkspace.dispose();\n\n }\n catch(Exception ex) {\n ex.printStackTrace();\n }\n }\n \n \n}\n" }, { "alpha_fraction": 0.6532751321792603, "alphanum_fraction": 0.6555458307266235, "avg_line_length": 32.44444274902344, "blob_id": "f761ee9cb3b527326523d8f632fdc8728f1d7713", "content_id": "b80cd3d6bab49d99e5289b85984b8d0acbde82a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5725, "license_type": "no_license", "max_line_length": 153, "num_lines": 171, "path": "/src/amf/data.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# data.py contains everything that has to do with dealing with the actual\n# data and manipulating data sets.\n\nimport random\nimport copy\nimport sys\nimport misc\nimport os\n\nIND = 5\nDEP = 6\n\ndef make_vartype(vartype_str):\n \"\"\"\n Takes a string that labels columns in a data set.\n vartype_string should either have 'i' or 'd' in it, e.g., \"iidid\"\n\n i - independent variable\n d - dependent variable\n \"\"\"\n # replace characters i and d with IND and DEP symbols.\n return tuple( (IND if c == 'i' else DEP) for c in vartype_str.strip() if not c.isspace())\n\ndef load(file_name):\n \"\"\"\n Loads a data set from a file.\n\n The data set should be space delimited to distinguish columns, and newline\n delimited to distinguish rows.\n\n The first line should contain something that is to be passed 
into make_vartype.\n\n    \"\"\"\n\n    file_h = open(file_name)\n\n    vartype = make_vartype(file_h.readline().strip())\n\n    dataset = []\n    # go through each line and parse it\n    for line in file_h.xreadlines():\n        line = line.strip()\n\n        # ignore empty lines\n        if len(line) == 0:\n            continue\n\n        sline = line.split()\n        if len(sline) != len(vartype):\n            sys.stderr.write(\"Ignored line with wrong number of tokens: \" + line + \"\\n\")\n            continue\n\n        # split the data into two groups, depending on if they are independent or dependent variables.\n        ind_list = []\n        dep_list = []\n        for idx, item in enumerate(sline):\n            (ind_list if vartype[idx] == IND else dep_list).append(float(item) if item != 'NOENTRY' else 'NOENTRY')\n        \n        dataset.append( ( tuple(ind_list), tuple(dep_list) ) )\n\n    dataset.sort()\n\n    return tuple(dataset)\n\ndef random_subsets(data_set, n_list, demonstrationDescriptors=[]):\n    \"\"\"\n    Returns random subsets of the dataset with sizes specified in the n list.\n    There will be no duplicates between subsets.\n    \"\"\"\n\n    if sum(n_list) > len(data_set):\n        raise IndexError, \"Requested size of subsets, from set of size %d, are too large: %s (total size %d)\" % (len(data_set), repr(n_list), sum(n_list))\n\n    # generate a list of the indexes, then shuffle them. I will use these to randomly select from the data set.\n    idxs = range(len(data_set))\n\n    # this is where the randomness lies\n    #random.seed(0)\n\n    # idxs contain indexes of all elements in dataset\n    # we should get the top sum(n_list) values\n    # then use those to form random subsets of length specified by n_list\n\n    # check number of demonstration descriptors\n    demonstrationCount = len(demonstrationDescriptors)\n\n    # obtain and shuffle indexes for randomness\n    # divides equally per demonstration slp\n    # works better when a single slp is specified\n\n    # dataset selection used if set to true\n    if(eval(os.environ[\"DATASET_SELECTION\"]) == 1):\n        useClosestIdx = True\n    else:\n        useClosestIdx = False\n\n    #if(type(depData) == tuple):\n    if(useClosestIdx == True):\n        # we need to divide the dataset to cover all of these\n        # edit last item to account for correctness of sum\n        # shuffle for random allotment of extra points\n        nListSubLengths = [sum(n_list)/demonstrationCount]\n\n        nListSubLengths[-1] += sum(n_list)%demonstrationCount\n        random.shuffle(nListSubLengths)\n\n        # indexes corresponding to good points\n        goodIdxs = []\n\n        # for each sublength\n        # corresponding to each demonstration\n        for index in range(len(nListSubLengths)):\n            # if feedback is on\n            # compute distances based on queried slps and not demonstration descriptors\n            if((eval(os.environ[\"FEEDBACK\"]) == 1) and (os.path.isfile(\"queried_slps.txt\") == True)):\n                with open(\"queried_slps.txt\", \"r\") as inFile:\n                    queriedSlps = [eval(inFile.readlines()[0])]\n                distances = [(misc.distance(queriedSlps[index], data_set[idx][1]), idx) for idx in idxs]\n            else:\n                # get distances of all descriptors in dataset\n                # with respect to the input descriptor\n                distances = [(misc.distance(demonstrationDescriptors[index], data_set[idx][1]), idx) for idx in idxs]\n\n            distances.sort()\n\n            # extract first few as needed\n            nListSubLength = nListSubLengths[index]\n            goodDataPoints = distances[:nListSubLength]\n\n            # append to list of all points\n            goodIdxs += [idx for distance, idx in goodDataPoints]\n\n        random.shuffle(goodIdxs)\n    else:\n        # point sampling has already been done from the full dataset\n        # now we are dealing with individual dep value correspondences for a given ind configuration\n        random.shuffle(idxs)\n    \n    # 
now form subsets based on sequence of indexes\n subsets = []\n cur_offset = 0\n for subset_size in n_list:\n #if(type(depData) == tuple):\n if(useClosestIdx == True):\n # we are selecting from the full dataset\n # this dataset does not contain individual slp correspondence\n subsets.append( tuple(data_set[idx] for idx in goodIdxs[cur_offset:cur_offset + subset_size]) )\n else:\n # point sampling has already been done from the full dataset\n # now we are dealing with individual dep value correspondences for a given ind configuration\n subsets.append( tuple(data_set[idx] for idx in idxs[cur_offset:cur_offset + subset_size]) )\n\n cur_offset += subset_size\n\n return subsets\n\ndef split_dep(data_set):\n \"\"\"\n Split the data set into one-data-set-per-dependent variable.\n \"\"\"\n\n num_dep = len(data_set[0][1])\n\n sets = [ [] for idx in range(num_dep) ]\n\n for ind, dep in data_set:\n # for each dependent variable here...\n for d_idx in range(num_dep):\n sets[d_idx].append((ind, dep[d_idx]))\n \n return sets\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.638373613357544, "alphanum_fraction": 0.660858154296875, "avg_line_length": 37.121429443359375, "blob_id": "6b842ce6a5a8435cd45f99cfb693f0cb89905ba2", "content_id": "e48c3eaf9d0e8475ff26fe2768343c05d4fb1ab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5337, "license_type": "no_license", "max_line_length": 109, "num_lines": 140, "path": "/data/dataset_analysis/dataset_analysis.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy\nimport sys\n\ndef read_dataset(fileName):\n dataset = []\n \n with open(fileName + \"/slp_dataset.txt\", \"r\") as slpDatasetFile:\n for line in slpDatasetFile:\n point = eval(line)\n dataset.append(point)\n\n return dataset\n\ndef polarize_matrix(matrix, threshold=0):\n # polarize matrix values\n [rows, columns] = matrix.shape\n\n for row in range(rows):\n for column in range(columns):\n if(matrix[row][column] > threshold):\n matrix[row][column] = 1\n else:\n matrix[row][column] = 0\n\ndef analyze_datasets(goodDatasetFolders=[], badDatasetFolders=[], figureSuffix = \"\"):\n # determine comparison method for quality of datasets\n\n # demonstration specific data\n demonstrationID = sys.argv[1]\n \n # get all points in good dataset\n goodDataset = []\n for folder in goodDatasetFolders:\n goodDataset += read_dataset(demonstrationID + \"/\" + \"predictions_\" + folder)\n\n # get all points in bad dataset\n badDataset = []\n for folder in badDatasetFolders:\n badDataset += read_dataset(demonstrationID + \"/\" + \"predictions_\" + folder)\n\n # define plot ranges\n # each value represents a threshold for a bucket\n dim1Step = eval(sys.argv[2])\n dim2Step = eval(sys.argv[3])\n dim1ThresholdValues = numpy.arange(0, 1, dim1Step)\n dim2ThresholdValues = numpy.arange(0, 1, dim2Step)\n\n # now compute matrix based on discrete values\n goodDensityMap = numpy.zeros((len(dim1ThresholdValues), len(dim2ThresholdValues)))\n badDensityMap = numpy.zeros((len(dim1ThresholdValues), len(dim2ThresholdValues)))\n\n # for good datasets\n for point in goodDataset:\n # determine bucket for this point\n [dim1Value, dim2Value] = point\n\n # will be an integer value between 0 and len(dim1ThresholdValues)-1\n # will be an integer value between 0 and len(dim2ThresholdValues)-1\n dim1Bucket = int(dim1Value/dim1Step)\n dim2Bucket = int(dim2Value/dim2Step)\n\n # account for upper limits\n # use capping\n if(dim1Bucket == 
len(dim1ThresholdValues)):\n dim1Bucket = len(dim1ThresholdValues) - 1\n if(dim2Bucket == len(dim2ThresholdValues)):\n dim2Bucket = len(dim2ThresholdValues) - 1\n\n # add count to matrix\n goodDensityMap[dim1Bucket][dim2Bucket] += 1\n\n # for bad datasets\n for point in badDataset:\n # determine bucket for this point\n [dim1Value, dim2Value] = point\n\n # will be an integer value between 0 and len(dim1ThresholdValues) - 1\n # will be an integer value between 0 and len(dim2ThresholdValues) - 1\n dim1Bucket = int(dim1Value/dim1Step) - 1\n dim2Bucket = int(dim2Value/dim2Step) - 1\n\n # account for upper limits\n # use capping\n if(dim1Bucket == len(dim1ThresholdValues)):\n dim1Bucket = len(dim1ThresholdValues) - 1\n if(dim2Bucket == len(dim2ThresholdValues)):\n dim2Bucket = len(dim2ThresholdValues) - 1\n \n # add count to matrix\n badDensityMap[dim1Bucket][dim2Bucket] += 1\n\n # compute difference\n # and normalize\n goodDifferenceDensityMap = goodDensityMap - badDensityMap\n badDifferenceDensityMap = badDensityMap - goodDensityMap\n\n polarize_matrix(goodDifferenceDensityMap, 0)\n polarize_matrix(badDifferenceDensityMap, 0)\n\n # splice matrices if needed\n # to zoom in on finer sections\n #goodDifferenceDensityMap = goodDifferenceDensityMap[:10, :10]\n #badDifferenceDensityMap = badDifferenceDensityMap[:10, :10]\n #dim1ThresholdValues = dim1ThresholdValues[:10]\n #dim2ThresholdValues = dim2ThresholdValues[:10]\n \n plt.figure()\n plt.imshow(goodDifferenceDensityMap, interpolation='nearest', cmap=plt.cm.gray)\n plt.ylabel(\"Maximum Area\")\n plt.xlabel(\"Maximum Perimeter\")\n #plt.title(\"Good Difference Density Map\")\n plt.yticks(range(len(dim1ThresholdValues)), dim1ThresholdValues)\n plt.xticks(range(len(dim2ThresholdValues)), dim2ThresholdValues)\n plt.savefig(demonstrationID + \"/\" + \"good_difference_density_map\" + figureSuffix + \".png\")\n plt.close(\"all\")\n \n plt.figure()\n plt.imshow(badDifferenceDensityMap, interpolation='nearest', cmap=plt.cm.gray)\n plt.ylabel(\"Maximum Area\")\n plt.xlabel(\"Maximum Perimeter\")\n #plt.title(\"Bad Difference Density Map\")\n plt.yticks(range(len(dim1ThresholdValues)), dim1ThresholdValues)\n plt.xticks(range(len(dim2ThresholdValues)), dim2ThresholdValues)\n plt.savefig(demonstrationID + \"/\" + \"bad_difference_density_map\" + figureSuffix + \".png\")\n plt.close(\"all\")\n \ndef main():\n analyze_datasets([\"best\", \"second_best\", \"third_best\"], [\"worst\", \"second_worst\", \"third_worst\"], \"_all\")\n analyze_datasets([\"best\"], [\"worst\"], \"_1_1\")\n analyze_datasets([\"best\"], [\"second_worst\"], \"_1_2\")\n analyze_datasets([\"best\"], [\"third_worst\"], \"_1_3\")\n analyze_datasets([\"second_best\"], [\"worst\"], \"_2_1\")\n analyze_datasets([\"second_best\"], [\"second_worst\"], \"_2_2\")\n analyze_datasets([\"second_best\"], [\"third_worst\"], \"_2_3\")\n analyze_datasets([\"third_best\"], [\"worst\"], \"_3_1\")\n analyze_datasets([\"third_best\"], [\"second_worst\"], \"_3_2\")\n analyze_datasets([\"third_best\"], [\"third_worst\"], \"_3_3\")\n \nmain()\n" }, { "alpha_fraction": 0.6425849199295044, "alphanum_fraction": 0.6641530990600586, "avg_line_length": 32.789039611816406, "blob_id": "9bc73487b95b665c1d75851548f8c07f7c7dad62", "content_id": "57cbd55a319ed9d96c686d00e8557615b2489af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12333, "license_type": "no_license", "max_line_length": 127, "num_lines": 365, "path": "/src/sampling/utils/featurize_image.py", 
"repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# libraries used\nimport cv2\nimport numpy as np\nfrom skimage.feature import local_binary_pattern\nfrom skimage.io import imread\nfrom skimage.transform import resize\nfrom keras import backend as K\nfrom keras.models import model_from_json\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import load_img, img_to_array\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution3D\nfrom keras.layers.convolutional_recurrent import ConvLSTM2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Dense, Activation\nfrom keras.layers.core import Reshape, Flatten\n\nimport os\n\n# using magic numbers for now\n\n# feature extraction initialization\ndef get_network(descriptorSetting, pathPrefix=None):\n # load feature extractor if needed\n net = None\n\n # prefix to folder location\n if(pathPrefix == None):\n pathPrefix = \"\"\n\n functionName = \"get_\" + descriptorSetting.lower() + \"_network\"\n if(functionName in globals().keys()):\n net = globals()[functionName](pathPrefix)\n\n return net\n\n# feature extraction\ndef get_features(net, imageName, descriptorSetting):\n # initialization\n features = None\n\n functionName = \"get_\" + descriptorSetting.lower() + \"_features\"\n features = globals()[functionName](net, imageName)\n \n # return features\n return features\n\n# saved keras network\ndef get_saved_keras_network(modelFile, weightFile):\n # load and compile saved network\n jsonFile = open(modelFile, 'r')\n modelJson = jsonFile.read()\n jsonFile.close()\n net = model_from_json(modelJson)\n net.load_weights(weightFile)\n net.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])\n\n # return network\n return net\n\n# contour information\ndef get_contour_features(net, imageName):\n imageData = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)\n\n # dilate for better contour extraction\n kernel = np.ones((5,5),np.uint8)\n dilatedImageData = cv2.dilate(imageData, kernel, iterations=4)\n #cv2.imwrite(imageName[:-4] + \"_dilated.png\", dilatedImageData)\n\n # extract contours\n ret, threshold = cv2.threshold(dilatedImageData, 127, 255, 0)\n contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n contouredImageData = np.zeros(dilatedImageData.shape)\n for i in xrange(len(contours)):\n cv2.drawContours(contouredImageData, contours, i, (255, 255, 255), 1, 8, hierarchy) \n #cv2.imwrite(imageName[:-4] + \"_contoured.png\", contouredImageData)\n\n # extract largest contour area and perimeter\n areas = []\n perimeters = []\n\n for contour in contours:\n area = cv2.contourArea(contour)\n perimeter = cv2.arcLength(contour, True)\n areas.append(area)\n perimeters.append(perimeter)\n\n # define dependent values\n maxArea = max(areas)\n maxPerimeter = max(perimeters)\n features = [maxArea, maxPerimeter]\n return features\n\nimport sys\n\n# density information\ndef get_density_features(net, imageName, blocks=eval(os.environ[\"NUM_DEPENDENT\"])):\n # read image\n image = imread(imageName, as_grey=True)\n \n # resize to a pixels power of 2\n # required for equal division when splitting\n resizeDimension = 2**int(min([np.log2(x) for x in image.shape]))\n image = resize(image, (resizeDimension, resizeDimension), mode=\"wrap\")\n \n # polarize pixels\n 
threshold = np.amax(image)\n image[image < threshold] = 0\n image[image >= threshold] = 1\n\n # segment image\n # segments must be a power of 4\n # because each block is split into 4 sub blocks\n currentBlocksCount = 1\n blocksList = np.array([image])\n \n while(currentBlocksCount < blocks):\n # split horizontally\n subBlocksList = []\n for block in blocksList:\n subBlocks = np.hsplit(block, 2)\n subBlocksList.extend(subBlocks)\n blocksList = subBlocksList\n\n # split vertically\n subBlocksList = []\n for block in blocksList:\n subBlocks = np.vsplit(block, 2)\n subBlocksList.extend(subBlocks)\n blocksList = subBlocksList\n\n # update number of blocks\n currentBlocksCount *= 4\n \n # compute density\n # since all images are the same size, we can use count instead\n features = [x.sum() for x in blocksList]\n \n return features\n \n# vgg network\ndef get_vgg_network(pathPrefix):\n # pretrained vgg16 network\n net = VGG16(include_top=True, weights='imagenet', input_tensor=None)\n return net\n \n# vgg based features\ndef get_vgg_features(net, imageName):\n # preprocess image\n imageData = image.load_img(imageName, target_size=(224, 224))\n imageData = image.img_to_array(imageData)\n imageData = np.expand_dims(imageData, axis=0)\n imageData = preprocess_input(imageData)\n\n # final layer output\n features = net.predict(imageData)[0]\n return features\n\n# lbp features\ndef get_lbp_features(net, imageName):\n # settings for LBP\n METHOD = 'uniform'\n radius = 3\n nPoints = 8 * radius\n nBins = 128\n \n # dilate for better contour extraction\n imageData = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)\n kernel = np.ones((5,5),np.uint8)\n dilatedImageData = cv2.dilate(imageData, kernel, iterations=4)\n #cv2.imwrite(imageName[:-4] + \"_dilated.png\", dilatedImageData)\n\n # compute normalized histogram distribution\n lbp = local_binary_pattern(dilatedImageData, nPoints, radius, METHOD)\n hist, binEdges = np.histogram(lbp, bins=nBins, range=(0, nBins), density=True)\n\n # define dependent values\n features = hist\n return features\n \n# mnist network\ndef get_mnist_network(pathPrefix):\n # path to saved network\n modelFile = pathPrefix + \"keras/mnist_cnn_model.json\"\n weightFile = pathPrefix + \"keras/mnist_cnn_model.h5\"\n \n # return network\n net = get_saved_keras_network(modelFile, weightFile)\n return net\n\n# mnist based convolutional features\ndef get_mnist_features(net, imageName):\n # output layer specification\n LAYER_NUMBER = 7\n LEARNING_PHASE = 0\n\n # dilate for better contour extraction\n imageData = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)\n kernel = np.ones((5,5),np.uint8)\n dilatedImageData = cv2.dilate(imageData, kernel, iterations=4)\n #cv2.imwrite(imageName[:-4] + \"_dilated.png\", dilatedImageData)\n\n imageData = resize(dilatedImageData, (28, 28))\n imageData = imageData.reshape(1, 1, 28, 28)\n\n # layer output\n get_layer_output = K.function([net.layers[0].input, K.learning_phase()],\n [net.layers[LAYER_NUMBER].output])\n layerOutput = get_layer_output([imageData, LEARNING_PHASE])[0]\n features = layerOutput[0]\n return features\n\n# resnet network\ndef get_resnet50_network(pathPrefix):\n # pretrained resnet50 network\n net = ResNet50(include_top=True, weights='imagenet', input_tensor=None)\n return net\n\n# resnet based features\ndef get_resnet50_features(net, imageName):\n # preprocess image\n dimensionOrdering = K.image_dim_ordering()\n mean = (103.939, 116.779, 123.68)\n imageData = load_img(imageName, target_size=(224, 224))\n imageData = img_to_array(imageData, 
dim_ordering=dimensionOrdering)\n\n if dimensionOrdering == 'th':\n imageData[0, :, :] -= mean[0]\n imageData[1, :, :] -= mean[1]\n imageData[2, :, :] -= mean[2]\n # 'RGB'->'BGR'\n imageData = imageData[::-1, :, :]\n else:\n imageData[:, :, 0] -= mean[0]\n imageData[:, :, 1] -= mean[1]\n imageData[:, :, 2] -= mean[2]\n imageData = imageData[:, :, ::-1]\n\n imageData = np.expand_dims(imageData, axis=0)\n\n # final layer output\n features = net.predict(imageData)[0]\n return features\n\n# autoencoder network\ndef get_vae_network(pathPrefix):\n # path to saved network\n modelFile = pathPrefix + \"keras/variational_autoencoder_model.json\"\n weightFile = pathPrefix + \"keras/variational_autoencoder_model.h5\"\n\n # return network\n net = get_saved_keras_network(modelFile, weightFile)\n return net\n\n# autoencoder based features\ndef get_vae_features(net, imageName):\n # output layer specification\n LAYER_NUMBER = 3\n LEARNING_PHASE = 0\n X_TRAIN_SHAPE = tuple([1,784])\n\n # preprocess image\n imageData = imread(imageName, as_grey=True)\n imageData = resize(imageData, (28, 28))\n imageData = imageData.flatten()\n imageData = imageData.astype('float32') / 255.\n imageData = imageData.reshape((1, np.prod(X_TRAIN_SHAPE[1:])))\n\n # layer output\n get_layer_output = K.function([net.layers[0].input, K.learning_phase()],\n [net.layers[LAYER_NUMBER].output])\n layer_output = get_layer_output([imageData, LEARNING_PHASE])[0]\n features = layer_output[0]\n return features\n\n# conv lstm network\ndef get_lstm_network(pathPrefix):\n # path to saved network\n modelFile = pathPrefix + \"keras/conv_lstm_model.json\"\n weightFile = pathPrefix + \"keras/conv_lstm_model.h5\"\n\n #net = get_saved_keras_network_lstm(modelFile, weightFile)\n\n # manually specify network\n # needed because loading the saved ConvLSTM layer from json gives an error\n net = Sequential()\n net.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n input_shape=(None, 40, 40, 1),\n batch_input_shape=(None, 11, 40, 40, 1),\n border_mode='same', return_sequences=True))\n net.add(BatchNormalization())\n net.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\n net.add(BatchNormalization())\n net.add(Flatten())\n net.add(Dense(128))\n net.add(Dense(1600*11))\n net.add(Reshape((11, 40, 40, 1)))\n net.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\n net.add(BatchNormalization())\n net.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\n border_mode='same', return_sequences=True))\n net.add(BatchNormalization())\n net.add(Convolution3D(nb_filter=1, kernel_dim1=1, kernel_dim2=3,\n kernel_dim3=3, activation='sigmoid',\n border_mode='same', dim_ordering='tf'))\n net.load_weights(weightFile)\n net.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])\n\n # return network\n return net\n\n# convolutional lstm based features\ndef get_lstm_features(net, imageName):\n # output layer specification\n LAYER_NUMBER = 5\n LEARNING_PHASE = 0\n\n # generate movie\n rows = 40\n columns = 40\n numberOfSamples = 1\n numberOfFrames = 11\n shiftedMovie = np.zeros((numberOfSamples, numberOfFrames, rows, columns, 1), dtype=np.float)\n imageNameSplit = imageName.split(\"_\")\n\n # stitch all but last\n imageSeriesName = \"\"\n for index in range(len(imageNameSplit)-1):\n splitItem = imageNameSplit[index]\n imageSeriesName += splitItem + \"_\"\n imageSeriesName = imageSeriesName[:-1]\n\n # the image series name is everything up to the last underscore\n for i in 
range(numberOfSamples):\n # read custom dataset\n for t in range(numberOfFrames):\n currentFrameName = imageSeriesName + \"_\" + str(200 + t) + \".png\"\n \n # read and resize to 40x40\n imageData = imread(currentFrameName, as_grey=True)\n imageData = resize(imageData, (40, 40)) \n shiftedMovie[i, t, :, :, 0] = imageData\n\n # layer output\n get_layer_output = K.function([net.layers[0].input, K.learning_phase()],\n [net.layers[LAYER_NUMBER].output])\n layer_output = get_layer_output([shiftedMovie, LEARNING_PHASE])[0]\n features = layer_output[0]\n return features\n\n# image search features\ndef get_image_match_features(net, imageName):\n # use a system call to invoke the python3 library using this image name\n os.system(\"python3 sampling/utils/get_image_match_features.py \" + imageName)\n\n # use temporary file for communication\n features = np.load(\"image_match_features.npy\")\n\n return features\n" }, { "alpha_fraction": 0.7101449370384216, "alphanum_fraction": 0.7753623127937317, "avg_line_length": 22, "blob_id": "096d119115dfd8c1aeb7a1118d0b6beb8c5647c3", "content_id": "bdc936d5c28554fd29fc750fdb90f22ede3ee6e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/src/domainconfs/flocking.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "TRAINING_SIZE = 330\nVALIDATION_SIZE = 100\nRM_GRANULARITY = 40\nREGRESSION = \"sklearn.neighbors.KNeighborsRegressor()\"\n\nNUM_INDEPENDENT = 3\n" }, { "alpha_fraction": 0.6039416193962097, "alphanum_fraction": 0.6153117418289185, "avg_line_length": 30.02941131591797, "blob_id": "1f1e7cbdbe4e657662a152df12145edfc3c467b7", "content_id": "e875ace109e35918df732c93332cbba1d4dcda2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5277, "license_type": "no_license", "max_line_length": 137, "num_lines": 170, "path": "/src/configuration.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n#######################################################################\n# this is the only thing that should be edited when using this script #\n#######################################################################\n\n# abm being used\nexport ABM=\"flocking\"\n#export ABM=\"civil_violence\"\n#export ABM=\"forest_fire\"\n#export ABM=\"aids\"\n#export ABM=\"wolf_sheep\"\n#export ABM=\"eum\"\n#export ABM=\"schelling\"\n#export ABM=\"turbulence\"\n\n# use of disk to reduce ram consumption\nexport WRITE_TO_DISK=0\n\n# use of demonstration images is off by default \nexport IMAGES=0\n\n# check if this abm depends on images\nif [ $ABM == \"flocking\" ] || [ $ABM == \"turbulence\" ]; then\n export IMAGES=1\nfi\n\n# if feature reduction has been used\nexport REDUCED=0\n\n# if image features are uniform across all stages of the framework\n# used during filtering\nexport IMAGE_FEATURIZATION_HOMOGENEOUS=0\n\n# descriptor setting if any\nexport DESCRIPTOR_SETTING=\"\"\n\n# check if this abm depends on images\n# specify which descriptor to use\nif [ $IMAGES == 1 ]; then\n # determine number of dependent variables\n #export DESCRIPTOR_SETTING=\"DENSITY\"\n export DESCRIPTOR_SETTING=\"CONTOUR\"\n #export DESCRIPTOR_SETTING=\"LBP\"\n #export DESCRIPTOR_SETTING=\"MNIST\"\n #export DESCRIPTOR_SETTING=\"VAE\"\n #export DESCRIPTOR_SETTING=\"LSTM\"\n #export DESCRIPTOR_SETTING=\"VGG\"\n #export DESCRIPTOR_SETTING=\"RESNET50\"\n 
#export DESCRIPTOR_SETTING=\"IMAGE_MATCH\"\nfi\n\n# check for override in case of filtering\nif [ ! -z $USE_FILTER_DESCRIPTOR_SETTING ]; then\n if [ $USE_FILTER_DESCRIPTOR_SETTING == 1 ]; then\n #export DESCRIPTOR_SETTING=\"RESNET50\"\n\t#export DESCRIPTOR_SETTING=\"IMAGE_MATCH\"\n\texport DESCRIPTOR_SETTING=\"CONTOUR\"\n fi\nfi\n\n# distance metric used for filtering\n# euclidean assumed by default\nexport DISTANCE_METHOD=\"EUCLIDEAN\"\n#export DISTANCE_METHOD=\"NORMALIZED\"\n\n############\n# feedback #\n############\n\nexport FEEDBACK=0\nexport LEARNING_RATE_FM_DATASET=0.0\nexport LEARNING_RATE_REGRESSION_PARAMETER=1.0\nexport REGRESSION_PARAMETER_LOWER_LIMIT=1\nexport REGRESSION_PARAMETER_UPPER_LIMIT=100\n\n#####################\n# dataset selection #\n#####################\n\nexport DATASET_SELECTION=1\n\n#########################\n# refine configurations #\n#########################\n\nexport CONFIGURATIONS_PRUNING=1\nexport CONFIGURATIONS_OUTLIER_DETECTION=1\n\n#############################\n# constants for this script #\n#############################\n\n# demonstration file\nexport DEMONSTRATION_FOLDER=\"../data/demonstrations/\"$ABM\n\n# configuration file\nexport CONFIGURATION_FILE=\"domainconfs/\"$ABM\".py\"\n\n# data file\n# if image processing was used, append descriptor setting to processed filename\nif [ $IMAGES == 1 ]; then\n export DESCRIPTOR_SETTING_LOWER_CASE=$(echo $DESCRIPTOR_SETTING | tr \"[:upper:]\" \"[:lower:]\")\n export DATA_FILE=\"../data/domaindata/processed_\"$ABM\"_\"$DESCRIPTOR_SETTING_LOWER_CASE\n\n if [ $REDUCED == 1 ]; then\n\t export DATA_FILE=$DATA_FILE\"_reduced\"\n fi\n \n export DATA_FILE=$DATA_FILE\".txt\"\nelse\n export DATA_FILE=\"../data/domaindata/processed_\"$ABM\".txt\"\nfi\n\n# scaling information\nexport SCALE_DATA_FILE=$(cat \"map.py\" | grep SCALE_DATA_FILE | head -n 1 | sed s/\\\"/\\\\n/g | sed -n 2p)\n\n##########################################\n# abm simulation tool and file extension #\n# one entry required per abm #\n##########################################\n\nif [ $ABM == \"civil_violence\" ] || [ $ABM == \"forest_fire\" ] || [ $ABM == \"eum\" ] || [ $ABM == \"schelling\" ]; then\n export SIMULATION_TOOL=\"python3\"\n export SIMULATION_EXTENSION=\".py\"\nelif [ $ABM == \"flocking\" ] || [ $ABM == \"aids\" ] || [ $ABM == \"wolf_sheep\" ] || [ $ABM == \"turbulence\" ]; then\n export SIMULATION_TOOL=\"java\"\n export SIMULATION_EXTENSION=\"\"\nfi\n\n############################################################\n# image featurization details, if any #\n# entry required for abms using images #\n# one entry required per abm for nuber of dependent values #\n############################################################\n\n# default number of dependent values\nexport NUM_DEPENDENT=0\n\n# check if this abm depends on images\nif [ $IMAGES == 1 ]; then\n # determine number of dependent variables\n # based on image feature descriptors\n \n if [ \"$DESCRIPTOR_SETTING\" == \"DENSITY\" ]; then\n\t export NUM_DEPENDENT=1\n elif [ \"$DESCRIPTOR_SETTING\" == \"CONTOUR\" ]; then\n\t export NUM_DEPENDENT=2\n elif [ \"$DESCRIPTOR_SETTING\" == \"LBP\" ]; then\n\t export NUM_DEPENDENT=128\n elif [ \"$DESCRIPTOR_SETTING\" == \"MNIST\" ]; then\n\t export NUM_DEPENDENT=128\n elif [ \"$DESCRIPTOR_SETTING\" == \"VAE\" ]; then\n\t export NUM_DEPENDENT=128\n elif [ \"$DESCRIPTOR_SETTING\" == \"LSTM\" ]; then\n\t export NUM_DEPENDENT=128\n elif [ \"$DESCRIPTOR_SETTING\" == \"VGG\" ]; then\n\t export NUM_DEPENDENT=1000\n elif [ \"$DESCRIPTOR_SETTING\" == 
\"RESNET50\" ]; then\n\t export NUM_DEPENDENT=1000\n elif [ \"$DESCRIPTOR_SETTING\" == \"IMAGE_MATCH\" ]; then\n\t export NUM_DEPENDENT=648\n fi\n\n if [ $REDUCED == 1 ]; then\n\t export NUM_DEPENDENT=50\n fi\nelse\n # read from domainconfs\n export NUM_DEPENDENT=$(python -c \"execd = {} ; exec(open(\\\"\"$CONFIGURATION_FILE\"\\\").read(), execd) ; print execd[\\\"NUM_DEPENDENT\\\"]\")\nfi\n\n\n" }, { "alpha_fraction": 0.8611111044883728, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 23, "blob_id": "91a90a5b5253a3c94bfff9c6a258b67f658d2be8", "content_id": "2c486c490670b67cd6854c51490477641d70b6fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/src/amf/__init__.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import data\nfrom fm import ForwardMapping\nfrom rm import ReverseMapping\n" }, { "alpha_fraction": 0.7781955003738403, "alphanum_fraction": 0.7819548845291138, "avg_line_length": 19.461538314819336, "blob_id": "e5745ea83081fe921aafb9494a65c6615c46565d", "content_id": "e9f129c08b7512e45d606f8ccfaf1ecdf4784d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/src/sampling/utils/get_image_match_features.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from image_match.goldberg import ImageSignature\nimport sys\nimport numpy as np\n\n# input file\nimageName = sys.argv[1]\n\n# generate signature\ngis = ImageSignature()\nfeatures = gis.generate_signature(imageName)\n\n# save signature\nnp.save(\"image_match_features\", features)\n" }, { "alpha_fraction": 0.612515926361084, "alphanum_fraction": 0.6263416409492493, "avg_line_length": 27.48186492919922, "blob_id": "8b3e6e1918ce334b2a4a0e87e0cd1a80681be8e5", "content_id": "cce442fd6bc75d265c10ec508ed72c6399a6dfba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5497, "license_type": "no_license", "max_line_length": 83, "num_lines": 193, "path": "/data/dataset_analysis/plot_alps_slps.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# snippet from lfd.py\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nwith open(\"1/predictions_best/alp_dataset_1488308839.txt\", \"r\") as alpDatasetFile:\n alpDataset = alpDatasetFile.readlines()\n alpPoints = [eval(x) for x in alpDataset]\n\n m = \"o\"\n c = \"r\"\n \n # plot ALPs\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plotName = \"alp_points_best\" + \".png\"\n alpPoints = zip(*alpPoints)\n [xs, ys, zs] = alpPoints\n ax.scatter(xs, ys, zs, c=c, marker=m)\n ax.set_xlabel(\"Max-align-turn\")\n ax.set_ylabel(\"Max-cohere-turn\")\n ax.set_zlabel(\"Max-separate-turn\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim1_best\" + \".png\"\n plt.hist(xs, color=c)\n plt.xlabel(\"Max-align-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim2_best\" + \".png\"\n plt.hist(ys, color=c)\n plt.xlabel(\"Max-cohere-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim3_best\" + \".png\"\n 
plt.hist(zs, color=c)\n plt.xlabel(\"Max-separate-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\nwith open(\"1/predictions_worst/alp_dataset_1488319453.txt\", \"r\") as alpDatasetFile:\n alpDataset = alpDatasetFile.readlines()\n alpPoints = [eval(x) for x in alpDataset]\n\n m = \"o\"\n c = \"r\"\n \n # plot ALPs\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plotName = \"alp_points_worst\" + \".png\"\n alpPoints = zip(*alpPoints)\n [xs, ys, zs] = alpPoints\n ax.scatter(xs, ys, zs, c=c, marker=m)\n ax.set_xlabel(\"Max-align-turn\")\n ax.set_ylabel(\"Max-cohere-turn\")\n ax.set_zlabel(\"Max-separate-turn\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim1_worst\" + \".png\"\n plt.hist(xs, color=c)\n plt.xlabel(\"Max-align-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim2_worst\" + \".png\"\n plt.hist(ys, color=c)\n plt.xlabel(\"Max-cohere-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"alp_points_dim3_worst\" + \".png\"\n plt.hist(zs, color=c)\n plt.xlabel(\"Max-separate-turn\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\nwith open(\"1/predictions_best/slp_dataset_1488308839.txt\", \"r\") as slpDatasetFile:\n slpDataset = slpDatasetFile.readlines()\n slpPoints = [eval(x) for x in slpDataset]\n\n m = \"o\"\n c = \"r\"\n \n # plot SLPs\n fig = plt.figure()\n plotName = \"slp_points_best\" + \".png\"\n slpPoints = zip(*slpPoints)\n [xs, ys] = slpPoints\n plt.plot(xs, ys, c+m)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Maximum Perimeter\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"slp_points_dim1_best\" + \".png\"\n plt.hist(xs, color=c)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"slp_points_dim2_best\" + \".png\"\n plt.hist(ys, color=c)\n plt.xlabel(\"Maximum Perimeter\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\nwith open(\"1/predictions_worst/slp_dataset_1488319453.txt\", \"r\") as slpDatasetFile:\n slpDataset = slpDatasetFile.readlines()\n slpPoints = [eval(x) for x in slpDataset]\n\n m = \"o\"\n c = \"r\"\n \n # plot SLPs\n fig = plt.figure()\n plotName = \"slp_points_worst\" + \".png\"\n slpPoints = zip(*slpPoints)\n [xs, ys] = slpPoints\n plt.plot(xs, ys, c+m)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Maximum Perimeter\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"slp_points_dim1_worst\" + \".png\"\n plt.hist(xs, color=c)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"slp_points_dim2_worst\" + \".png\"\n plt.hist(ys, color=c)\n plt.xlabel(\"Maximum Perimeter\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n\nwith open(\"slp_dataset_1488996573.txt\", \"r\") as slpDatasetFile:\n slpDataset = slpDatasetFile.readlines()\n slpPoints = [eval(x) for x in slpDataset]\n\n m = \"o\"\n c = \"r\"\n \n # plot SLPs\n fig = plt.figure()\n plotName = \"slp_points_guided\" + \".png\"\n slpPoints = 
zip(*slpPoints)\n [xs, ys] = slpPoints\n plt.plot(xs, ys, c+m)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Maximum Perimeter\")\n plt.savefig(plotName)\n plt.close(fig)\n\n fig = plt.figure()\n plotName = \"slp_points_dim1_guided\" + \".png\"\n plt.hist(xs, color=c)\n plt.xlabel(\"Maximum Area\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n \n fig = plt.figure()\n plotName = \"slp_points_dim2_guided\" + \".png\"\n plt.hist(ys, color=c)\n plt.xlabel(\"Maximum Perimeter\")\n plt.ylabel(\"Number of points in each value range\")\n plt.savefig(plotName)\n plt.close(fig)\n" }, { "alpha_fraction": 0.7403846383094788, "alphanum_fraction": 0.7403846383094788, "avg_line_length": 33.66666793823242, "blob_id": "9e9e52a4dc407c0845d04708b7e1dbeb1c6a2e3c", "content_id": "486c468e6c5a58f1286525eeb12b49523ad3a940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 104, "license_type": "no_license", "max_line_length": 65, "num_lines": 3, "path": "/src/sampling/sampling_raw.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# sample data\necho \"==>sampling data\"\n$SIMULATION_TOOL $ABM/sample_$ABM$SIMULATION_EXTENSION > $ABM.txt\n" }, { "alpha_fraction": 0.587643027305603, "alphanum_fraction": 0.612356960773468, "avg_line_length": 25.325302124023438, "blob_id": "d859a85c93f92e7398f42515ddbe06ccd838d28b", "content_id": "75b4b605c3ff80e7cd04c5b542a8378370bd666b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2185, "license_type": "no_license", "max_line_length": 99, "num_lines": 83, "path": "/src/amf/misc.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# misc.py has a bunch of random functions that are used now and then.\n# This is intended as a set of \"helper\" tools, and probably do not need to be used by the user\n# hence, why it is not imported in __init__.py\n\nimport time\nimport math\nimport numpy\nimport sys\n\ndef distance(v1, v2):\n if len(v1) != len(v2):\n raise ValueError, \"distance: vectors %s and %s are different lengths.\" % (repr(v1), repr(v2))\n\n runsum = 0.0\n for idx in xrange(len(v1)):\n runsum += (v2[idx] - v1[idx]) ** 2\n\n return runsum / len(v1)\n\n #return sum( (x2 - x1)**2 for x1, x2 in zip(v1, v2) ) / float(len(v1))\n\n# subtract two lists from one another\ndef list_sub(v1, v2):\n return tuple( x1 - x2 for x1, x2 in zip(v1, v2) )\n\ndef list_add(v1, v2):\n return tuple( x1 + x2 for x1, x2 in zip(v1, v2) )\n\ndef col_average(lists):\n return [ col / len(lists) for col in col_sum(lists) ]\n\ndef average(l):\n return sum(l) / float(len(l))\n\n#def stddev(lists):\n# return tuple(numpy.std(l) for l in lists)\n\ndef col_sum(lists):\n return col_reduce(lambda x, y: x + y, lists)\n\ndef col_max(lists):\n return col_reduce(max, lists)\n\ndef col_min(lists):\n return col_reduce(min, lists)\n\ndef get_col(lists, col):\n \"\"\" Retrieve a single column from a data set as a list. \"\"\"\n\n return tuple( row[col] for row in lists )\n\ndef col_reduce(function, matrix):\n \"\"\" performs a 'reduce' operation on each column of a matrix \"\"\"\n\n cur = list(matrix[0])\n\n for row in matrix[1:]:\n for idx, item in enumerate(row):\n cur[idx] = function(cur[idx], item)\n\n return tuple(cur)\n\ndef xfrange(start, stop, step):\n if start > stop and step > 0.0 or start < stop and step < 0.0 or step == 0.0:\n sys.stderr.write(\"Bad start/stop/step! 
%f %f %f \\n\" % (start, stop, step))\n\n else:\n cur = 0.0\n while start + cur * step < stop:\n yield start + cur * step\n cur += 1.0\n \ndef binary(n):\n \"\"\" Generate all binary numbers of length n \"\"\"\n out = []\n _binary_helper(n, (), out)\n return tuple(out)\n\ndef _binary_helper(n, l, output):\n if n == 0:\n output.append(l)\n else:\n _binary_helper(n-1, l + (0,), output), _binary_helper(n-1, l + (1,), output)\n" }, { "alpha_fraction": 0.8258426785469055, "alphanum_fraction": 0.8314606547355652, "avg_line_length": 24.428571701049805, "blob_id": "ade6db38966377eae78579263b22144fdbdb265e", "content_id": "509a662bbe5947b5f0167425788a2dd614a6fd88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 178, "license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/src/filter.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# load configuration\nexport USE_FILTER_DESCRIPTOR_SETTING=1\nsource configuration.sh\n\n# filter configurations per demonstration\necho \"=>filtering configurations\"\npython filter.py\n" }, { "alpha_fraction": 0.7972350120544434, "alphanum_fraction": 0.8018433451652527, "avg_line_length": 26.125, "blob_id": "481628707a47dff288c11b9e6d909231cc7349a3", "content_id": "45f19fd24842bcc08b169ef136eb4ca63171e586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 217, "license_type": "no_license", "max_line_length": 67, "num_lines": 8, "path": "/src/match_sketches.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# use neural network image features for matching\nexport USE_LSH_DESCRIPTOR_SETTING=1\n\n# load configuration\nsource configuration.sh\n\n# execute script\npython match_sketches.py ../data/demonstrations_all/$ABM/sketched .\n" }, { "alpha_fraction": 0.664454996585846, "alphanum_fraction": 0.6714060306549072, "avg_line_length": 27.258928298950195, "blob_id": "e4774f1caf353de94b7c81dd6ef02ecbe90bf191", "content_id": "1361346e20e305c02277554221ed63446a8c9e8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3165, "license_type": "no_license", "max_line_length": 88, "num_lines": 112, "path": "/src/sampling/eum/predict_eum.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\n\nfrom negotiation_model import *\nfrom bdm_agent import *\n\n# seed this for fixed environment\n# for better replication of results\nrandom.seed(0)\n\n# constants\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 2\nSAMPLES = 10\nTIME_LAPSE = 200\n\ndef get_salience_values(timeLapse, numberOfAgents):\n salienceValues = np.zeros((timeLapse, numberOfAgents))\n\n for stepNumber in range(timeLapse):\n for agentNumber in range(numberOfAgents):\n salienceValues[stepNumber][agentNumber] = random.random()\n\n return salienceValues\n\n# Defining the model objects\nclass BDMActor(NegotiationActor):\n DecisionClass = BDM_Agent\n\nclass NegotiationModel_(NegotiationModel):\n # Variables for median caching\n median_computed_last = -1\n median = -1\n \n def find_median(self):\n if self.median_computed_last != self.schedule.steps:\n self.median = super().find_median()\n self.median_computed_last = self.schedule.steps\n return self.median\n\nclass ModelOutput:\n def __init__(self, model):\n '''\n Store data from model run.\n '''\n self.agent_vars = model.datacollector.get_agent_vars_dataframe()\n 
self.model_vars = model.datacollector.get_model_vars_dataframe()\n self.log = model.log\n\ndef load_data():\n # Load data \n bookData = pd.read_csv(\"sampling/eum/BDM_ColdWar.csv\")\n bookData.Position = (bookData.Position + 100)/200\n\n return bookData\n\ndef main_eum(qValue, tValue, bookData, salienceValues):\n # define agents\n agents = []\n\n for i, row in bookData.iterrows():\n newAgent = BDMActor(row.Country, row.Capability, row.Position, 1)\n newAgent.decision_model.Q = qValue\n newAgent.decision_model.T = tValue\n newAgent.salience = salienceValues[i]\n agents.append(newAgent)\n\n # instantiate model\n model = NegotiationModel_(agents)\n\n # run model\n for stepNumber in range(TIME_LAPSE):\n agentNumber = 0\n for agent in model.agents:\n #agent.salience = random.random()\n agent.salience = salienceValues[stepNumber][agentNumber]\n agentNumber += 1 \n model.step()\n\n # collect data for next steps\n dependentValues = []\n modelOutput = ModelOutput(model)\n dependentValues.append(np.mean(list(modelOutput.model_vars[\"Median\"][-SAMPLES-1:])))\n dependentValues.append(np.mean(list(modelOutput.model_vars[\"Mean\"][-SAMPLES-1:])))\n \n return tuple(dependentValues)\n \ndef main():\n # read data\n bookData = load_data()\n\n # get salience values\n [rows, columns] = bookData.shape\n salienceValues = get_salience_values(TIME_LAPSE, rows)\n\n # simulate using specific ALPs\n alpConfigurationList = eval(sys.argv[1])\n\n # store corresponding slps\n slpConfigurationList = []\n\n for alpConfiguration in alpConfigurationList:\n [qValue, tValue] = alpConfiguration\n\n # run model using those ALPs\n dependentValues = main_eum(qValue, tValue, bookData, salienceValues)\n slpConfigurationList.append(dependentValues)\n\n print(slpConfigurationList)\n \nmain()\n" }, { "alpha_fraction": 0.648815393447876, "alphanum_fraction": 0.6504195332527161, "avg_line_length": 46.11627960205078, "blob_id": "35dd288d1da85d245866b180a4d70d40dbca58ec", "content_id": "49685798ac9fa1fd65d6d86ee36b4a5a04ac1523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8104, "license_type": "no_license", "max_line_length": 282, "num_lines": 172, "path": "/src/filter.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import os\nimport sampling.utils.featurize_image as fi\nfrom sampling.utils.process_folder_images import process_folder\nimport sys\nimport inspect\nimport glob\nimport numpy as np\n\ncurrentDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentDir = os.path.dirname(currentDir)\nsys.path.insert(0,parentDir)\n\nimport amf.misc as misc\n\ndef normalized_distance(_a, _b):\n _a = np.array(_a)\n _b = np.array(_b)\n\n b = _b.astype(int)\n a = _a.astype(int)\n norm_diff = np.linalg.norm(b - a)\n norm1 = np.linalg.norm(b)\n norm2 = np.linalg.norm(a)\n return norm_diff / (norm1 + norm2)\n\nclass FilterConfiguration(object):\n def __init__(self):\n # get environment variables\n self.abm = os.environ[\"ABM\"]\n self.demonstrationFolder = os.environ[\"DEMONSTRATION_FOLDER\"]\n self.descriptorSetting = os.environ[\"DESCRIPTOR_SETTING\"]\n self.numDependent = eval(os.environ[\"NUM_DEPENDENT\"])\n self.dataFileName = os.environ[\"DATA_FILE\"]\n self.scaleDataFileName = os.environ[\"SCALE_DATA_FILE\"]\n self.images = eval(os.environ[\"IMAGES\"])\n\n # read configuration\n configurationFileName = os.environ[\"CONFIGURATION_FILE\"]\n configuration = open(configurationFileName).read()\n execd = {}\n exec(configuration, 
execd)\n self.numIndependent = execd['NUM_INDEPENDENT']\n \n# filter selected configurations based on closeness to demonstrations\ndef select_demonstration_configurations(filterConfiguration, scaleData):\n # load network if needed\n net = fi.get_network(filterConfiguration.descriptorSetting)\n\n # for normalization\n if(scaleData is not None):\n [dependentValuesMax, dependentValuesMin] = scaleData\n \n # process directories in sorted order\n sortedPaths = []\n for dirName, subdirList, fileList in os.walk(filterConfiguration.demonstrationFolder):\n for sortedSubdirName in sorted(subdirList):\n sortedPaths.append(dirName + \"/\" + sortedSubdirName)\n\n # get slps for demonstrations\n demonstrationSlpsList = []\n demonstrationNameList = []\n for sortedPath in sortedPaths:\n for dirName, subdirList, fileList in os.walk(sortedPath):\n # only use non-empty subdirectories\n if(len(fileList) > 0):\n # traverse all files in this directory\n if(bool(filterConfiguration.images) == True):\n # read dependent values as images\n dependentValues = [tuple(fi.get_features(net, dirName + \"/\" + fileName, filterConfiguration.descriptorSetting)) for fileName in fileList]\n else:\n # read dependent values as text\n dependentValues = [tuple([eval(x) for x in open(dirName + \"/\" + fileName).readlines()[0].replace(\"\\n\", \"\").split(\" \")]) for fileName in fileList]\n\n dependentValues = tuple(misc.col_average(dependentValues))\n demonstrationSlpsList.append(dependentValues)\n demonstrationName = dirName.split(\"/\")[-1].strip(\" \")\n demonstrationNameList.append(demonstrationName)\n\n minDistanceAlpsList = []\n minDistanceSlpsList = []\n for demonstrationIndex in range(len(demonstrationSlpsList)):\n demonstrationSlps = demonstrationSlpsList[demonstrationIndex]\n\n # normalize demonstration slps\n if(scaleData is not None):\n demonstrationSlps = tuple((demonstrationSlps[index] - dependentValuesMin[index])/(dependentValuesMax[index] - dependentValuesMin[index]) if (dependentValuesMax[index] > dependentValuesMin[index]) else dependentValuesMax[index] for index in range(len(demonstrationSlps)))\n \n distanceList = []\n \n with open(\"suggested_slps.txt\", \"r\") as suggestedSlpsFile:\n lines = suggestedSlpsFile.readlines()\n \n for suggestionIndex in range(len(lines)):\n # get slp corresponding to this demonstration\n line = lines[suggestionIndex]\n suggestedSlps = eval(line)[demonstrationIndex]\n\n if(bool(filterConfiguration.images) == True):\n if(eval(os.environ[\"IMAGE_FEATURIZATION_HOMOGENEOUS\"]) == 0):\n # this is needed if the two image featurization stages are different\n folderNames = glob.glob(\"sampling/\" + filterConfiguration.abm + \"/images_*\")\n suggestionFolder = folderNames[suggestionIndex]\n suggestedSlps = process_folder(suggestionFolder)\n \n # normalize suggested slps\n if(scaleData is not None):\n suggestedSlps = tuple((suggestedSlps[index] - dependentValuesMin[index])/(dependentValuesMax[index] - dependentValuesMin[index]) if (dependentValuesMax[index] > dependentValuesMin[index]) else dependentValuesMax[index] for index in range(len(suggestedSlps)))\n \n # compute distance\n if(os.environ[\"DISTANCE_METHOD\"] == \"NORMALIZED\"):\n distance = normalized_distance(suggestedSlps, demonstrationSlps)\n else:\n # default option\n distance = misc.distance(suggestedSlps, demonstrationSlps)\n distanceList.append((distance, suggestionIndex, suggestedSlps))\n\n # select entry corresponding to smallest distance\n minDistanceIndex = min(distanceList)[1]\n minDistanceSlps = min(distanceList)[2]\n\n # read the suggested alps corresponding to that index
\n with open(\"suggested_alps.txt\", \"r\") as suggestedAlpsFile:\n minDistanceAlps = eval(suggestedAlpsFile.readlines()[minDistanceIndex])[demonstrationIndex]\n \n # write to file\n directory = \"../data/predictions/\" + filterConfiguration.abm + \"/\" + demonstrationNameList[demonstrationIndex] \n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(directory + \"/predicted_alps.txt\", \"w\") as predictionFile:\n predictionFile.write(str(minDistanceAlps) + \" \" + str(minDistanceSlps))\n\n if(bool(filterConfiguration.images) == True):\n # if images used, copy images\n # get all image simulation folders\n folderNames = glob.glob(\"sampling/\" + filterConfiguration.abm + \"/images_*\")\n minFolder = folderNames[minDistanceIndex]\n os.system(\"cp -r \" + minFolder + \" \" + directory)\n \n# filter configurations with command line parameters\ndef main():\n filterConfiguration = FilterConfiguration()\n\n # edit scale data file name based on descriptor setting\n scaleDataFileNamePrefix = filterConfiguration.scaleDataFileName.split(\".\")[0]\n scaleDataFileNameExtension = filterConfiguration.scaleDataFileName.split(\".\")[1]\n scaleDataFileName = scaleDataFileNamePrefix + \"_\" + filterConfiguration.abm\n if(bool(filterConfiguration.images) == True):\n scaleDataFileName += \"_\" + filterConfiguration.descriptorSetting.lower()\n scaleDataFileName += \".\" + scaleDataFileNameExtension\n\n # get scale information\n fileName = \"sampling/\" + filterConfiguration.abm + \"/\" + scaleDataFileName\n\n # scale data assumed not present by default\n scaleData = None\n\n if(os.path.exists(fileName) == True):\n with open(fileName, \"r\") as scaleDataFile:\n # read in order of writing in map.py\n maxs = eval(scaleDataFile.readline().strip(\"\\n\"))\n mins = eval(scaleDataFile.readline().strip(\"\\n\"))\n\n dependentMaxs = maxs[filterConfiguration.numIndependent:]\n dependentMins = mins[filterConfiguration.numIndependent:]\n dependentValuesMax = {i: dependentMaxs[i] for i in range(len(dependentMaxs))}\n dependentValuesMin = {i: dependentMins[i] for i in range(len(dependentMins))}\n scaleData = [dependentValuesMax, dependentValuesMin]\n\n select_demonstration_configurations(filterConfiguration, scaleData)\n \n# execute main\nmain()\n" }, { "alpha_fraction": 0.593353271484375, "alphanum_fraction": 0.5987563133239746, "avg_line_length": 23.286956787109375, "blob_id": "81b80e6126f8ea8bfb5aacaaa1f1220f4c320dd7", "content_id": "f57dbc1885b6e61862bd3d9c7a5dad507ac44b03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2678, "license_type": "no_license", "max_line_length": 129, "num_lines": 115, "path": "/src/sampling/schelling/sample_schelling_parallel.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import SchellingModel\nimport random\nimport multiprocessing as mp\n\n# only use two of the several ALPs\n# density (0,1)\n# minority_pc (0,1)\n\n# use one model-level SLP\n# happy\n\n# parallel processing constants\nBATCH_SIZE = 8\n\n# constant ALPs\nHEIGHT = 20\nWIDTH = 20\nHOMOPHILY = 4\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 1\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 1000\n\n# process single run data\ndef process_run(q, run, density, minorityPc):\n try:\n line = main_schelling(density, minorityPc)\n except:\n line = \"\"\n \n # add newline for clean output\n 
line = \"\\n\" + line\n \n # write to queue\n q.put(line)\n \ndef main_schelling(density, minorityPc):\n # instantiate and run model\n model = SchellingModel(\n height=HEIGHT,\n width=WIDTH,\n density=density,\n minority_pc=minorityPc,\n homophily=HOMOPHILY)\n\n for i in range(SAMPLES):\n try:\n # step\n model.step()\n except:\n # saturated\n # no empty cells\n pass\n\n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n # step\n model.step()\n\n # read data\n data = model.datacollector.get_model_vars_dataframe() \n dependentValues.append(list(data.happy)[-SAMPLES-1:])\n\n # print line corresponding to this execution\n line = str(density) + \" \" + str(minorityPc)\n for dependentValue in dependentValues:\n line += \" \" + str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n\n return line\n\n \ndef main():\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n densityValues = []\n minorityPcValues = []\n \n for run in range(RUNS):\n # sample random ALPs\n density = random.random() \n minorityPc = random.random() \n\n densityValues.append(density)\n minorityPcValues.append(minorityPc)\n\n # setup a list of processes that we want to run\n q = mp.Queue()\n processes = [mp.Process(target=process_run, args=(q, run, densityValues[run], minorityPcValues[run])) for run in range(RUNS)]\n batchSize = BATCH_SIZE\n batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n outFile = open(\"output.txt\", \"a\")\n \n for batch in batches:\n # run processes\n for p in batch:\n p.start()\n for p in batch:\n line = q.get()\n outFile.write(line)\n # exit the completed processes\n for p in batch:\n p.join()\n\n outFile.close()\n\nmain()\n" }, { "alpha_fraction": 0.5906005501747131, "alphanum_fraction": 0.6073107123374939, "avg_line_length": 31.457626342773438, "blob_id": "9a7f39e9b6aef59e42587e05febaddc8a1629db8", "content_id": "25299fd63872ecf9830f3e159794c1cad234e2cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1915, "license_type": "no_license", "max_line_length": 106, "num_lines": 59, "path": "/snippets/turbulence/plot_suggested_slps.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n#demonstrations = [\"a\", \"b\", \"c\"]\n#demonstrations = [\"_a\", \"_b\", \"_c\"]\ndemonstrations = [\"a\"]\n#demonstrations = [\"_a\"]\n#demonstrations = [\"a_filtered\"]\n\nfolderName = \"error_weighted\"\n#folderName = \"min_error\"\n\nwith open(\"../../src/sampling/turbulence/scale_data_turbulence_density.txt\", \"r\") as inFile:\n allLines = inFile.readlines()\n maxValueDensity = eval(allLines[0])[-1]\n minValueDensity = eval(allLines[1])[-1]\n\nfor index, item in enumerate(demonstrations):\n allPointsX = []\n allPointsY = []\n\n # scatter\n \n plt.figure()\n with open(folderName + \"/suggested_slps_\" + item + \".txt\", \"r\") as inFile:\n for line in inFile:\n point = eval(line)[0]\n\n # scale\n #point = tuple([(point[0]-minValueDensity)/(maxValueDensity-minValueDensity)])\n\n plt.scatter(point[0], point[0], c=\"b\", marker=\"o\", alpha=0.3)\n allPointsX.append(point[0])\n allPointsY.append(point[0])\n \n plt.scatter(point[0], point[0], c=\"b\", marker=\"o\", alpha=0.3, label=\"Suggested SLPs\")\n\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n point = eval(inFile.readlines()[index])\n\n # scale\n #point = 
tuple([(point[0]-minValueDensity)/(maxValueDensity-minValueDensity)])\n \n plt.scatter(point[0], point[0], c=\"r\", marker=\"s\", alpha=1.0, label=\"Demonstration SLPs\")\n\n plt.scatter(np.mean(allPointsX), np.mean(allPointsY), c=\"g\", marker=\"s\", alpha=1.0, label=\"Mean SLPs\")\n plt.xlabel(\"Density\")\n plt.ylabel(\"Density\")\n plt.legend()\n plt.savefig(folderName + \"/suggested_slps_\" + item)\n\n # histogram\n \n plt.figure()\n plt.hist(allPointsX, bins=50, range=(0,50))\n plt.xlabel(\"Density\")\n plt.ylabel(\"Number of Points\")\n plt.gca().set_ylim([0,100])\n plt.savefig(folderName + \"/histogram_suggested_slps_\" + item)\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7785714268684387, "avg_line_length": 22.33333396911621, "blob_id": "f7f52b9a07b9f0ffd9d5d242d30cadc8e5c9381a", "content_id": "c2c9a561ebca1d1d5f50d1ef74188a8d3d61f2bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/src/domainconfs/turbulence.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "TRAINING_SIZE = 200\nVALIDATION_SIZE = 2500\nRM_GRANULARITY = 300\nREGRESSION = \"sklearn.neighbors.KNeighborsRegressor()\"\n\nNUM_INDEPENDENT = 2\n" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.5873016119003296, "avg_line_length": 41, "blob_id": "7b5b921b865a519db7dd55473231f2561b10b310", "content_id": "bcbbfa8916d5a7b31b6eeee281019ab173e38515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 61, "num_lines": 6, "path": "/data/domaindata/fix_results.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# needed in case two results answers the same name identifier\nwith open(\"results.csv\", \"r\") as inFile:\n with open(\"temp.csv\", \"w\") as outFile:\n for line in inFile:\n line = line.replace(\"|\", \"\\\",\\\"\")\n outFile.write(line)\n" }, { "alpha_fraction": 0.6573289632797241, "alphanum_fraction": 0.6801303029060364, "avg_line_length": 22.25757598876953, "blob_id": "56265a9ff22d8615011ce681c7f3c10fa2c4981b", "content_id": "bef667f68ffdf3fceb5aef11dcc07b40212654f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1535, "license_type": "no_license", "max_line_length": 67, "num_lines": 66, "path": "/src/sampling/forest_fire/predict_forest_fire.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import ForestFire\nimport random\nimport sys\nimport numpy as np\n\n# only one ALP\n# density (0,1)\n\n# constant ALPs\nHEIGHT = 100\nWIDTH = 100\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 1\nDEPENDENT_VARIABLES = 3\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 200\n\ndef main_forest_fire(density=0.65):\n # instantiate and run model\n model = ForestFire(height=HEIGHT, width=WIDTH, density=density)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n model.step()\n\n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n # step\n model.step()\n\n # read data\n data = model.datacollector.get_model_vars_dataframe()\n burnedTrees = np.array(list(data.BurnedOut)[-SAMPLES-1:])\n fineTrees = np.array(list(data.Fine)[-SAMPLES-1:])\n initialTrees = burnedTrees + fineTrees\n 
dependentValues.append(np.mean(burnedTrees/initialTrees))\n\n return tuple(dependentValues)\n \ndef main():\n # fixed randomness\n # parimarily done to test feedback\n random.seed(0)\n \n # simulate using specific ALPs\n alpConfigurationList = eval(sys.argv[1])\n\n # store corresponding slps\n slpConfigurationList = []\n \n for alpConfiguration in alpConfigurationList:\n [density] = alpConfiguration\n \n # run model using those ALPs\n dependentValues = main_forest_fire(density)\n slpConfigurationList.append(dependentValues)\n\n print(slpConfigurationList)\n \nmain()\n" }, { "alpha_fraction": 0.6421149373054504, "alphanum_fraction": 0.6500611305236816, "avg_line_length": 27.452173233032227, "blob_id": "9f0341d6f2f2df8aa43a8dffb7056ede562fc780", "content_id": "890c9096b744269d09268aa6c9619c8fa586c910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3272, "license_type": "no_license", "max_line_length": 92, "num_lines": 115, "path": "/src/sampling/eum/sample_eum.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\n\nfrom negotiation_model import *\nfrom bdm_agent import *\n\n# seed this for fixed environment\n# for better replication of results\nrandom.seed(0)\n\n# constants\nINDEPENDENT_VARIABLES = 2\nDEPENDENT_VARIABLES = 2\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 200\n\ndef get_salience_values(timeLapse, numberOfAgents):\n salienceValues = np.zeros((timeLapse, numberOfAgents))\n\n for stepNumber in range(timeLapse):\n for agentNumber in range(numberOfAgents):\n salienceValues[stepNumber][agentNumber] = random.random()\n\n return salienceValues\n\n# Defining the model objects\nclass BDMActor(NegotiationActor):\n DecisionClass = BDM_Agent\n\nclass NegotiationModel_(NegotiationModel):\n # Variables for median caching\n median_computed_last = -1\n median = -1\n \n def find_median(self):\n if self.median_computed_last != self.schedule.steps:\n self.median = super().find_median()\n self.median_computed_last = self.schedule.steps\n return self.median\n\nclass ModelOutput:\n def __init__(self, model):\n '''\n Store data from model run.\n '''\n self.agent_vars = model.datacollector.get_agent_vars_dataframe()\n self.model_vars = model.datacollector.get_model_vars_dataframe()\n self.log = model.log\n\ndef load_data():\n # Load data \n bookData = pd.read_csv(\"BDM_ColdWar.csv\")\n bookData.Position = (bookData.Position + 100)/200\n\n return bookData\n\ndef main_eum(qValue, tValue, bookData, salienceValues):\n # define agents\n agents = []\n\n for i, row in bookData.iterrows():\n newAgent = BDMActor(row.Country, row.Capability, row.Position, 1)\n newAgent.decision_model.Q = qValue\n newAgent.decision_model.T = tValue\n newAgent.salience = salienceValues[i]\n agents.append(newAgent)\n\n # instantiate model\n model = NegotiationModel_(agents)\n\n # run model\n for stepNumber in range(TIME_LAPSE):\n agentNumber = 0\n for agent in model.agents:\n #agent.salience = random.random()\n agent.salience = salienceValues[stepNumber][agentNumber]\n agentNumber += 1 \n model.step()\n\n # collect data for next steps\n dependentValues = []\n modelOutput = ModelOutput(model)\n dependentValues.append(list(modelOutput.model_vars[\"Median\"][-SAMPLES-1:]))\n dependentValues.append(list(modelOutput.model_vars[\"Mean\"][-SAMPLES-1:]))\n \n # print line corresponding to this execution\n line = str(qValue) + \" \" + str(tValue)\n for dependentValue in dependentValues:\n line += \" \" + 
str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n print(line)\n\ndef main():\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n # read data\n bookData = load_data()\n\n # get salience values\n [rows, columns] = bookData.shape\n salienceValues = get_salience_values(TIME_LAPSE, rows)\n \n for run in range(RUNS):\n print(run, file=sys.stderr)\n \n # sample random ALPs\n qValue = random.random()\n tValue = random.random()\n \n # run model using those ALPs\n main_eum(qValue, tValue, bookData, salienceValues)\n \nmain()\n" }, { "alpha_fraction": 0.5925377607345581, "alphanum_fraction": 0.5987563133239746, "avg_line_length": 39.2023811340332, "blob_id": "dab20f1fa3ceb3346a5f77d3e106f650a24c62ae", "content_id": "d69d05eab60363b2473cf96ff9331a809e81d73e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3377, "license_type": "no_license", "max_line_length": 103, "num_lines": 84, "path": "/src/amf/fm.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# fm.py contains the ForwardMapping frontend.\n\nimport data\nimport sys\nimport numpy as np\nimport sklearn\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom smt.surrogate_models import RBF, KRG, KPLS, KPLSK\nimport os\n\n# This thing is the Forward Mapping\nclass ForwardMapping(object):\n\n # pass in the regression algorithm that we will be using, along with any initialization parameters\n def __init__(self, regression_class, num_dependent):\n \"\"\"\n Initialized a forward mapping.\n Pass in the regression class to be used, the initial parameters for initlializing this reg class.\n Also, pass the the number of dependent variables the data set will have.\n \"\"\"\n self._mappers = [eval(regression_class) for x in range(num_dependent)]\n\n def train(self, training_set):\n # remove data entries which contain nan\n training_set = [x for x in training_set if not np.isnan(x[0]).any()]\n training_set = [x for x in training_set if not np.isnan(x[1]).any()]\n training_sets = data.split_dep(training_set)\n\n for mapper, tset in zip(self._mappers, training_sets):\n X = np.array([list(x[0]) for x in tset])\n Y = np.array([x[1] for x in tset])\n \n if(\"smt\" in str(mapper)):\n # using surrogate modeling toolbox\n X = np.array([x[0] for x in X])\n VThreshold = 0.001\n\n XSafe = [X[0]]\n YSafe = [Y[0]]\n for index in range(1, len(X)):\n currentX = X[index]\n currentY = Y[index]\n XSafetyCheck = XSafe + [currentX]\n YSafetyCheck = YSafe + [currentY]\n\n # we need to only collect those points that do not case matrix inversion problems\n # check using svd\n configurationFileName = os.environ[\"CONFIGURATION_FILE\"]\n configuration = open(configurationFileName).read()\n execd = {}\n exec(configuration, execd)\n numIndependent = execd['NUM_INDEPENDENT']\n numDependent = eval(os.environ[\"NUM_DEPENDENT\"])\n XSafetyCheck = np.array(XSafetyCheck).reshape(numIndependent, -1)\n YSafetyCheck = np.array(YSafetyCheck).reshape(numDependent, -1)\n XYSafetyCheck = np.concatenate((XSafetyCheck, YSafetyCheck), axis=0)\n U, s, V = np.linalg.svd(XYSafetyCheck, full_matrices=True)\n V = np.absolute(V)\n\n if(np.min(V) > VThreshold):\n XSafe.append(currentX)\n YSafe.append(currentY)\n \n # convert to numpy array\n # train model\n XSafe = np.array(XSafe)\n YSafe = np.array(YSafe)\n mapper.set_training_values(XSafe, YSafe)\n mapper.train()\n else:\n mapper.fit(X, Y)\n\n def predict(self, configuration):\n prediction = 
[]\n for mapper in self._mappers:\n if(\"smt\" in str(mapper)):\n mapperPrediction = mapper.predict_values(np.array(list(configuration)).reshape(1, -1))\n mapperPrediction = [x[0] for x in mapperPrediction]\n else:\n mapperPrediction = mapper.predict(np.array(list(configuration)).reshape(1, -1)) \n prediction.append(mapperPrediction)\n prediction = tuple([x[0] for x in prediction])\n \n return prediction\n" }, { "alpha_fraction": 0.7027027010917664, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 36, "blob_id": "80bd3913abf6b1ca1504165909b145c267f58805", "content_id": "8cb3915a5ec3995ca8a235d648d9cf09a614929c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 37, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/app/run_public_url.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "ngrok http 5000 -subdomain=swarm-lfd\n" }, { "alpha_fraction": 0.6554731726646423, "alphanum_fraction": 0.6720453500747681, "avg_line_length": 25.662790298461914, "blob_id": "c00719c19bf8a7489d616f74b4ffb39138f77786", "content_id": "dfe90210275d5a2213fa24eaae6b80a264af5f29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2293, "license_type": "no_license", "max_line_length": 89, "num_lines": 86, "path": "/src/sampling/civil_violence/predict_civil_violence.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import CivilViolenceModel\nimport random\nimport sys\nimport numpy as np\nimport os\n\n# only use three of the several ALPs\n# citizen_density (0,1)\n# cop_density (0,1)\n# legitimacy (0,1)\n\n# use three model-level SLPs\n# Quiescent\n# Active\n# Jailed\n\n# constant ALPs\nHEIGHT = 40\nWIDTH = 40\nCITIZEN_VISION = 7\nCOP_VISION = 7\nMAX_JAIL_TERM = 1000\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 3\nDEPENDENT_VARIABLES = 3\nSAMPLES = 10\nTIME_LAPSE = 1000\n\ndef main_civil_violence(citizenDensity, copDensity, legitimacy):\n # instantiate and run model\n model = CivilViolenceModel(\n height=HEIGHT,\n width=WIDTH,\n citizen_vision=CITIZEN_VISION,\n cop_vision=COP_VISION,\n max_jail_term=MAX_JAIL_TERM,\n citizen_density=citizenDensity,\n cop_density=copDensity,\n legitimacy=legitimacy)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n model.step()\n\n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n # step\n model.step()\n\n # read data\n data = model.dc.get_model_vars_dataframe() \n dependentValues.append(np.mean(list(data.Quiescent)[-SAMPLES-1:]))\n dependentValues.append(np.mean(list(data.Active)[-SAMPLES-1:]))\n dependentValues.append(np.mean(list(data.Jailed)[-SAMPLES-1:]))\n\n return tuple(dependentValues)\n \ndef main():\n # simulate using specific ALPs\n alpConfigurationList = eval(sys.argv[1])\n\n # store corresponding slps\n slpConfigurationList = []\n \n for alpConfiguration in alpConfigurationList:\n [citizenDensity, copDensity, legitimacy] = alpConfiguration\n\n # cop density + citizen density should be less than 1\n if(citizenDensity + copDensity > 1):\n # invalid parameters\n # set to infinity so distance is infinite\n # this value will be considered as a bad suggestion when filtering\n dependentValues = [float(\"Inf\")]*eval(os.environ[\"NUM_DEPENDENT\"])\n else:\n # run model using those ALPs\n dependentValues = main_civil_violence(citizenDensity, copDensity, legitimacy)\n 
slpConfigurationList.append(dependentValues)\n\n print(slpConfigurationList)\n \nmain()\n" }, { "alpha_fraction": 0.6349663734436035, "alphanum_fraction": 0.6551392674446106, "avg_line_length": 25.024999618530273, "blob_id": "180657a6bfc68f62ae32fc65df08057a4dceb821", "content_id": "0dac153578d624ace0ca139bc6150746db256084", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 92, "num_lines": 80, "path": "/src/sampling/civil_violence/sample_civil_violence.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import CivilViolenceModel\nimport random\n\n# only use three of the several ALPs\n# citizen_density (0,1)\n# cop_density (0,1)\n# legitimacy (0,1)\n\n# use three model-level SLPs\n# Quiescent\n# Active\n# Jailed\n\n# constant ALPs\nHEIGHT = 40\nWIDTH = 40\nCITIZEN_VISION = 7\nCOP_VISION = 7\nMAX_JAIL_TERM = 1000\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 3\nDEPENDENT_VARIABLES = 3\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 1000\n\ndef main_civil_violence(citizenDensity, copDensity, legitimacy):\n # instantiate and run model\n model = CivilViolenceModel(\n height=HEIGHT,\n width=WIDTH,\n citizen_vision=CITIZEN_VISION,\n cop_vision=COP_VISION,\n max_jail_term=MAX_JAIL_TERM,\n citizen_density=citizenDensity,\n cop_density=copDensity,\n legitimacy=legitimacy)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n model.step()\n\n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n # step\n model.step()\n\n # read data\n data = model.dc.get_model_vars_dataframe() \n dependentValues.append(list(data.Quiescent)[-SAMPLES-1:])\n dependentValues.append(list(data.Active)[-SAMPLES-1:])\n dependentValues.append(list(data.Jailed)[-SAMPLES-1:])\n\n # print line corresponding to this execution\n line = str(citizenDensity) + \" \" + str(copDensity) + \" \" + str(legitimacy)\n for dependentValue in dependentValues:\n line += \" \" + str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n print(line)\n\n \ndef main():\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n for run in range(RUNS):\n # sample random ALPs\n # cop density + citizen density should be less than 1\n citizenDensity = random.random() \n copDensity = (1-citizenDensity)*random.random() \n legitimacy = random.random()\n \n # run model using those ALPs\n main_civil_violence(citizenDensity, copDensity, legitimacy)\n\nmain()\n" }, { "alpha_fraction": 0.6672787070274353, "alphanum_fraction": 0.6705839037895203, "avg_line_length": 31.807228088378906, "blob_id": "dc44bad69890e9034018b1c7ed7e876a945268d2", "content_id": "4069ce92e26a40606230288b7df6e0d449728504", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 198, "num_lines": 83, "path": "/src/map.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# This script takes each row of a data set and processes it for use with AMF\nimport sys\nimport math\nimport os\n\nno_entry = \"NOENTRY\"\nSCALE_DATA_FILE = \"scale_data.txt\"\n\ndef map_abm(tokens):\n \"\"\"\n The raw data set is a combination of floats followed by a combination of lists and looks like this:\n FLOAT FLOAT ... 
LIST LIST ...\n Non-lists are independent variables\n Lists are dependent variables\n It takes the average of the list and returns it with the independent variables.\n \"\"\"\n\n tokenAvg = []\n\n for token in tokens:\n if(type(token) == list):\n avg = sum(token)/len(token)\n else:\n avg = token\n\n tokenAvg.append(avg)\n\n return tuple(tokenAvg)\n\ndef linear_scale(dataset):\n \"\"\"Linearly scales an entire data set to a range of 0-1\"\"\"\n\n maxs = list(dataset[0])\n mins = list(dataset[0])\n\n for datarow in dataset:\n for idx, item in enumerate(datarow):\n if item == no_entry: continue\n\n maxs[idx] = max(maxs[idx], item)\n mins[idx] = min(mins[idx], item)\n\n scaleDataFileName = SCALE_DATA_FILE\n abm = os.environ[\"ABM\"]\n descriptorSetting = os.environ[\"DESCRIPTOR_SETTING\"]\n\n # edit scale data file name based on descriptor setting\n scaleDataFileNamePrefix = scaleDataFileName.split(\".\")[0]\n scaleDataFileNameExtension = scaleDataFileName.split(\".\")[1]\n scaleDataFileName = scaleDataFileNamePrefix + \"_\" + abm\n if(len(descriptorSetting) > 0):\n # valid descriptor\n scaleDataFileName += \"_\" + descriptorSetting.lower()\n scaleDataFileName += \".\" + scaleDataFileNameExtension\n \n with open(scaleDataFileName, \"w\") as scaleDataFile:\n scaleDataFile.write(str(maxs))\n scaleDataFile.write(\"\\n\")\n scaleDataFile.write(str(mins))\n\n return [ [ (no_entry if item == no_entry else maxs[idx] if maxs[idx] == mins[idx] else (item - mins[idx]) / (maxs[idx] - mins[idx])) for idx, item in enumerate(datarow) ] for datarow in dataset ]\n\ndef parse(data_file_path):\n \"\"\"\n Applies 'map_function' to each row (as a string) of a data set.\n 'map_function' should takes a list of tokens as the only parameter and return a string as the only parameter.\n Prints out the new data set.\n \"\"\"\n\n return [ [ float(token) if token.find(',') == -1 else [ float(item) for item in token.split(',') ] for token in line.split() ] for line in open(data_file_path).xreadlines() ]\n\ndef dump(dataset):\n print \"\\n\".join( \" \".join(str(item) for item in row) for row in dataset)\n\ndef process_abm():\n parsed = parse(sys.argv[1])\n averaged = map(map_abm, parsed)\n scaled = linear_scale(averaged)\n dump(scaled)\n\n# average over system level paramters in raw data file\n# raw data file is provided as argument\nprocess_abm()\n" }, { "alpha_fraction": 0.6269554495811462, "alphanum_fraction": 0.6281588673591614, "avg_line_length": 26.09782600402832, "blob_id": "910587596e08507a8f46aa10de2d59ca35161a15", "content_id": "a29d16ac260c7abde4aa19fa97ee8bb0378abaa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2493, "license_type": "no_license", "max_line_length": 130, "num_lines": 92, "path": "/src/sampling/utils/process_training_images.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# libraries used\nimport sys\nimport os\nimport featurize_image as fi\nimport multiprocessing as mp\nimport glob\n\n# parallel processing constants\nBATCH_SIZE = 8\n\n# process single run data\ndef process_run(q, run, runConfiguration, line):\n\n # unpack run configuration\n [inFolder, dataFile, numDependent, net, descriptorSetting] = runConfiguration\n\n # initialize collection of dependent values\n depValuesStrings = [\"\"]*numDependent\n\n for imageName in glob.glob(inFolder + \"/\" + str(run) + \"_*.*\"):\n # read image\n print >> sys.stderr, (\"now processing \" + imageName)\n \n # define dependent values\n depValues = 
fi.get_features(net, imageName, descriptorSetting)\n\n for index in range(len(depValues)):\n depValuesStrings[index] += str(depValues[index]) + \",\"\n\n # add value data to line\n for depValuesString in depValuesStrings:\n line += \" \" + depValuesString[:-1]\n\n # remove extra \",\" at end\n line = line + \"\\n\"\n\n # write to queue\n q.put(line)\n\n# read images and extract features as dependent variables\ndef main():\n\n # get environment variables\n abm = os.environ[\"ABM\"]\n descriptorSetting = os.environ[\"DESCRIPTOR_SETTING\"]\n numDependent = eval(os.environ[\"NUM_DEPENDENT\"])\n dataFileName = os.environ[\"DATA_FILE\"]\n\n dataFile = open(abm + \".txt\", \"r\")\n tempFile = open(\"temp.txt\", \"w\")\n\n # set up image reading\n inFolder = abm + \"/images\"\n\n # load feature extractor if needed\n net = fi.get_network(descriptorSetting)\n\n # read data\n lines = dataFile.readlines()\n\n # get number of runs\n runs = len(lines)\n \n # process runs\n runConfiguration = [inFolder, dataFile, numDependent, net, descriptorSetting]\n\n # setup a list of processes that we want to run\n q = mp.Queue()\n processes = [mp.Process(target=process_run, args=(q, run, runConfiguration, lines[run].strip(\" \\n\"))) for run in range(runs)] \n batchSize = BATCH_SIZE\n batches = [processes[i:i+batchSize] for i in range(0, len(processes), batchSize)]\n\n for batch in batches:\n # run processes\n for p in batch:\n p.start()\n for p in batch:\n line = q.get()\n tempFile.write(line)\n # exit the completed processes\n for p in batch:\n p.join()\n\n # close files and rename\n dataFile.close()\n tempFile.close()\n\n os.remove(abm + \".txt\")\n os.rename(\"temp.txt\", abm + \".txt\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5741397738456726, "alphanum_fraction": 0.5817003846168518, "avg_line_length": 35.206703186035156, "blob_id": "9310d151e1411d6b2469b6a34580daca836eca07", "content_id": "6e231729abd51e8557ec5f9e45a0633308bc892a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6481, "license_type": "no_license", "max_line_length": 121, "num_lines": 179, "path": "/src/sampling/utils/outlier_detection.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.io as sio\nfrom sets import Set\nfrom ltfatpy.sigproc.thresh import thresh\nfrom sklearn.cluster import KMeans, MeanShift\nfrom collections import Counter\n\nimport sys\n\ndef lpod(trainData, testData, trainLabels, testLabels, lNum, tol, lambdaValue):\n # this is a python port of the original matlab code provided by the authors\n # of the 2017 paper titled \"Efficient Outlier Detection for High-Dimensional Data\"\n \n # supress invalid division warning\n # this happens in the matlab code too so we're ok\n np.seterr(divide='ignore', invalid='ignore')\n \n # size of x and y\n # check for size mismatch\n [rtrainData,ctrainData] = trainData.shape\n [rtestData,ctestData] = testData.shape\n assert rtrainData == rtestData\n\n # allocate memory to the maximum size\n # T is the score matrix of trainData\n # P is the loading matrix of trainData\n # U is the score matrix of testData\n # Q is the loading matrix of testData\n # B is the matrix of regression coefficient\n # W is the weight matrix of trainData \n n = max(ctrainData, ctestData)\n T = np.zeros((rtrainData, n)) \n P = np.zeros((ctrainData, n)) \n U = np.zeros((rtestData, n)) \n Q = np.zeros((ctestData, n)) \n B = np.zeros((n, n)) \n W = P \n k = 0\n\n # iteration loop if 
residual is larger than specified\n    iteration = 1\n\n    while(iteration < lNum and np.linalg.norm(testData) > tol and k < n):\n        # choose the column of x that has the largest square of sum as t\n        # choose the column of y that has the largest square of sum as u\n        tidx = np.argmax(sum(np.multiply(trainData, trainData)))\n        uidx = np.argmax(sum(np.multiply(testData, testData)))\n        t1 = trainData[:, tidx]\n        u = testData[:, uidx]\n        t = np.zeros((rtrainData, 1))\n\n        # iteration for outer modeling until convergence\n        while (np.linalg.norm(t1 - t) > tol):\n            # computing the weight vector of trainData\n            # computing the score vector of trainData\n            # obtaining q by soft-threshold\n            w = np.matmul(trainData.transpose(), u) \n            w = w / np.linalg.norm(w)\n            t = t1\n            t1 = np.matmul(trainData, w) \n            q, dummy = thresh(np.matmul(testData.transpose(), t1), lambdaValue, thresh_type=\"soft\")\n            \n            # computing the loading vector of testData according to t\n            # computing the score vector of testData\n            q = q / np.linalg.norm(q)\n            u = np.matmul(testData, q) \n            \n        # update p based on t\n        # computing the loading vector of trainData\n        t = t1\n        p = np.matmul(trainData.transpose(), t) / np.matmul(t.transpose(), t)\n        pnorm = np.linalg.norm(p)\n        p = p / pnorm \n        t = t * pnorm\n        w = w * pnorm\n\n        # regression and residuals\n        t = t.reshape((-1, 1))\n        p = p.reshape((-1, 1))\n        u = u.reshape((-1, 1))\n        q = q.reshape((-1, 1))\n\n        b = np.matmul(u.transpose(), t) / np.matmul(t.transpose(), t)\n        trainData = trainData - np.matmul(t, p.transpose())\n        testData = testData - (b * np.matmul(t, q.transpose()))\n\n        # save iteration results to outputs:\n        k += 1\n        T[:, k] = list(t)\n        P[:, k] = list(p)\n        U[:, k] = list(u)\n        Q[:, k] = list(q)\n        W[:, k] = list(w)\n        B[k, k] = b\n        \n        iteration += 1\n\n    # end of iterations\n    T = T[:, :k]\n    P = P[:, :k]\n    U = U[:, :k]\n    Q = Q[:, :k]\n    W = W[:, :k]\n    B = B[:k, :k]\n\n    # prediction stage:\n    # predicting real-valued matrix for test_data\n    # transforming outputs into the corresponding label matrix\n    # 0.5 is the default threshold value\n    outputs = np.matmul(np.matmul(np.matmul(P, B), Q.transpose()).transpose(), trainLabels)\n    [numClass, numTesting] = outputs.shape\n    predictionLabels = np.zeros((numClass, numTesting))\n    for i in range(numTesting):\n        for j in range(numClass):\n            if(outputs[j,i] >= 0.5): \n                predictionLabels[j,i] = 1\n            else:\n                predictionLabels[j,i] = 0\n\n    return predictionLabels\n\ndef do_lpod(X, Y):\n    # outlier detection\n    Y = Y.astype(float).transpose()\n    allDataLength = len(Y)\n    trainDataFraction = 0.7\n    trainDataLength = np.floor(trainDataFraction * allDataLength)\n    \n    # create logical index vector\n    # randomize order to select random elements\n    trainIndexes = np.random.choice(int(allDataLength), int(trainDataLength), replace=False)\n    testIndexes = np.setdiff1d(range(allDataLength), trainIndexes)\n    \n    trainData = X[trainIndexes, :]\n    trainLabels = Y[trainIndexes]\n    testData = X[testIndexes, :]\n    testLabels = Y[testIndexes]\n    \n    # hyper parameters\n    # get predictions\n    lNum = 50\n    tol = 1e-10\n    lambdaValue = 0.1\n    predictionLabels = lpod(trainData.transpose(), testData.transpose(), trainLabels, testLabels, lNum, tol, lambdaValue)\n    lpodX = np.concatenate((trainData, testData), axis=0)\n    lpodY = np.concatenate((trainLabels, testLabels), axis=0)\n\n    return [lpodX, lpodY]\n\ndef get_approximate_labels(allConfigurations):\n    # cluster points\n    # outlier is decided as the minority cluster\n    # count population of each cluster\n    # outliers are labeled as 1\n    # inliers are labeled as 0\n    # force 2 clusters as
backup in case meanshift finds a single cluster\n X = np.array(allConfigurations)\n meanShift = MeanShift(bin_seeding=True)\n meanShift.fit(X)\n nClusters = 2\n kmeans = KMeans(init='k-means++', n_clusters=nClusters)\n kmeans.fit(X)\n labelCounts = Counter(meanShift.labels_)\n\n if(len(labelCounts.keys()) > 1):\n # multiple clusters found by meanshift\n labels = meanShift.labels_\n else:\n # single cluster found by meanshift\n # use kmeans clusters instead\n labels = kmeans.labels_\n \n labelCounts = Counter(labels)\n minLabel = min(labelCounts, key=labelCounts.get)\n labels = ['1' if x == minLabel else '0' for x in labels]\n labels = [eval(x) for x in labels]\n labels = np.array(labels, ndmin=2)\n \n return [X, labels]\n" }, { "alpha_fraction": 0.6721804738044739, "alphanum_fraction": 0.6827067732810974, "avg_line_length": 16.972972869873047, "blob_id": "feca9bd17df0f29a1e5cb57306497076c663157d", "content_id": "29d5276f4eddeedd6e0e60a5b60961e973ddda45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 665, "license_type": "no_license", "max_line_length": 62, "num_lines": 37, "path": "/src/sampling/heatbugs/run_heatbugs.java", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import heatbugs.HeatBugs;\nimport sim.engine.*;\n\npublic class run_heatbugs extends SimState\n{\n\tpublic run_heatbugs(long seed)\n\t{\n\t\tsuper(seed);\t\n\t}\n\n\tpublic static void main(String[] args)\n\t{\n\t\t// initialize\n\t\tSimState state = new HeatBugs(System.currentTimeMillis());\n\t\t\n\t\t// set parameters\n\t\tHeatBugs heatBugs = (HeatBugs)state;\n\t\theatBugs.setDiffusionConstant(0.0);\n\n\t\t// run\n\t\tstate.start();\n\n\t\tdo\n\t\t{\n\t\t\tif (!state.schedule.step(state)) \n\t\t\t\tbreak;\n\t\t}\n\t\twhile(state.schedule.getSteps() < 5000);\n\t\t\n\t\t// save state as image\n\t\t// get some state information\n\t\tSystem.out.println(heatBugs.getRandomMovementProbability());\n\n\t\tstate.finish();\n\t\tSystem.exit(0);\n\t}\n}\n" }, { "alpha_fraction": 0.6717171669006348, "alphanum_fraction": 0.6717171669006348, "avg_line_length": 27.285715103149414, "blob_id": "024ca2f638c42bb5954b9c78058c458f022b7583", "content_id": "2d8eb79139ebed4a23ead99f5fa490eaf658bb34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/snippets/plot_error.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import numpy as np\n\nwith open(\"../src/suggested_alps_fm_prediction_error.txt\", \"r\") as inFile:\n errors = [eval(x) for x in inFile.readlines()]\n\n print np.mean(errors)\n print np.std(errors)\n" }, { "alpha_fraction": 0.62347012758255, "alphanum_fraction": 0.6335493326187134, "avg_line_length": 26.50494956970215, "blob_id": "4299bab84a5f3e6c72a9b75a73bf215dd333796e", "content_id": "39d4907cc464658a0fd1a679c1ee7b0268a5acea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2778, "license_type": "no_license", "max_line_length": 105, "num_lines": 101, "path": "/app/app_server.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import os\nimport base64\nimport glob\n\nfrom flask import Flask, render_template, request\nfrom werkzeug import secure_filename\n\n\nUPLOAD_FOLDER = \"sketches/\"\nUPLOAD_FILENAME = \"demonstration.png\"\nMATCH_FOLDER = \"matches/\"\nABM = \"flocking\"\n\napp = 
Flask(__name__)\napp.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\n\n\[email protected]('/')\ndef root():\n return \"You are here: /\"\n\n\[email protected](\"/index\")\ndef index():\n return render_template(\"index.html\")\n\[email protected](\"/upload\", methods=[\"GET\", \"POST\"])\ndef upload_file():\n\n if (request.method == \"POST\"):\n # get the base64 encoded image\n encodedImage = request.form[\"encodedImage\"]\n\n # save to disk\n with open(\"demonstration.jpg\", \"wb\") as outFile:\n outFile.write(base64.b64decode(encodedImage))\n\n # convert to png\n os.system(\"convert demonstration.jpg demonstration.png\")\n os.system(\"rm -f demonstration.jpg\")\n \n # clear old data\n os.system(\"rm -f \" + UPLOAD_FOLDER + \"*\")\n os.system(\"rm -f \" + MATCH_FOLDER + \"*\")\n\n # save file in selected location\n fileName = os.path.join(app.config[\"UPLOAD_FOLDER\"], UPLOAD_FILENAME)\n os.system(\"mv demonstration.png \" + fileName)\n\n # initiate matching and call pipeline background process\n os.system(\"rm -rf \" + \"../data/predictions/\" + ABM + \"/\" + MATCH_FOLDER + \"/*\")\n os.system('curl localhost:5001/match')\n os.system(\"./do_pipeline.sh &\")\n \n # refresh will be a get request\n return render_template(\"redirect.html\")\n\n\[email protected](\"/redirect\")\ndef redirect():\n # get background process status\n fileExists = os.path.isfile(\"../data/predictions/\" + ABM + \"/\" + MATCH_FOLDER + \"predicted_alps.txt\")\n\n return str(fileExists)\n\n\[email protected]('/results')\ndef results():\n return render_template(\"results.html\")\n\n\[email protected]('/alps')\ndef get_suggested_alps():\n # get suggested ALPs from file\n # this will be the response sent\n with open(\"../data/predictions/\" + ABM + \"/\" + MATCH_FOLDER + \"predicted_alps.txt\") as inFile:\n prediction = inFile.readlines()[0]\n suggestedAlps = prediction.split(\" (\")[0]\n\n return suggestedAlps\n\n\[email protected]('/match')\ndef get_match():\n # return matched image as json\n imageName = glob.glob(\"matches/*.png\")[0]\n with open(imageName, \"rb\") as imageFile:\n imageString = base64.b64encode(imageFile.read())\n\n return imageString\n\n\[email protected]('/prediction')\ndef get_prediction():\n # return matched image as json\n imageFolder = glob.glob(\"../data/predictions/\" + ABM + \"/\" + MATCH_FOLDER + \"/images_*\")[0]\n imageName = imageFolder + \"/0_200.png\"\n with open(imageName, \"rb\") as imageFile:\n imageString = base64.b64encode(imageFile.read())\n\n return imageString\n" }, { "alpha_fraction": 0.729411780834198, "alphanum_fraction": 0.729411780834198, "avg_line_length": 16, "blob_id": "9b5e514746b13a44a2de2147a8dee5e93c9a49cf", "content_id": "c241871ffcbca08fc11761bb6eb8677ac11fe5ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 85, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/src/sampling/sampling.sh", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "# generate raw file\n#./sampling_raw.sh\n\n# process raw file\n./sampling_process_raw.sh\n" }, { "alpha_fraction": 0.6430445909500122, "alphanum_fraction": 0.654199481010437, "avg_line_length": 42.485713958740234, "blob_id": "5b7dd6773462dab1fca49591af102fe140960d1b", "content_id": "dda6cd16ec97f0010a4b62e76231ba9d1702d50f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "no_license", "max_line_length": 120, "num_lines": 35, "path": 
"/data/dataset_analysis/simulations/plot_fm_prediction_error.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\npointSelectionMethods = [\"error_weighted\", \"min_error\"]\ndemonstrations = [\"1\", \"2\", \"3\"]\n\n# plot fm prediction error for various demonstrations\nfor pointSelectionMethod in pointSelectionMethods:\n # plot unguided and guided data\n plt.figure()\n\n print(pointSelectionMethod)\n \n for demonstration in demonstrations:\n with open(pointSelectionMethod + \"/\" + demonstration + \"/unguided/fm_prediction_error.txt\") as unguidedDataFile:\n unguidedData = [eval(x) for x in unguidedDataFile.readlines()]\n with open(pointSelectionMethod + \"/\" + demonstration + \"/guided/fm_prediction_error.txt\") as guidedDataFile:\n guidedData = [eval(x) for x in guidedDataFile.readlines()]\n\n mean = np.mean(unguidedData)\n sigma = np.std(unguidedData)\n plot1 = plt.errorbar(eval(demonstration), mean, sigma, fmt='s', color='b', markersize=20, elinewidth=2)\n print(mean, sigma)\n \n mean = np.mean(guidedData)\n sigma = np.std(guidedData)\n plot2 = plt.errorbar(eval(demonstration), mean, sigma, fmt='s', color='g', markersize=20, elinewidth=2)\n print(mean, sigma)\n \n plt.gca().set_ylim(bottom=-0.01)\n plt.legend([plot1, plot2], [\"Unguided Data\", \"Guided Data\"], loc=\"best\")\n plt.xticks(range(len(demonstrations)+2), [\" \", \"a\", \"b\", \"c\", \" \"])\n plt.xlabel(\"Demonstration (Demo)\")\n plt.ylabel(\"FM Prediction Error\")\n plt.savefig(pointSelectionMethod + \".png\")\n\n\n" }, { "alpha_fraction": 0.628926694393158, "alphanum_fraction": 0.6511780023574829, "avg_line_length": 23.253969192504883, "blob_id": "303c78a0e88e4f0ff01976c0e5b4273178738f70", "content_id": "8be2c577064da046efa5eff6857b099eb04f8b71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 92, "num_lines": 63, "path": "/src/sampling/forest_fire/sample_forest_fire.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "from model import ForestFire\nimport random\nimport numpy as np\n\n# only one ALP\n# density (0,1)\n\n# constant ALPs\nHEIGHT = 100\nWIDTH = 100\n\n# constants\n# max_iters in the example is specified as 1000\n# so we run for 1000 steps\nINDEPENDENT_VARIABLES = 1\nDEPENDENT_VARIABLES = 1\nRUNS = 10000\nSAMPLES = 10\nTIME_LAPSE = 200\n\ndef main_forest_fire(density=0.65):\n # instantiate and run model\n model = ForestFire(height=HEIGHT, width=WIDTH, density=density)\n\n # run time lapse\n for i in range(TIME_LAPSE):\n model.step()\n\n # collect data for next steps\n dependentValues = []\n\n for i in range(SAMPLES):\n # step\n model.step()\n\n # read data\n data = model.datacollector.get_model_vars_dataframe()\n burnedTrees = np.array(list(data.BurnedOut)[-SAMPLES-1:])\n fineTrees = np.array(list(data.Fine)[-SAMPLES-1:])\n initialTrees = burnedTrees + fineTrees\n dependentValues.append(list(burnedTrees/initialTrees))\n\n # print line corresponding to this execution\n line = str(density)\n for dependentValue in dependentValues:\n line += \" \" + str(dependentValue).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\n print(line)\n \ndef main():\n\n # header string\n print(\"i\"*INDEPENDENT_VARIABLES + \"d\"*DEPENDENT_VARIABLES)\n\n for run in range(RUNS):\n\n # sample random ALPs\n # cop density + citizen density should be less than 1 \n density = random.random()\n \n # run model 
using those ALPs\n main_forest_fire(density)\n\nmain()\n" }, { "alpha_fraction": 0.7006369233131409, "alphanum_fraction": 0.7707006335258484, "avg_line_length": 21.428571701049805, "blob_id": "3ca71683ff14d9dfb27a0017fde89a5bc7de12d7", "content_id": "2ed16a83192c7e76496fc97354113e21bbd05dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/src/domainconfs/aids.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "TRAINING_SIZE = 300\nVALIDATION_SIZE = 1000\nRM_GRANULARITY = 55\nREGRESSION = \"sklearn.neighbors.KNeighborsRegressor()\"\n\nNUM_INDEPENDENT = 2\nNUM_DEPENDENT = 2\n" }, { "alpha_fraction": 0.6805850863456726, "alphanum_fraction": 0.6828457713127136, "avg_line_length": 41.97142791748047, "blob_id": "fbffb23a39ddbe55972330d45169fce39ff864ba", "content_id": "03dc45db4a8abb59562161fde903a2043907ebc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7520, "license_type": "no_license", "max_line_length": 287, "num_lines": 175, "path": "/src/match_sketches.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "import os\nimport sampling.utils.featurize_image as fi\nimport sys\nimport glob\nimport amf.misc\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import LSHForest\nimport numpy as np\nimport cPickle\nimport random\n\ndef initialize_featurization():\n # edit scale data file name based on descriptor setting\n scaleDataFileNamePrefix = os.environ[\"SCALE_DATA_FILE\"].split(\".\")[0]\n scaleDataFileNameExtension = os.environ[\"SCALE_DATA_FILE\"].split(\".\")[1]\n scaleDataFileName = scaleDataFileNamePrefix + \"_\" + os.environ[\"ABM\"]\n if(bool(eval(os.environ[\"IMAGES\"])) == True):\n scaleDataFileName += \"_\" + os.environ[\"DESCRIPTOR_SETTING\"].lower()\n scaleDataFileName += \".\" + scaleDataFileNameExtension\n\n # get scale information\n with open(\"sampling/\" + os.environ[\"ABM\"] + \"/\" + scaleDataFileName, \"r\") as scaleDataFile:\n # read in order of writing in map.py\n maxs = eval(scaleDataFile.readline().strip(\"\\n\"))\n mins = eval(scaleDataFile.readline().strip(\"\\n\"))\n\n dependentMaxs = maxs[-eval(os.environ[\"NUM_DEPENDENT\"]):]\n dependentMins = mins[-eval(os.environ[\"NUM_DEPENDENT\"]):]\n dependentValuesMax = {i: dependentMaxs[i] for i in range(len(dependentMaxs))}\n dependentValuesMin = {i: dependentMins[i] for i in range(len(dependentMins))}\n scaleData = [dependentValuesMax, dependentValuesMin]\n\n \n # load feature extractor if needed\n net = fi.get_network(os.environ[\"DESCRIPTOR_SETTING\"])\n\n return [net, scaleData]\n\ndef get_dependent_values_image(net, scaleData, imageName):\n # define dependent values\n dependentValuesImage = fi.get_features(net, imageName, os.environ[\"DESCRIPTOR_SETTING\"])\n # normalize\n [dependentValuesMax, dependentValuesMin] = scaleData\n dependentValuesImage = tuple((dependentValuesImage[index] - dependentValuesMin[index])/(dependentValuesMax[index] - dependentValuesMin[index]) if (dependentValuesMax[index] > dependentValuesMin[index]) else dependentValuesMax[index] for index in range(len(dependentValuesImage)))\n\n return dependentValuesImage\n\ndef get_dependent_values_folder(net, scaleData, folderName, downsample=1):\n # initialize collection of dependent values\n imageNamesFolder = []\n dependentValuesFolder = []\n \n # expect everything in this folder to be an image\n # read 
sketches data\n allFileNames = glob.glob(folderName + \"/*.*\")\n allFileNames = random.sample(allFileNames, int(len(allFileNames)/downsample))\n for imageName in sorted(allFileNames):\n # featurize image\n print >> sys.stderr, (\"now processing \" + imageName) \n dependentValuesImage = get_dependent_values_image(net, scaleData, imageName)\n\n # append to list\n imageNamesFolder.append(imageName)\n dependentValuesFolder.append(dependentValuesImage)\n\n return [imageNamesFolder, dependentValuesFolder]\n\ndef do_matching_lsh(net, scaleData, imageNamesSketchList, dependentValuesSketchList):\n lshModelFileName = \"../../_swarm-lfd-data/\" + os.environ[\"ABM\"] + \"/lsh_forest_model\" + \"_\" + os.environ[\"DESCRIPTOR_SETTING\"].lower() + \".p\"\n\n if(os.path.exists(lshModelFileName) == False):\n # no model found in directory\n # train a locality sensitive hashing model on the simulation data\n [imageNamesSimulationList, dependentValuesSimulationList] = get_dependent_values_folder(net, scaleData, \"../../_swarm-lfd-data/\" + os.environ[\"ABM\"] + \"/images\")\n lshForest = LSHForest()\n lshForest.fit(dependentValuesSimulationList)\n\n # use disk to cache results\n with open(lshModelFileName, \"wb\") as outFile:\n cPickle.dump(lshForest, outFile)\n with open(\"image_names_simulation.p\", \"wb\") as outFile:\n cPickle.dump(imageNamesSimulationList, outFile)\n else:\n # use existing model\n with open(lshModelFileName, \"rb\") as inFile:\n lshForest = cPickle.load(inFile)\n with open(\"../../_swarm-lfd-data/\" + os.environ[\"ABM\"] + \"/image_names_simulation.p\", \"rb\") as inFile:\n imageNamesSimulationList = cPickle.load(inFile)\n\n # now match\n print >> sys.stderr, \"\\n\"\n print >> sys.stderr, \"now matching\"\n matches = []\n for imageNameSketch, dependentValuesSketch in zip(imageNamesSketchList, dependentValuesSketchList):\n minDistance = float('Inf')\n minDistanceImage = None\n distances = []\n\n # use lsh to get neighbors\n [distances, indices] = lshForest.kneighbors(np.array(dependentValuesSketch).reshape(1, -1))\n\n # use indices to get closest images\n # read image name and image feature files\n # then only compare with the few retrieved\n # this is only useful if LSH features are different from features used in the loop\n for index in indices[0]:\n imageNameSimulation = imageNamesSimulationList[index]\n dependentValuesSimulation = get_dependent_values_image(net, scaleData, imageNameSimulation)\n\n # compute distance\n distance = amf.misc.distance(dependentValuesSketch, dependentValuesSimulation)\n if(distance < minDistance):\n minDistance = distance\n minDistanceImage = imageNameSimulation\n\n # pick minimum distance\n matches.append((imageNameSketch, minDistanceImage))\n\n # clear memory\n del lshForest\n\n return matches\n\ndef do_matching(net, scaleData, imageNamesSketchList, dependentValuesSketchList):\n\n # get raw features\n # [imageNamesSimulationList, dependentValuesSimulationList] = get_dependent_values_folder(net, scaleData, \"../../_swarm-lfd-data/\" + os.environ[\"ABM\"] + \"/images\", downsample=11000)\n [imageNamesSimulationList, dependentValuesSimulationList] = get_dependent_values_folder(net, scaleData, \"../../_swarm-lfd-data/\" + os.environ[\"ABM\"] + \"/images_1000\")\n\n # now match\n print >> sys.stderr, \"\\n\"\n print >> sys.stderr, \"now matching\"\n matches = []\n for imageNameSketch, dependentValuesSketch in zip(imageNamesSketchList, dependentValuesSketchList):\n minDistance = float('Inf')\n minDistanceImage = None\n distances = []\n\n for 
imageNameSimulation, dependentValuesSimulation in zip(imageNamesSimulationList, dependentValuesSimulationList):\n # compute distance\n distance = amf.misc.distance(dependentValuesSketch, dependentValuesSimulation)\n if(distance < minDistance):\n minDistance = distance\n minDistanceImage = imageNameSimulation\n\n # pick minimum distance\n matches.append((imageNameSketch, minDistanceImage))\n\n return matches\n\ndef main():\n # get sketches folder\n sketchesFolder = sys.argv[1]\n \n # initialize and retrieve features\n [net, scaleData] = initialize_featurization()\n [imageNamesSketchList, dependentValuesSketchList] = get_dependent_values_folder(net, scaleData, sketchesFolder)\n\n # matches = do_matching_lsh(net, scaleData, imageNamesSketchList, dependentValuesSketchList)\n matches = do_matching(net, scaleData, imageNamesSketchList, dependentValuesSketchList)\n\n # print matches\n # save matches\n simulationSaveFolder = sys.argv[2]\n print >> sys.stderr, \"\\n\"\n print >> sys.stderr, \"now printing matches:\"\n print >> sys.stderr, \"\\n\"\n for x, y in matches:\n print >> sys.stderr, x\n print >> sys.stderr, y\n print >> sys.stderr, \"\\n\"\n\n os.system(\"cp \" + y + \" \" + simulationSaveFolder)\n \nmain()\n" }, { "alpha_fraction": 0.7051281929016113, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 21.285715103149414, "blob_id": "1fd3ccaa98a7e798627221abe13a8c2be0c6fee9", "content_id": "e335465a61bbee713df5f15a1d4610f47810a677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/src/domainconfs/civil_violence.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "TRAINING_SIZE = 330\nVALIDATION_SIZE = 100\nRM_GRANULARITY = 40\nREGRESSION = \"sklearn.neighbors.KNeighborsRegressor()\"\n\nNUM_INDEPENDENT = 3\nNUM_DEPENDENT = 3\n" }, { "alpha_fraction": 0.6403161883354187, "alphanum_fraction": 0.7351778745651245, "avg_line_length": 22, "blob_id": "02ab3a125819d197a0b3c5083757951007e66128", "content_id": "effe63c2db702f02d9b6fa78d4ead36edce8b704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/src/domainconfs/wolf_sheep.py", "repo_name": "karanbudhraja/PABMD", "src_encoding": "UTF-8", "text": "#TRAINING_SIZES = range(40, 440, 40)\n#VALIDATION_SIZE = 200\n#RM_GRANULARITIES = range(15, 65, 10)\n\nTRAINING_SIZE = 40\nVALIDATION_SIZE = 10\nRM_GRANULARITY = 20\nREGRESSION = \"sklearn.neighbors.KNeighborsRegressor()\"\n\nNUM_INDEPENDENT = 5\nNUM_DEPENDENT = 2\n" } ]
63
agamdamaraju/SentimentAnalysis
https://github.com/agamdamaraju/SentimentAnalysis
882808e49f2aba8aaa3754e637db37dfbbe22dbc
66b154605d063747557485b58fcebe3bdfe3f017
d84bb130efff2c97bad0c5a0f44f44e50f72850e
refs/heads/main
2023-01-24T07:33:32.928004
2020-12-03T14:41:06
2020-12-03T14:41:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7746614813804626, "alphanum_fraction": 0.7756286263465881, "avg_line_length": 33.46666717529297, "blob_id": "2ac342185bad117905486901764b0b8e8ce530e7", "content_id": "4cfc2ea4b1c894a07b9e73e2354b9467c855d764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 152, "num_lines": 30, "path": "/README.md", "repo_name": "agamdamaraju/SentimentAnalysis", "src_encoding": "UTF-8", "text": "# Sentiment Analysis\nA project of sentiment analysis based on twitter dataset.\n\n# Motivation\nSentiment analysis is extremely useful in social media monitoring as it allows us to gain an overview of the wider public opinion behind certain topics.\n\n# About\nThis is purely Python programming based project from scratch, which classifies the overall twitter data based on negative and positive scores. \n\n# Process overview\n- Reading dataset.\n- Data preprocessing\n - Lower case conversion.\n - Punctualtion removal.\n - Tokenization (Organising tweets, retweet count and reply count).\n - Modeling (No Packages are used).\n - Positive score calculation based on predefined positive words.\n - Negative score calculation based on predefined negative words.\n - Net score calculation.\n- Writing calculated data in csv file.\n\n# Editor\nSpyder 4\n\n# Scripts \nPython(.py) [Code file](https://github.com/AgamDamaraju/SentimentAnalysis/) is attached.\n\n# Credits\n- [University of Michigan](https://umich.edu/)\n- [Coursera](https://www.coursera.org)\n" }, { "alpha_fraction": 0.5751683115959167, "alphanum_fraction": 0.5804038643836975, "avg_line_length": 29.11627960205078, "blob_id": "b119b896710114fbf5c93a83b56dde98ca483e90", "content_id": "392e5b60a4ac7e68b52cc20e5f23862ac3e0867f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2674, "license_type": "no_license", "max_line_length": 106, "num_lines": 86, "path": "/sentiment_analysis.py", "repo_name": "agamdamaraju/SentimentAnalysis", "src_encoding": "UTF-8", "text": "# List of punctuations\r\npunctuation_chars = [\"'\", '\"', \",\", \".\", \"!\", \":\", \";\", '#', '@']\r\n\r\n# Function for removing punctuations from the word\r\ndef strip_punctuation(word):\r\n punct = \",\".join(punctuation_chars) \r\n for i in word:\r\n if i in punct:\r\n word = word.replace(i,\"\")\r\n return word\r\n\r\n# Function for counting positive words\r\ndef get_pos(sentence):\r\n punctWords = sentence.split()\r\n pos_wrds = 0\r\n for i in punctWords:\r\n words = strip_punctuation(i)\r\n words_lower = words.lower()\r\n if words_lower in positive_words:\r\n pos_wrds += 1\r\n return pos_wrds\r\n\r\n# Function for counting negative words\r\ndef get_neg(sentence):\r\n punctWords = sentence.split()\r\n neg_wrds = 0\r\n for i in punctWords:\r\n words = strip_punctuation(i)\r\n words_lower = words.lower()\r\n if words_lower in negative_words:\r\n neg_wrds += 1\r\n return neg_wrds\r\n\r\n# Function for positive words collection\r\npositive_words = []\r\nwith open(\"positive_words.txt\") as pos_f:\r\n for lin in pos_f:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n positive_words.append(lin.strip())\r\n\r\n# Function for negative words collection\r\nnegative_words = []\r\nwith open(\"negative_words.txt\") as pos_f:\r\n for lin in pos_f:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n negative_words.append(lin.strip())\r\n\r\n# Collection for tweets, retweets and replies\r\nreT_reP = []\r\ntxt = []\r\nwith 
open(\"project_twitter_data.csv\") as data_read:\r\n for lin in data_read:\r\n for char in lin:\r\n if char == \"\\n\":\r\n lin = lin.replace(char, \"\")\r\n lst = lin.split(\",\")\r\n reT_reP.append(tuple(lst[-2:]))\r\n txt.append(tuple(lst[:-2]))\r\n retrep = reT_reP[1:] \r\n text = txt[1:] \r\n \r\nretweets = []\r\nreplies = []\r\nfor tup in retrep:\r\n retweets.append(tup[0])\r\n replies.append(tup[1])\r\n\r\n# Evaluation of Positive and Negative scores\r\npositive_score = []\r\nnegative_score = []\r\nfor tup in text: \r\n for txt in tup:\r\n positive_score.append(get_pos(txt))\r\n negative_score.append(get_neg(txt))\r\n\r\n# Evaluation of Net score\r\nnet_score = []\r\nfor ps, ns in zip(positive_score, negative_score):\r\n nt = ps-ns\r\n net_score.append(nt)\r\n\r\n# Writing results in csv format \r\nwith open(\"resulting_data.csv\", \"w\") as data_write:\r\n data_write.write(\"Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score\\n\")\r\n for rt, rp, ps, ns, nt in zip(retweets, replies, positive_score, negative_score, net_score):\r\n data_write.write(str(rt)+\", \"+str(rp)+\", \"+str(ps)+\", \"+str(ns)+\", \"+str(nt)+\"\\n\")" } ]
2
sinramyeon/youtubewidget
https://github.com/sinramyeon/youtubewidget
5729e84c10cc2ec75af041a8ae047be88b03a726
fbd765f322f9b3d0082d45d7eca82c384031afa3
a4b05502ed720ecbd5d36068e510e9f4bee729bf
refs/heads/master
2021-09-08T07:04:52.267137
2018-03-08T05:30:11
2018-03-08T05:30:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6171284914016724, "alphanum_fraction": 0.6171284914016724, "avg_line_length": 23.060606002807617, "blob_id": "b62fcb0075f4691eadd65e79422b26ebb5245563", "content_id": "e10d6c707ff5f83f0d9131fb7a317802cc9ba5de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "no_license", "max_line_length": 87, "num_lines": 33, "path": "/youtubeTitle.py", "repo_name": "sinramyeon/youtubewidget", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom pywinauto import Desktop\nimport platform\nimport appscript\n\ndef FormatUrl(url):\n if url and not url.startswith(\"https://\"): \n return \"http://\" + url\n return url\n\ndef GetURL() : \n \n if platform.system() == \"Darwin\" :\n urls = appscript.app('Google Chrome').windows.tabs.URL()\n print(platform)\n \n else :\n urls = appscript.app('Google Chrome').windows.tabs.URL()\n\n chrome_window = Desktop(backend=\"uia\").window(class_name_re='Chrome')\n chrome = chrome_window['Google Chrome']\n\n address_bar_wrapper = chrome_window['Google Chrome'].main.Edit.wrapper_object()\n\n url = address_bar_wrapper.legacy_properties()\n\n print(url['Value'])\n\n\n \n \n def GetTitle() \n: pass\n" }, { "alpha_fraction": 0.8481012582778931, "alphanum_fraction": 0.8481012582778931, "avg_line_length": 39, "blob_id": "aef7e7807472eb71fe62dbe210a89a21ad9f8911", "content_id": "6336181f911ae6f1d42b7b9af9e9411832516ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 63, "num_lines": 2, "path": "/README.md", "repo_name": "sinramyeon/youtubewidget", "src_encoding": "UTF-8", "text": "# youtubewidget\nsimple youtube widget for windows made by python with pywinauto" } ]
2
Z-Shuming/NXSpider
https://github.com/Z-Shuming/NXSpider
4d0fcf693bf004e22747b8af28bf4c7b27170c75
68e588c0612d0ab2af3a820ff88ca24d698ceeb7
d605fdd342d09e9ad696a3a729e20ee07753c712
refs/heads/master
2022-01-12T23:08:27.402606
2018-12-17T15:36:03
2018-12-17T15:36:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5572916865348816, "alphanum_fraction": 0.625, "avg_line_length": 13.769230842590332, "blob_id": "027b9a5be43029169a1f49de2046171a372c7ddf", "content_id": "19bce644bfaed95012b5d8f501f37dc2ed6af270", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "permissive", "max_line_length": 35, "num_lines": 13, "path": "/NXSpider/debug_test/test.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/26.\n# email to [email protected]\n#\n\nfrom NXSpider.spider.mp3 import Mp3\n\nm1=Mp3(__parse_recursion__={})\nm=Mp3()\n\nprint('a')\n" }, { "alpha_fraction": 0.5741106867790222, "alphanum_fraction": 0.5839921236038208, "avg_line_length": 34.30232620239258, "blob_id": "c5f018043076de0f21cb1cc885ece28ee375d3b6", "content_id": "24d33f857c7a2f47650656a4986dada0d36e2c6a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3036, "license_type": "permissive", "max_line_length": 119, "num_lines": 86, "path": "/NXSpider/bin/show_ctrl.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/19.\n# email to [email protected]\n#\nimport sys\n\nfrom cement.core.controller import expose\nfrom terminaltables import AsciiTable\n\nfrom NXSpider.bin.base_ctrl import NXSpiderBaseController, py2_decoding\nfrom NXSpider.bin.print_as_table import print_mp3s, print_playlist, print_users, print_albums, print_artists, print_mvs\nfrom NXSpider.common import PYTHON2, log\nfrom NXSpider.spider import api\nfrom NXSpider.spider.api import search_types, PLAYLIST_CLASSES\n\nPRINT_ATTR_FUNC_MAP = {\n 'mp3': ['songs', print_mp3s],\n 'playlist': ['playlists', print_playlist],\n 'user': ['userprofiles', print_users],\n 'artist': ['artists', print_artists],\n 'album': ['albums', print_albums],\n 'mv': ['mvs', print_mvs],\n}\n\n\nclass ShowController(NXSpiderBaseController):\n class Meta:\n label = \"show\"\n stacked_on = 'base'\n description = \"NXSpider\"\n\n @expose(help=\"search [-ar <artist>] [-pl <playlist>] \"\n \"[-ur <user>] [-mp3 <song>] [-ab <album>] [-mv <mv>]\")\n def search(self):\n search_key = 'mp3'\n key_num = 0\n for k, v in search_types.items():\n if getattr(self.app.pargs, k, None):\n search_key = k\n key_num += 1\n if key_num > 1:\n log.print_err(\"it could search by only one type\")\n\n # input must be decode in python2\n search_value = getattr(self.app.pargs, search_key)\n search_value = py2_decoding(search_value)\n\n res = api.search(search_value, stype=search_key,\n offset=self.app.pargs.offset or 0,\n limit=self.app.pargs.limit or 50)\n\n if not res:\n log.print_info(\"nothing found!\")\n return\n\n if search_key in PRINT_ATTR_FUNC_MAP:\n func = PRINT_ATTR_FUNC_MAP[search_key][1] # type: function\n value = (res.get(PRINT_ATTR_FUNC_MAP[search_key][0], [])) # type: list\n func(value)\n\n @expose(help=\"show artists ablum, usage: sw-ar-ab -ar <artist_id> [-offset <offset>] [-limit <limit>]\")\n def sw_ar_ab(self):\n if self.param_check(['artist'], sys._getframe().f_code.co_name) is False:\n return\n\n artistid = self.app.pargs.artist # type: list\n artist_detail = api.get_artist_album(artistid,\n offset=self.app.pargs.offset or 0,\n limit=self.app.pargs.limit or 50)\n\n print_albums(artist_detail['hotAlbums'])\n pass\n\n @expose(help=\"show 
user playlist, usage: sw-ur-pl -ur <user_id,id1,id2>\")\n def sw_ur_pl(self):\n # todo\n pass\n\n @expose(help=\"show all playlist classes, usage: sw-pl-classes\")\n def sw_pl_classes(self):\n table = AsciiTable([[\"Group\", \"Classes(which can be as a input)\"]])\n table_data = [[k, ', '.join(v)] for k, v in PLAYLIST_CLASSES.items()]\n table.table_data.extend(table_data)\n print(table.table)\n" }, { "alpha_fraction": 0.6136986017227173, "alphanum_fraction": 0.6356164216995239, "avg_line_length": 15.590909004211426, "blob_id": "701e757e158767823c9594c3083424df6fd2f639", "content_id": "1304c1c7d11f468089209fc287f77c8c06c59d0e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "permissive", "max_line_length": 42, "num_lines": 22, "path": "/NXSpider/common/log.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\n\nfrom colorama import Fore\nfrom colorama import init\ninit(autoreset=True)\n\n\ndef print_err(msg):\n print(Fore.RED + \"ERROR: \" + msg)\n\n\ndef print_warn(msg):\n print(Fore.YELLOW + \"WARNING: \" + msg)\n\n\ndef print_info(msg):\n print(Fore.GREEN + \"INFO: \" + msg)\n" }, { "alpha_fraction": 0.5047619342803955, "alphanum_fraction": 0.5809524059295654, "avg_line_length": 16.16666603088379, "blob_id": "4eaae03255274fb51cbfa6f2850b7d21fca58981", "content_id": "aeae71ab6357de4e2205afdda64a00e9943a049a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "permissive", "max_line_length": 33, "num_lines": 6, "path": "/NXSpider/spider/__init__.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\n\n\n" }, { "alpha_fraction": 0.5492264628410339, "alphanum_fraction": 0.5724331736564636, "avg_line_length": 22.700000762939453, "blob_id": "ebc2a604a7f6c68d5488ad84a11f8c9ae271a78c", "content_id": "4c5a603565b92ea36bbf1b9c1daa670330e06022", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "permissive", "max_line_length": 72, "num_lines": 60, "path": "/setup.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/21.\n# email to [email protected]\n#\nimport os\n\nfrom setuptools import setup, find_packages, Command\n\nfrom NXSpider import version\n\n\nclass CleanCommand(Command):\n \"\"\"Custom clean command to tidy up the project root.\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')\n\n\nsetup(\n name=version.__title__,\n version=version.__version__,\n author=version.__author__,\n author_email=version.__author_email__,\n url=version.__url__,\n description=version.__description__,\n license=version.__license__,\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"mutagen>=1.40,<2\",\n \"terminaltables>=3,<4\",\n \"cryptography>=2.2,<3\",\n \"requests>=2.19,<3\",\n \"colorama>=0.3,<0.4\",\n \"cement>=2,<3\",\n \"six>=1,<=2\",\n ],\n entry_points={\n \"console_scripts\": 
[\"nxspider=NXSpider.bin.cli:main\"]\n },\n classifiers=[\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['music', 'netease', 'cli', 'spider'],\n cmdclass={\n 'clean': CleanCommand,\n },\n)\n" }, { "alpha_fraction": 0.5513054132461548, "alphanum_fraction": 0.6836673021316528, "avg_line_length": 24.734375, "blob_id": "3ec5d3668e67601c71558775ce639cf292a2b580", "content_id": "57c9b08ea340aa270d7ae8179ec0c1c27b83815e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1647, "license_type": "permissive", "max_line_length": 270, "num_lines": 64, "path": "/NXSpider/spider/common_keys.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/16.\n# email to [email protected]\n#\n\nimport json\nimport os\n\nfrom NXSpider.common import encrypt\nfrom NXSpider.common import tools, log\n\ncsrf_dict = {'csrf_token': 'csrf'}\n\n\ndef encrypted_request(obj):\n data = {\n 'params': create_params_by_dict(obj),\n 'encSecKey': encSecKey\n }\n return data\n\n\ndef create_params_by_dict(obj):\n try:\n return create_params_text(json.dumps(obj))\n except Exception as e:\n log.print_err('create params error: %s' % e)\n return None\n\n\ndef create_params_text(text):\n nonce = '0CoJUm6Qyw8W8jud'\n nonce2 = 16 * 'F'\n encText0 = encrypt.aes(text, nonce).decode(\"utf-8\")\n encText = encrypt.aes(encText0, nonce2)\n return encText\n\n\ndef decrpyt_params(text):\n nonce = '0CoJUm6Qyw8W8jud'\n nonce2 = 16 * 'F'\n decText0 = encrypt.aes_decode(text, nonce2)\n decText = encrypt.aes_decode(decText0, nonce)\n return decText\n\n\ndef rsa_encrypt(text, pubKey, modulus):\n text = text[::-1]\n rs = int(tools.hex(text), 16) ** int(pubKey, 16) % int(modulus, 16)\n return format(rs, 'x').zfill(256)\n\n\ndef create_secretKey(size):\n return (\n ''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(size)))\n )[0:16]\n\n\nmodulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'\npubKey = '010001'\nsecKey = 16 * 'F'\nencSecKey = rsa_encrypt(secKey, pubKey, modulus)\n" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.6083333492279053, "avg_line_length": 24.714284896850586, "blob_id": "d038133f9a9151dcfaa23e5a68c544f417367b08", "content_id": "7e0b12770a6474b077c6a8b93230dac0978b95e3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "permissive", "max_line_length": 69, "num_lines": 14, "path": "/NXSpider/version.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/6.\n# email to [email protected]\n#\n\n__title__ = 'NXSpider'\n__version__ = '0.0.5'\n__description__ = 'NetEase X Spider, help u to have a better life!!!'\n__url__ = 'https://github.com/Grass-CLP/NXSpider'\n__author__ = 'LipsonChan'\n__author_email__ = '[email protected]'\n__license__ = 'BSD-3-Clause License'\n" }, { "alpha_fraction": 0.6242424249649048, "alphanum_fraction": 0.6393939256668091, "avg_line_length": 19.030303955078125, "blob_id": 
"9cd1a9375c1b6377c023880130429c51b8828c16", "content_id": "252eac29cf6a940c55a0a5f6cdbb531606d0dd7d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "permissive", "max_line_length": 44, "num_lines": 33, "path": "/NXSpider/model/export.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/20.\n# email to [email protected]\n#\n\nfrom NXSpider.common.config import Config\nif Config().get_no_mongo():\n from NXSpider.model.dict_model import *\nelse:\n from NXSpider.model.mongo_model import *\n\n\nmodel_download_url = 'download_url'\nmodel_is_download = 'downloaded'\nmodel_download_path = 'download_path'\n\n__all__ = [\n 'ConfigModel',\n 'UserModel',\n 'AlbumModel',\n 'PlaylistModel',\n 'Mp4Model',\n 'VideoModel',\n 'ArtistModel',\n 'Mp3Model',\n 'AuthorModel',\n 'update_dynamic_doc',\n 'model_download_url',\n 'model_is_download',\n 'get_one_model_by_key',\n]" }, { "alpha_fraction": 0.569031298160553, "alphanum_fraction": 0.5827612280845642, "avg_line_length": 19.484375, "blob_id": "548c7290f0283b25f877b06731245f14bd4f24b6", "content_id": "b3a8dea9e934ac6c937dd65fef4949dbad40d883", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1311, "license_type": "permissive", "max_line_length": 53, "num_lines": 64, "path": "/NXSpider/bin/models.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/19.\n# email to [email protected]\n#\nfrom NXSpider.spider.album import Album\nfrom NXSpider.spider.artist import Artist\nfrom NXSpider.spider.mp3 import Mp3\nfrom NXSpider.spider.mv import MV\nfrom NXSpider.spider.playlist import Playlist\nfrom NXSpider.spider.user import User\n\nno_rec_artist_mo = Artist(\n __parse_recursion__={}\n)\n\nno_rec_album_mo = Album(\n __model_rfilter__={'artist', 'songs'},\n __parse_recursion__={'artists': no_rec_artist_mo}\n)\n\nno_rec_mv_mo = MV(\n __parse_recursion__={'artists': no_rec_artist_mo}\n)\n\ndw_mp3_mo = Mp3(\n __parse_recursion__={\n 'artists': no_rec_artist_mo,\n 'album': no_rec_album_mo,\n 'mv': no_rec_mv_mo,\n }\n)\n\nplaylist_mo = Playlist(\n __model_rfilter__={'artist'},\n __parse_recursion__={\n 'mp3': dw_mp3_mo,\n 'creator': User(),\n }\n)\n\nartist_mo = Artist(\n __model_rfilter__={'artist'},\n __parse_recursion__={\n 'mp3': dw_mp3_mo,\n }\n)\n\nalbum_mo = Album(\n __model_rfilter__={'artist'},\n __parse_recursion__={\n 'mp3': dw_mp3_mo,\n 'artists': no_rec_artist_mo,\n }\n)\n\nartist_album_mo = Artist(\n __model_rfilter__={},\n __parse_recursion__={\n 'albums': album_mo,\n 'artists': no_rec_artist_mo,\n }\n)\n" }, { "alpha_fraction": 0.535625696182251, "alphanum_fraction": 0.5718929171562195, "avg_line_length": 25.75286102294922, "blob_id": "76d63045396b147e02afa192bca9e5e0fc41e2b3", "content_id": "c809ebc0f4c9702b46590bd142e9bbc7447792e2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12299, "license_type": "permissive", "max_line_length": 141, "num_lines": 437, "path": "/NXSpider/spider/api.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/24.\n# email to [email protected]\n#\nimport re\nfrom collections import 
OrderedDict\nfrom functools import reduce\n\nimport requests\n\nfrom NXSpider.common import log\nfrom NXSpider.spider.common_keys import encrypted_request\n\nbase_url = \"http://music.163.com\"\nbase_https_url = \"https://music.163.com\"\n\nheaders = {\n 'Referer': 'http://music.163.com/',\n 'Host': 'music.163.com',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n}\n\nTOP_LIST_ALL = {\n 0: ['云音乐新歌榜', '3779629'],\n 1: ['云音乐热歌榜', '3778678'],\n 2: ['网易原创歌曲榜', '2884035'],\n 3: ['云音乐飙升榜', '19723756'],\n 4: ['云音乐电音榜', '10520166'],\n 5: ['UK排行榜周榜', '180106'],\n 6: ['美国Billboard周榜', '60198'],\n 7: ['KTV嗨榜', '21845217'],\n 8: ['iTunes榜', '11641012'],\n 9: ['Hit FM Top榜', '120001'],\n 10: ['日本Oricon周榜', '60131'],\n 11: ['韩国Melon排行榜周榜', '3733003'],\n 12: ['韩国Mnet排行榜周榜', '60255'],\n 13: ['韩国Melon原声周榜', '46772709'],\n 14: ['中国TOP排行榜(港台榜)', '112504'],\n 15: ['中国TOP排行榜(内地榜)', '64016'],\n 16: ['香港电台中文歌曲龙虎榜', '10169002'],\n 17: ['华语金曲榜', '4395559'],\n 18: ['中国嘻哈榜', '1899724'],\n 19: ['法国 NRJ EuroHot 30周榜', '27135204'],\n 20: ['台湾Hito排行榜', '112463'],\n 21: ['Beatport全球电子舞曲榜', '3812895'],\n 22: ['云音乐ACG音乐榜', '71385702'],\n 23: ['云音乐嘻哈榜', '991319590']\n}\n\nPLAYLIST_CLASSES = OrderedDict([\n ('语种', ['华语', '欧美', '日语', '韩语', '粤语', '小语种']),\n ('风格', ['流行', '摇滚', '民谣', '电子', '舞曲', '说唱', '轻音乐', '爵士', '乡村', 'R&B/Soul', '古典', '民族', '英伦', '金属', '朋克', '蓝调', '雷鬼',\n '世界音乐', '拉丁', '另类/独立', 'New Age', '古风', '后摇', 'Bossa Nova']),\n ('场景', ['清晨', '夜晚', '学习', '工作', '午休', '下午茶', '地铁', '驾车', '运动', '旅行', '散步', '酒吧']),\n ('情感', ['怀旧', '清新', '浪漫', '性感', '伤感', '治愈', '放松', '孤独', '感动', '兴奋', '快乐', '安静', '思念']),\n ('主题', ['影视原声', 'ACG', '儿童', '校园', '游戏', '70后', '80后', '90后', '网络歌曲', 'KTV', '经典', '翻唱', '吉他', '钢琴', '器乐', '榜单', '00后'])\n])\n\nALL_CLASSES = reduce(lambda x, y: x + y, [v for k, v in PLAYLIST_CLASSES.items()])\n\nMV_TYPE = ['ALL', 'ZH', 'EA', 'KR', 'JP']\n\n\n# 搜索单曲(1),歌手(100),专辑(10),歌单(1000),用户(1002) *(type)*\nsearch_types = {\n 'mp3': 1,\n 'artist': 100,\n 'album': 10,\n 'playlist': 1000,\n 'user': 1002,\n 'mv': 1004,\n}\n\n\ndef api_request(url, data=None, method=\"get\", json=True,\n session=None, headers=headers, encrypt=True, https=False):\n \"\"\"\n request and try\n :param https:\n :param encrypt:\n :param url:\n :param data:\n :param method:\n :param json:\n :param session:\n :type session: requests.Session\n :param headers:\n :return:\n \"\"\"\n url = base_https_url + url if https else base_url + url\n request_obj = session or requests\n\n # update cookies\n if isinstance(request_obj, requests.Session):\n for cookie in request_obj.cookies:\n if cookie.name == '__csrf':\n data['csrf_token'] = cookie.value\n break\n\n # encrypt\n if encrypt:\n data = encrypted_request(data)\n\n method = 'get' if not data and method == 'get' else 'post'\n request_method = getattr(request_obj, method, None) or request_obj.get\n try:\n req = request_method(url, data=data, headers=headers, timeout=10)\n req.encoding = \"UTF-8\"\n res = req.json() if json else req.text\n # if session:\n # session.cookies.save()\n return res\n except ValueError as e:\n log.print_err(\"api do not return a valuable json\")\n return {}\n except requests.exceptions.RequestException as e:\n log.print_warn(\"request error: %s\" % url)\n return {}\n\n\ndef get_top_songlist(idx=0, offset=0):\n action = TOP_LIST_ALL[idx][1]\n res = api_request(action, json=False)\n if not res:\n 
return None\n\n    songids = re.findall(r'/song\\?id=(\\d+)', res)\n    if not songids:\n        return None\n\n    songids = list(set(songids))\n    details = get_mp3_details(songids, offset=offset)\n    return details\n\n\ndef get_mp3_link(song_id):\n    # obj = '{\"ids\":[' + str(song_id) + '], br:\"320000\",csrf_token:\"csrf\"}'\n    data = {'ids': [song_id], 'br': 320000, 'csrf_token': 'csrf'}\n    url = \"/weapi/song/enhance/player/url\"\n    res = api_request(url, data)\n    if res and res['code'] == 200:\n        return res['data'][0]['url']\n\n\ndef get_mp3_links(song_ids):\n    # obj = '{\"ids\":[' + str(song_id) + '], br:\"320000\",csrf_token:\"csrf\"}'\n    data = {'ids': song_ids, 'br': 320000, 'csrf_token': 'csrf'}\n    url = \"/weapi/song/enhance/player/url\"\n    res = api_request(url, data)\n    if res and res['code'] == 200:\n        return {x['id']: x['url'] for x in res['data']}\n\n\ndef get_mp3_details(song_ids, offset=0):\n    tmpids = song_ids[offset:]\n    tmpids = tmpids[0:100]\n    tmpids = list(map(str, tmpids))\n    action = '/api/song/detail?ids=[{}]'.format(  # NOQA\n        ','.join(tmpids))\n\n    res = api_request(action)\n    if res and res['code'] == 200:\n        return {x['id']: x for x in res['songs']}\n\n\ndef get_mv_link(mv_id, r):\n    data = {'id': mv_id, 'r': r, 'csrf_token': 'csrf'}\n    url = \"/weapi/song/enhance/download/mv/url\"\n\n    res = api_request(url, data)\n    if res and res['code'] == 200:\n        return res['data']['url']\n\n\ndef get_video_link(vid, r):\n    data = {'ids': [vid], 'resolution': r, 'csrf_token': ''}\n    url = \"/weapi/cloudvideo/playurl\"\n\n    res = api_request(url, data)\n    if res and res['code'] == 200:\n        return res['urls'][0]['url']\n\n\ndef get_video_links(vids, r):\n    data = {'ids': vids, 'resolution': r, 'csrf_token': ''}\n    url = \"/weapi/cloudvideo/playurl\"\n\n    res = api_request(url, data)\n    if res and res['code'] == 200:\n        return {x['id']: x['url'] for x in res['data']}\n\n\ndef get_mv_detail(mvid):\n    data = {'id': mvid, 'csrf_token': 'csrf'}\n    url = \"/weapi/v1/mv/detail\"\n\n    res = api_request(url, data)\n    return res.get('data', None)\n\n\ndef get_video_detail(vid):\n    data = {'id': vid, 'csrf_token': 'csrf'}\n    url = \"/weapi/cloudvideo/v1/video/detail\"\n\n    res = api_request(url, data)\n    return res.get('data', None)\n\n\ndef get_playlist_detail(playlist_id):\n    url = \"/api/playlist/detail?id={}&upd\" \\\n        .format(playlist_id)\n\n    res = api_request(url)\n    return res.get('result', None)\n\n\ndef get_playlist_detail_v3(id):\n    action = '/weapi/v3/playlist/detail'\n    data = {'id': id, 'total': 'true', 'csrf_token': 'csrf', 'limit': 1000, 'n': 1000, 'offset': 0}\n\n    res = api_request(action, data)\n    return res.get('playlist', None)\n\n\ndef get_top_playlists(category='全部', order='hot', offset=0, limit=50):\n    \"\"\"\n    get playlists, but not detail\n    :param category:\n    :param order:\n    :param offset:\n    :param limit:\n    :return:\n    \"\"\"\n    action = u'/api/playlist/list?cat={}&order={}&offset={}&total={}&limit={}'.format(  # NOQA\n        category, order, offset, 'true' if offset else 'false',\n        limit)  # NOQA\n\n    res = api_request(action)\n    return res.get('playlists', None)\n\n\n# may be not useful\ndef get_playlist_classes():\n    action = '/weapi/playlist/catalogue'\n    res = api_request(action, json=False)\n    if res and res['code'] == 200:\n        return res\n\n\ndef get_playlist_catelogs():\n    path = '/weapi/playlist/catalogue'\n    return api_request(path, json=False)\n\n\ndef top_artists(offset=0, limit=100):\n    action = '/api/artist/top?offset={}&total=false&limit={}'.format(  # NOQA\n        offset, limit)\n    res = api_request(action)\n    return res.get('artists',
None)\n\n\ndef get_artists_songs(artist_id):\n action = '/api/artist/{}'.format(artist_id)\n res = api_request(action)\n if res and res['code'] == 200:\n return res\n\n\ndef get_artist_album(artist_id, offset=0, limit=50):\n action = '/api/artist/albums/{}?offset={}&limit={}'.format(\n artist_id, offset, limit)\n res = api_request(action)\n if res and res['code'] == 200:\n return res\n # if res and res['code'] == 200:\n # return res['hotAlbums']\n # return res.get('hotAlbums', None)\n\n\ndef get_album_detail(album_id):\n action = '/api/album/{}'.format(album_id)\n res = api_request(action)\n return res.get('album', None)\n\n\ndef search(s, stype=1, offset=0, total='true', limit=60):\n if isinstance(stype, str):\n if stype in search_types:\n stype = search_types[stype]\n else:\n return None\n\n action = '/api/search/get'\n data = {\n 's': s,\n 'type': stype,\n 'offset': offset,\n 'total': total,\n 'limit': limit\n }\n res = api_request(action, data, encrypt=False)\n return res.get('result', None)\n\n\n# has been change, fuck!\ndef user_playlist(uid, session=None, offset=0, limit=50):\n action = '/weapi/user/playlist'\n data = {'uid': uid, 'csrf_token': 'csrf',\n 'limit': limit, 'offset': offset,\n 'wordwrap': 7}\n\n res = api_request(action, data=data, session=session)\n return res.get('playlist', None)\n\n\ndef user_playlist_old(uid, offset=0, limit=100, session=None):\n action = '/api/user/playlist/?offset={}&limit={}&uid={}'.format( # NOQA\n offset, limit, uid)\n\n res = api_request(action, session=session)\n return res.get('playlist', None)\n\n\ndef my_subcount(session):\n action = '/weapi/subcount'\n res = api_request(action, data={}, session=session)\n\n return res\n\n\ndef my_mvs(session):\n action = '/weapi/mv/sublist'\n data = dict(\n offset=0,\n limit=1000,\n )\n res = api_request(action, data=data, session=session)\n\n return res.get('data', [])\n\n\ndef my_videos(session):\n action = '/weapi/cloudvideo/allvideo/sublist'\n data = dict(\n offset=0,\n limit=1000,\n )\n res = api_request(action, data=data, session=session)\n\n return res.get('data', [])\n\n\ndef hot_mvs():\n # todo, wait to find out how to encrypt and decrypt\n action = '/api/mv/toplist'\n action = '/api/mv/first'\n action = '/api/mv/hot' # well done\n\n data = dict(\n cat=u'内地',\n order='hot',\n offset=0,\n limit=50,\n )\n\n res = api_request(action, data=data)\n return res\n\n\ndef top_mvs(offset=0, limit=50):\n action = '/weapi/mv/toplist'\n data = dict(\n offset=offset,\n limit=limit,\n )\n\n res = api_request(action, data=data)\n return res.get('data', [])\n\n\ndef all_mvs(offset=0, limit=50):\n action = '/weapi/mv/all'\n data = dict(\n offset=offset,\n limit=limit,\n )\n res = api_request(action, data=data)\n return res.get('data', [])\n\n\ndef phone_login(username, password, session):\n \"\"\"\n\n :param username:\n :param password: must be encrypt by md5 hex\n :param session:\n :type session: requests.Session\n :return:\n \"\"\"\n action = '/weapi/login/cellphone'\n data = {\n 'phone': username,\n 'password': password,\n 'rememberLogin': 'true'\n }\n\n res = api_request(action, data=data, session=session)\n if res:\n return res\n\n\ndef login(username, password, session):\n \"\"\"\n :type username: str\n :param username:\n :param password: must be encrypt by md5 hex\n :param session:\n :type session: requests.Session\n :return:\n \"\"\"\n if username.isdigit():\n return phone_login(username, password, session)\n action = '/weapi/login'\n client_token = 
'1_jVUMqWEPke0/1/Vu56xCmJpo5vP1grjn_SOVVDzOc78w8OKLVZ2JH7IfkjSXqgfmh'\n session.cookies.load()\n data = {\n 'username': username,\n 'password': password,\n 'rememberLogin': 'true',\n 'clientToken': client_token,\n }\n res = api_request(action, data=data, session=session, https=True)\n if res:\n return res\n" }, { "alpha_fraction": 0.4895104765892029, "alphanum_fraction": 0.4979020953178406, "avg_line_length": 27.039215087890625, "blob_id": "4653ea5fad0727c600d18d60c33443c5b781692a", "content_id": "f065e805a7171a37d66075a72a457f183d3e1cf1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2860, "license_type": "permissive", "max_line_length": 94, "num_lines": 102, "path": "/NXSpider/spider/mp3.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/23.\n# email to [email protected]\n#\n\nimport re\n\nimport os\n\nfrom NXSpider.spider.album import Album\nfrom NXSpider.spider.api import get_mp3_link, get_mv_detail\nfrom NXSpider.spider.artist import Artist\nfrom NXSpider.spider.mv import MV\nfrom NXSpider.spider.base_driver import *\n\n\nclass Mp3(Music163Obj):\n __model_name__ = Mp3Model\n __model_rfilter__ = {\n 'artists', # read from album model\n }\n __parse_recursion__ = {\n 'artists': Artist(),\n 'album': Album(__model_rfilter__={'artist','songs'}),\n 'mv': MV(),\n }\n\n @attr_replace(attr_name='mvid', new_name='mv')\n def replace_mvid(self, obj):\n if obj != 0:\n obj = get_mv_detail(obj)\n return obj\n return None\n\n def pre_save(self, doc, obj):\n \"\"\"\n :param doc:\n :param obj:\n :type doc: Mp3Model\n :return:\n \"\"\"\n # set artists\n if getattr(doc, 'album', False) and getattr(doc['album'], 'artists', False):\n doc.artists = [a for a in doc['album']['artists']]\n\n def download_filename(self, doc):\n \"\"\"\n implement pls\n get a name to save file\n need be complete by child\n :param doc:\n :return:\n \"\"\"\n authors = ','.join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mp3_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = \"%s - %s.mp3\" % (author, mp3_name)\n return name\n\n def download_filename_full(self, doc):\n \"\"\"\n implement pls\n get a path to save file, by relative path\n need be complete by child\n :param doc:\n :type doc: Mp3Model\n :return:\n :rtype: str\n \"\"\"\n # authors = reduce(lambda x, y: x + ',' + y, [x['name'] for x in doc.artists])\n authors = ','.join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mp3_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = os.path.join(author, \"%s - %s.mp3\" % (author, mp3_name))\n return name\n\n def url_load(self, doc):\n \"\"\"\n implement pls\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n try:\n return get_mp3_link(doc['id'])\n except:\n return None\n\n def shortcut_self_path(self, doc):\n \"\"\"\n implement pls, not force\n return self short cut path\n :param doc:\n :return:\n \"\"\"\n result = []\n result.extend([os.path.join(\"artist\", re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', x['name']))\n for x in doc.artists])\n result.append(os.path.join(\"album\", re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc.album['name'])))\n return result\n" }, { "alpha_fraction": 0.7113480567932129, "alphanum_fraction": 0.7159177660942078, "avg_line_length": 27.54347801208496, "blob_id": 
"d7c2044b545a4fee68f749e57eceef7a4767b965", "content_id": "7da0e8176a8ee2036229790663b64970b35faf22", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1313, "license_type": "permissive", "max_line_length": 78, "num_lines": 46, "path": "/CONTRIBUTING.rst", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "============\nContributing\n============\n\nContributions are welcome, and they are greatly appreciated! Every\nlittle bit helps, and credit will always be given.\n\nYou can contribute in many ways:\n\nTypes of Contributions\n----------------------\n\nReport Bugs\n~~~~~~~~~~~\n\nReport bugs at https://github.com/Grass-CLP/NXSpider/issues.\n\nIf you are reporting a bug, please include:\n\n* Your operating system name and version.\n* Any details about your local setup that might be helpful in troubleshooting.\n* Detailed steps to reproduce the bug.\n\nFix Bugs\n~~~~~~~~\n\nLook through the GitHub issues for bugs. Anything tagged with \"bug\"\nis open to whoever wants to implement it.\n\nImplement Features\n~~~~~~~~~~~~~~~~~~\n\nLook through the GitHub issues for features. Anything tagged with\n\"feature\" is open to whoever wants to implement it.\n\nPull Request Guidelines\n-----------------------\n\nBefore you submit a pull request, check that it meets these guidelines:\n\n1. The pull request should include tests.\n2. If the pull request adds functionality, the docs should be updated. Put\n your new functionality into a function with a docstring, and add the\n feature to the list in README.rst.\n3. The pull request should work for Python 2.7, and 3.x, and for PyPy.\n and make sure that the tests pass for all supported Python versions.\n" }, { "alpha_fraction": 0.5103048086166382, "alphanum_fraction": 0.5161103010177612, "avg_line_length": 38.25925827026367, "blob_id": "cd2de5da1409d8c29ee08e9b2fba36f01adf56f1", "content_id": "e82b810c2886c563597b3771d591134967056261", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13794, "license_type": "permissive", "max_line_length": 116, "num_lines": 351, "path": "/NXSpider/bin/spider_ctrl.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/6.\n# email to [email protected]\n#\nimport sys\n\nimport requests\nfrom cement.core.controller import expose\n\nfrom NXSpider.bin.base_ctrl import NXSpiderBaseController, py2_decoding, py2_encoding\nfrom NXSpider.bin.print_as_table import print_playlist, print_albums\nfrom NXSpider.common import log\nfrom NXSpider.common.config import Config\nfrom NXSpider.spider import api\n\n\nclass SpiderController(NXSpiderBaseController):\n class Meta:\n label = \"spider\"\n stacked_on = 'base'\n description = \"NXSpider\"\n arguments = [\n (['-cls', '--cls'],\n dict(help=\"classes or catalogue\")),\n (['-ar', '--artist'],\n dict(help=\"artist\")),\n (['-pl', '--playlist'],\n dict(help=\"playlist\")),\n (['-ur', '--user'],\n dict(help=\"user\")),\n (['-mv', '--mv'],\n dict(help=\"artist\")),\n (['-mp3', '--mp3'],\n dict(help=\"song\")),\n (['-ab', '--album'],\n dict(help=\"album\")),\n (['-dw', '--download'],\n dict(help=\"download files, [<mv,mp3>], eg. -dw mv,mp3\")),\n (['-offset'],\n dict(help=\"offset index, eg. -offset 50\")),\n (['-limit'],\n dict(help=\"limit size, eg. 
\n    @expose(help=\"spider mp3, usage: smp3s -mp3 <mp3_id,id1,id2> [-dw <mv,mp3>]\")\n    def smp3s(self):\n        from NXSpider.bin.models import dw_mp3_mo\n\n        if self.param_check(['mp3'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        mp3s = self.app.pargs.mp3.split(',')  # type: list\n        details = api.get_mp3_details(mp3s) or {}\n\n        for mid, detail in details.items():\n            log.print_info(u\"<{}>\".format(detail['name']))\n            dw_mp3_mo.parse_model(detail,\n                                  download_type=download_type,\n                                  file_check=Config().get_file_check(),\n                                  shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(help=\"spider playlist, usage: spls -pl <playlist_id,id2,id3> [-dw <mv,mp3>]\")\n    def spls(self):\n        from NXSpider.bin.models import playlist_mo\n\n        if self.param_check(['playlist'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        playlists = self.app.pargs.playlist.split(',')  # type: list\n\n        for pid in playlists:\n            playlist_detail = api.get_playlist_detail(pid)\n            if playlist_detail:\n                log.print_info(u\"<{}> author:{}\".format(\n                    playlist_detail['name'],\n                    playlist_detail['creator']['nickname'],\n                ))\n                playlist_mo.parse_model(playlist_detail,\n                                        download_type=download_type,\n                                        file_check=Config().get_file_check(),\n                                        shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(\n        help=\"spider playlists of a class, usage: scls-pls -cls <class name> \"\n             \"[-dw <mv,mp3>] [-offset <offset>] [-limit <limit>]\")\n    def scls_pls(self):\n        from NXSpider.bin.models import playlist_mo\n\n        if self.param_check(['cls'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        class_name = self.app.pargs.cls\n        class_name = py2_decoding(class_name)\n\n        if class_name != u\"全部\" and py2_encoding(class_name) not in api.ALL_CLASSES:\n            log.print_err(\"class name is wrong, pls check by running: nxspider sw-pl-classes\")\n            return\n\n        playlists = api.get_top_playlists(category=class_name,\n                                          offset=self.app.pargs.offset or 0,\n                                          limit=self.app.pargs.limit or 50)  # type: list\n\n        log.print_info(\"playlists below will be crawled\")\n        print_playlist(playlists)\n\n        for pl_obj in playlists:\n            playlist_detail = api.get_playlist_detail(pl_obj['id'])\n            if playlist_detail:\n                log.print_info(u\"<{}> author:{}\".format(\n                    playlist_detail['name'],\n                    playlist_detail['creator']['nickname'],\n                ))\n                playlist_mo.parse_model(playlist_detail,\n                                        download_type=download_type,\n                                        file_check=Config().get_file_check(),\n                                        shortcuts_stack=[] if Config().get_shortcut() else None)\n        log.print_info(\"spider complete!~\")\n        pass\n
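\n    # example invocations (sketch; the ids are placeholders):\n    #   nxspider spls -pl 123456,654321 -dw mp3,mv\n    #   nxspider scls-pls -cls 全部 -limit 20 -dw mp3\n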
\n    @expose(help=\"spider artist top mp3, usage: sar-top-mp3 -ar <artist_id,id1,id2> [-dw <mv,mp3>]\")\n    def sar_top_mp3(self):\n        from NXSpider.bin.models import artist_mo\n\n        if self.param_check(['artist'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        artists = self.app.pargs.artist.split(',')  # type: list\n\n        for arid in artists:\n            detail = api.get_artists_songs(arid)\n            if detail is None:\n                continue\n\n            artist_detail = detail['artist']\n            artist_detail['mp3'] = detail['hotSongs']\n\n            log.print_info(u\"<{}>\".format(artist_detail['name']))\n            artist_mo.parse_model(artist_detail,\n                                  download_type=download_type,\n                                  file_check=Config().get_file_check(),\n                                  shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(help=\"spider artist albums, usage: sar-albums -ar <artist_id,id1,id2> [-dw <mv,mp3>] \"\n                 \"[-offset <offset>] [-limit <limit>]\")\n    def sar_albums(self):\n        from NXSpider.bin.models import artist_album_mo\n\n        if self.param_check(['artist'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        artists = self.app.pargs.artist.split(',')  # type: list\n\n        for arid in artists:\n            detail = api.get_artist_album(arid,\n                                          offset=self.app.pargs.offset or 0,\n                                          limit=self.app.pargs.limit or 50)\n            if detail is None:\n                continue\n\n            artist_detail = detail['artist']\n            album_details = [api.get_album_detail(d['id']) for d in detail['hotAlbums']]\n            album_details = [d for d in album_details if d]\n            artist_detail['albums'] = album_details\n\n            log.print_info(u\"<{}>\".format(artist_detail['name']))\n            log.print_info(\"albums below will be crawled\")\n            print_albums(artist_detail['albums'])\n\n            artist_album_mo.parse_model(artist_detail,\n                                        download_type=download_type,\n                                        file_check=Config().get_file_check(),\n                                        shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(help=\"spider album, usage: sab -ab <album_id,id1,id2> [-dw <mv,mp3>]\")\n    def sab(self):\n        from NXSpider.bin.models import album_mo\n\n        if self.param_check(['album'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        albums = self.app.pargs.album.split(',')  # type: list\n\n        for pid in albums:\n            album_detail = api.get_album_detail(pid)\n            if album_detail is None:\n                continue\n\n            log.print_info(u\"{} artist:{}\".format(\n                \"<\" + album_detail['name'] + \">\",\n                album_detail['artist']['name'],\n            ))\n            album_mo.parse_model(album_detail,\n                                 download_type=download_type,\n                                 file_check=Config().get_file_check(),\n                                 shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(\n        help=\"spider a user's playlists, usage: sur-pls -ur <user id,id1,id2> \"\n             \"[-dw <mv,mp3>] [-offset <offset>] [-limit <limit>]\")\n    def sur_pls(self):\n        from NXSpider.bin.models import playlist_mo\n\n        if self.param_check(['user'], sys._getframe().f_code.co_name) is False:\n            return\n\n        download_type = self.parse_download()\n        user_id = self.app.pargs.user\n        playlists = api.user_playlist(user_id,\n                                      offset=self.app.pargs.offset or 0,\n                                      limit=self.app.pargs.limit or 50)\n\n        log.print_info(\"playlists below will be crawled\")\n        print_playlist(playlists)\n\n        for pl_obj in playlists:\n            playlist_detail = api.get_playlist_detail(pl_obj['id'])\n            if playlist_detail:\n                log.print_info(u\"<{}> author:{}\".format(\n                    playlist_detail['name'],\n                    playlist_detail['creator']['nickname'],\n                ))\n                playlist_mo.parse_model(playlist_detail,\n                                        download_type=download_type,\n                                        file_check=Config().get_file_check(),\n                                        shortcuts_stack=[] if Config().get_shortcut() else None)\n        log.print_info(\"spider complete!~\")\n        pass\n\n    @expose(\n        help=\"spider top mvs, usage: stop-mvs [-offset <offset>] [-limit <limit>]\")\n    def stop_mvs(self):\n        from NXSpider.bin.models import no_rec_mv_mo\n\n        mvs = api.top_mvs(offset=self.app.pargs.offset or 0,\n                          limit=self.app.pargs.limit or 50)\n        mvs = [api.get_mv_detail(d['id']) for d in mvs]\n        mvs = [d for d in mvs if d]\n\n        for mv in mvs:\n            no_rec_mv_mo.parse_model(mv, download_type=['mv'],\n                                     file_check=Config().get_file_check(),\n                                     shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n\n        pass\n
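\n    # the login endpoints expect the password pre-hashed; a sketch of the\n    # hashing step used below (cf. the api.login docstring):\n    #   import hashlib\n    #   password = hashlib.md5(plaintext.encode('utf-8')).hexdigest()\n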
\n    @expose(\n        help=\"login and spider mv, usage: login-smv -lu <login user> [-lp <login password>]\")\n    def login_smv(self):\n        from NXSpider.bin.models import no_rec_mv_mo\n\n        if self.param_check(['lu'], sys._getframe().f_code.co_name) is False:\n            return\n\n        plaintext_pwd = self.app.pargs.lp or None\n        if plaintext_pwd is None:\n            import getpass\n            plaintext_pwd = getpass.getpass(\"Please input your password:\")\n\n        session = requests.session()\n        import hashlib\n        password = hashlib.md5(plaintext_pwd.encode('utf-8')).hexdigest()\n        res = api.login(self.app.pargs.lu, password, session)\n        if res.get('code', 0) != 200:\n            log.print_err('login failed, msg: {}'.format(res.get('msg', \"none\")))\n            exit()\n\n        mvs = api.my_mvs(session)\n        mvs = [api.get_mv_detail(d['id']) for d in mvs]\n        mvs = [d for d in mvs if d]\n\n        for mv in mvs:\n            no_rec_mv_mo.parse_model(mv, download_type=['mv'],\n                                     file_check=Config().get_file_check(),\n                                     shortcuts_stack=[] if Config().get_shortcut() else None)\n\n        log.print_info(\"spider complete!~\")\n\n        pass\n\n    @expose(\n        help=\"login and spider playlists, usage: login-spls -lu <login user> [-lp <login password>] [-dw <mv,mp3>]\")\n    def login_spls(self):\n        if self.param_check(['lu'], sys._getframe().f_code.co_name) is False:\n            return\n\n        from NXSpider.bin.models import playlist_mo\n\n        plaintext_pwd = self.app.pargs.lp or None\n        if plaintext_pwd is None:\n            import getpass\n            plaintext_pwd = getpass.getpass(\"Please input your password:\")\n\n        session = requests.session()\n        import hashlib\n        password = hashlib.md5(plaintext_pwd.encode('utf-8')).hexdigest()\n        res = api.login(self.app.pargs.lu, password, session)\n        if res.get('code', 0) != 200:\n            log.print_err('login failed, msg: {}'.format(res.get('msg', \"none\")))\n            exit()\n\n        user_id = res['account']['id']\n        download_type = self.parse_download()\n        playlists = api.user_playlist(user_id,\n                                      offset=self.app.pargs.offset or 0,\n                                      limit=self.app.pargs.limit or 1000)\n\n        log.print_info(\"playlists below will be crawled\")\n        print_playlist(playlists)\n\n        for pl_obj in playlists:\n            playlist_detail = api.get_playlist_detail(pl_obj['id'])\n            if playlist_detail:\n                log.print_info(u\"<{}> author:{}\".format(\n                    playlist_detail['name'],\n                    playlist_detail['creator']['nickname'],\n                ))\n                playlist_mo.parse_model(playlist_detail,\n                                        download_type=download_type,\n                                        file_check=Config().get_file_check(),\n                                        shortcuts_stack=[] if Config().get_shortcut() else None)\n        log.print_info(\"spider complete!~\")\n        pass\n" }, { "alpha_fraction": 0.6816326379776001, "alphanum_fraction": 0.6938775777816772, "avg_line_length": 34.07143020629883, "blob_id": "8ceb505bf02e62f8a8f6989ce6570c4b3d8aa755", "content_id": "e07373c047ac915cab80e31caee5c5ec56d82493", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 530, "license_type": "permissive", "max_line_length": 156, "num_lines": 14, "path": "/README_EN.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "NXSpider\n=================\n\nNXSpider, NetEase X Spider, a powerful mp3/mv spider that can help you download mp3 and mv files with media tags. Based on Python, MongoDB, and a recursive core algorithm.
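\n\nA quick example (see [SIMPLE_USE.md](SIMPLE_USE.md) for a beginner walkthrough): `nxspider spls -pl <playlist_id> -dw mp3,mv` crawls the given playlists and downloads their mp3 and mv files with tags attached.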
\n\n[![Software License](https://img.shields.io/pypi/l/Django.svg)](LICENSE.md)\n![platform](https://img.shields.io/badge/python-2.7-green.svg)\n![platform](https://img.shields.io/badge/python-3.5-green.svg)\n\n[Beginners start here](SIMPLE_USE.md) | [Version history](VERSION.md) | [Development details](DEV.md)\n\n\n==========\ntodo: to be continued" }, { "alpha_fraction": 0.5470085740089417, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 14.600000381469727, "blob_id": "4f5921b94f057e383094bdbae9fea67d44788142", "content_id": "7bd839cb6197f9a53416f08da81d902af42764b3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "permissive", "max_line_length": 33, "num_lines": 15, "path": "/NXSpider/common/__init__.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\nimport sys\n\nPYTHON3 = False\nPYTHON2 = False\nif sys.version > \"3\":\n    PYTHON3 = True\n\nif sys.version[0] == '2':\n    PYTHON2 = True\n" }, { "alpha_fraction": 0.5006303191184998, "alphanum_fraction": 0.5146378874778748, "avg_line_length": 39.556819915771484, "blob_id": "273e62ee2ef3e909cc0f9e2f8797ad711a6f02f4", "content_id": "c4628d2e84e3850e6cb1689090f3fc6e80199fb6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7139, "license_type": "permissive", "max_line_length": 116, "num_lines": 176, "path": "/NXSpider/bin/config_ctrl.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/5.\n# email to [email protected]\n#\nimport json\nimport os\n\nfrom cement.core.controller import CementBaseController, expose\n\nfrom NXSpider.common import log\nfrom NXSpider.common.config import Config, default_path_key, mv_resolutions\nfrom NXSpider.common.constant import default_download_dir\n\n\nclass ConfigController(CementBaseController):\n    class Meta:\n        label = \"config\"\n        stacked_on = 'base'\n        description = \"NXSpider config\"\n
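\n        # note: values set below persist via Config().save_config_file();\n        # setting -mh/--mhost also flips no_mongo back to False (see\n        # config_mongo), so pointing at a mongo host re-enables the database\n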
        arguments = [\n            (['-mh', '--mhost'],\n             dict(help=\"mongo host\")),\n            (['-mp', '--mport'],\n             dict(help=\"mongo port\")),\n            (['-mu', '--muser'],\n             dict(help=\"mongo user\")),\n            (['-mpw', '--mpassword'],\n             dict(help=\"mongo password\")),\n            (['-mn', '--mdbname'],\n             dict(help=\"mongo db name, default nxspider\")),\n            (['-nomongo', '--nomongo'],\n             dict(help=\"no mongo mode, true(1) or false(0), default True; while true, mongodb will never be used\")),\n\n            (['-path', '--path_download'],\n             dict(help=\"download path, default ~/.nxspider/download_files/, eg. -path default,path1,path2\")),\n            (['-mvr', '--mv_resolution'],\n             dict(help=\"mv default resolution, [240, 480, 720, 1080] default 720\")),\n            (['-stt', '--shortcut'],\n             dict(help=\"build shortcuts, true(1) or false(0), default False\")),\n            (['-tag', '--media_tag'],\n             dict(help=\"media tag, true(1) or false(0), default True\")),\n            (['-tag163', '--media_tag_163'],\n             dict(help=\"media tag of 163 comment, true(1) or false(0), default True\")),\n            (['-dfc', '--download_file_check'],\n             dict(help=\"check file exist in paths set, true(1) or false(0), default True\")),\n        ]\n\n    @expose(help=\"config mongo server with [-mh --mhost] [-mp --mport] \"\n                 \"[-mu --muser] [-mpw --mpassword] [-mn --mdbname] [-nomongo]\")\n    def config_mongo(self):\n        config = Config()\n        config_dict = config.config  # type: dict\n        mongo_key = 'mongo'\n        is_config = False\n        try:\n            if self.app.pargs.mhost is not None:\n                config_dict[mongo_key]['host'] = self.app.pargs.mhost\n                config_dict['no_mongo'] = False\n                is_config = True\n\n            if self.app.pargs.mport is not None:\n                config_dict[mongo_key]['port'] = int(self.app.pargs.mport)\n                is_config = True\n\n            if self.app.pargs.muser is not None:\n                config_dict[mongo_key]['username'] = self.app.pargs.muser\n                is_config = True\n\n            if self.app.pargs.mpassword is not None:\n                config_dict[mongo_key]['password'] = self.app.pargs.mpassword\n                is_config = True\n\n            if self.app.pargs.mdbname is not None:\n                config_dict[mongo_key]['name'] = self.app.pargs.mdbname\n                is_config = True\n\n            if self.app.pargs.nomongo is not None:\n                config_dict['no_mongo'] = True if self.app.pargs.nomongo.lower() == 'true'\\\n                    or self.app.pargs.nomongo == '1' else False\n                is_config = True\n        except:\n            log.print_err(\"input error, pls check\")\n            raise\n        if is_config:\n            config.save_config_file()\n            log.print_info(\"config success\")\n            self.config_show()\n\n    @expose(help=\"config spider behavior with [-path <default,path1,path2>]\"\n                 \"[-mvr <240,480,720,1080>] [-tag <1 or 0>] [-tag163 <1 or 0>]\"\n                 \"[-dfc <1 or 0>] [-stt <1 or 0>]\")\n    def config_spider(self):\n        config = Config()\n        config_dict = config.config  # type: dict\n        is_config = False\n        try:\n            if self.app.pargs.path_download is not None:\n                paths = self.app.pargs.path_download.split(',')  # type: list\n                if default_path_key in paths:\n                    index = paths.index(default_path_key)\n                    paths.remove(default_path_key)\n                    paths.insert(index, default_download_dir)\n\n                final_paths = []\n                for p in paths:\n                    try:\n                        # some errors are expected here; invalid paths are skipped\n                        if os.path.isdir(p) is False:\n                            os.mkdir(p)\n                        final_paths.append(p)\n                    except:\n                        log.print_warn(\"path may be invalid and will be skipped: {}\".format(p))\n                        pass\n\n                if not final_paths:\n                    final_paths.append(default_download_dir)\n\n                log.print_info('path will be set as: ' + ','.join(final_paths))\n\n                config_dict['download_path'] = final_paths\n                is_config = True\n\n            if self.app.pargs.mv_resolution is not None:\n                r = int(self.app.pargs.mv_resolution)\n                if r not in mv_resolutions:\n                    log.print_warn(\"-mvr resolution config skipped, value must be 240,480,720,1080\")\n                else:\n                    config_dict['mv_def_resolution'] = r\n                    is_config = True\n\n            if self.app.pargs.media_tag is not None:\n                config_dict['media_tag'] = True if self.app.pargs.media_tag.lower() == 'true'\\\n                    or self.app.pargs.media_tag == '1' else False\n                is_config = True\n\n            if self.app.pargs.media_tag_163 is not None:\n                config_dict['media_tag_163'] = True if self.app.pargs.media_tag_163.lower() == 'true' \\\n                    or self.app.pargs.media_tag_163 == '1' else False\n                is_config = True\n\n            if self.app.pargs.shortcut is not None:\n                config_dict['shortcut'] = True if 
self.app.pargs.shortcut.lower() == 'true' \\\n or self.app.pargs.shortcut == '1' else False\n is_config = True\n\n\n except:\n log.print_err(\"input error, pls check\")\n raise\n if is_config:\n config.save_config_file()\n log.print_info(\"config success\")\n self.config_show()\n\n @expose(help=\"check config is valid or not\")\n def config_check(self):\n self.config_show()\n try:\n config = Config()\n if config.config_test():\n log.print_info('config check complete, all is well done!')\n except:\n log.print_err('config check failed, pls re config')\n\n @expose(help=\"clear all config, u need re-config from beginning\")\n def config_clear(self):\n Config().config_reset()\n log.print_info(\"config has been reset, u need re-config from beginning pls\")\n self.config_show()\n\n @expose(help=\"list all config opt\")\n def config_show(self):\n config_dict = Config().config\n log.print_info(\"config will be show fellow:\")\n print(json.dumps(config_dict, ensure_ascii=False, indent=1))\n\n" }, { "alpha_fraction": 0.7139001488685608, "alphanum_fraction": 0.7363922595977783, "avg_line_length": 27.87013053894043, "blob_id": "d430fcf244be5988a571684c01a0c2ac3a34bda3", "content_id": "71157e5eb0ea60b8df291ecb424356375324a565", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3963, "license_type": "permissive", "max_line_length": 109, "num_lines": 77, "path": "/DEV.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "NXSpider\n=================\n\nNXSpider, NetEase X Spider,一个强大的(网易云音乐)mp3,mv爬虫,可以下载和收集mp3,mv信息,同时附带多媒体标签信息。采用python编写,mongo数据库(非必须),递归算法核心实现\n\n[![Software License](https://img.shields.io/pypi/l/Django.svg)](LICENSE.md)\n![platform](https://img.shields.io/badge/python-2.7-green.svg)\n![platform](https://img.shields.io/badge/python-3.5-green.svg)\n\n[新手先看这里](SIMPLE_USE.md) | [历史版本在这里](VERSION.md) | [开发详情在这里](DEV.md)\n\n## 开发及问题(不关心的可以不看了)\n\n### 开发调试\n1. 已安装则采用(下文采用该方式进行说明):\n```\n$nxspider ...\n```\n2. 未安装+win采用(未安装指没执行setup.py,只用代码运行):\n```\nbash_python(3) NXSipder/bin/cli.py ...\n```\n3. 未安装+linux采用:\n```\nPYTHONPATH=. python(3) NXSipder/bin/cli.py ...\n```\n4. 注意,win下采用bash_python时,分割符 ```,``` 必须改为 ```:``` eg.\n```\n$bash_python.bat NXSpider/bin/cli.py config-spider -path d:\\netease_dw:default\n```\n\n### 配置说明\n1. ~~config-mongo必须运行爬取前配置!!!,dbname可不指定,默认为nxspider~~\n2. 默认无mongodb模式,需要采集数据则通过配置第一条即可 `nxspider config-mongo ..`\n3. 一旦配置了 `mongodb -mh` 则 `-nomogo` 会自动设置为0,即配置了host就会使用的意思\n2. path **强烈建议**爬取前配置,```,``` 为多个下载路径分隔符。default指`~/.nxspider/download_files/`\n3. 其他配置可选,请查看 `nxspider -h`\n4. ```-tag 1``` 建议保留 `-tag163 1` 根据需求保留\n6. 配置完建议执行 ```nxspider config-check``` 检查配置正确性\n\n### 注意\n- 本项目纯粹是学习开发使用,欢迎大家互相讨论,下载的资料请24小时内删除\n- 涉及侵权以及版权问题欢迎讨论和提出\n\n### 协助开发或2次开发建议\n1. 希望尽可能(yahoo邮件)跟作者(LipsonChan)联系,以及对项目进行加★\n2. 核心代码为NXSpider/bin以及NXSpider/spider/base_driver.py\n3. 主要逻辑为通过api获得对于json数据,采用递归+配置方式,自动下载可下载对象\n4. 如果有任何反馈,希望回复到项目的issue,注明版本号,运行环境及描述清楚问题\n\n### windows + python2问题\n1. 参数输入(目前只有查询)非中文和latin,可能会出现问题\n2. 不推荐配置路径中有非latin字符,及路径最好是英文,没测试过\n3. 
查询结果输出无法显示非中文和latin数据,如韩文(日文可以)\n\n### 其他问题\n- 同歌手同名MP3不会被重复下载\n\n### 开发历程\n- 基于spider163项目(不满足且有小bug),开发mongodb以及可以更多爬取项目\n- 增加更多url,MV下载,尝试使用eapi发现很难发现加密规则\n- 修改项目结构为下载驱动driver和model双层,配合metaclass进行递归式下载,减少后续增量开发工作\n- 为了配合Netease app和桌面软件,修改mp3 mp4 tag信息\n- 艰难的使用了eyed3,windows超级麻烦(现成的dll还要配置,还有字符集问题),发现不支持mp4,非常难过\n- 发现mutagen,果断弃坑eyed3\n- 发布0.0.1版本,可以通过playlist,ablum,aritst_top_mp3爬取和下载mp3,mv\n- 发现mongodb可能有人不喜欢,新增无数据库版本(默认无需使用),通过配置可切换\n- 新增通过获取最火playlist进行爬取\n- 新增了好多功能,读 [VERSION.md](VERSION.md)\n- api全靠`fiddler`和猜,**一把辛酸一把泪**\n\n### 下阶段开发\n- 通过用户id,爬取该用户所有的歌单 √\n- 通过歌手id,爬取该歌手所有专辑 √\n- 通过排行版,爬取最新n个歌单 √\n- mongodb 可选?不强制(但对于离线则无法加tag,虽然只是个人用) √\n- 通过登录(或非登录),爬取用户收藏的歌单,mv **不安全(明文账号密码),而且经常登录api会被限,不开放说明**\n" }, { "alpha_fraction": 0.4928571283817291, "alphanum_fraction": 0.6714285612106323, "avg_line_length": 16.625, "blob_id": "997278c5c556dd28c599e926e1520b11ce90cd0a", "content_id": "3bc1f70e3849ded624faa36b73cc7843fa56d66c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 140, "license_type": "permissive", "max_line_length": 20, "num_lines": 8, "path": "/requirements.txt", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "six>=1,<=2\ncement>=2,<3\nmutagen>=1.40,<2\nterminaltables>=3,<4\ncryptography>=2.2,<3\nmongoengine>=0.15,<1\nrequests>=2.19,<3\ncolorama>=0.3,<0.4" }, { "alpha_fraction": 0.5651697516441345, "alphanum_fraction": 0.582694411277771, "avg_line_length": 23.026315689086914, "blob_id": "bc07e9820d1fa550e673fd1209b4b8a50a92bd9b", "content_id": "c488966393e41549013cefe0feb20cfd4156a44c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 913, "license_type": "permissive", "max_line_length": 76, "num_lines": 38, "path": "/NXSpider/utility/shortcut.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/12/15.\n# email to [email protected]\n#\n\nimport os\n\nif os.name == \"nt\":\n def symlink_ms(source, link_name):\n import ctypes\n csl = ctypes.windll.kernel32.CreateSymbolicLinkW\n csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)\n csl.restype = ctypes.c_ubyte\n flags = 1 if os.path.isdir(source) else 0\n try:\n if csl(link_name, source.replace('/', '\\\\'), flags) == 0:\n raise ctypes.WinError()\n except Exception as e:\n pass\n\n\n os.symlink = symlink_ms\n\n\ndef symlink(source, link_name):\n try:\n os.symlink(source, link_name)\n except Exception as e:\n pass\n\n\n# a = os.getcwd()\n# src = os.path.join(a, \"install.bat\")\n# dst = os.path.join(a, \"test.link\")\n#\n# os.symlink(os.path.join(a, \"install.bat\"), os.path.join(a, \"test.link\"))\n" }, { "alpha_fraction": 0.5379061102867126, "alphanum_fraction": 0.5848375558853149, "avg_line_length": 16.3125, "blob_id": "7a82dab413fa9f73c3b00dd08bb34ca098db3103", "content_id": "7bf4bb6ce6ae6676b958ec059cce89eaffc3c901", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "permissive", "max_line_length": 41, "num_lines": 16, "path": "/NXSpider/spider/artist.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/26.\n# email to [email protected]\n#\n\nfrom NXSpider.spider.base_driver import *\n\n\nclass Artist(Music163Obj):\n __model_name__ = ArtistModel\n 
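# per the base_driver convention (cf. the Mp3 and User models), attribute\n    # names listed in __model_rfilter__ are filtered out before the raw artist\n    # json is mapped onto ArtistModel; these two are picture-resource ids\n    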
__model_rfilter__ = {\n 'img1v1Id',\n 'picId',\n }\n" }, { "alpha_fraction": 0.5031654238700867, "alphanum_fraction": 0.5240848064422607, "avg_line_length": 26.3157901763916, "blob_id": "43765f2a01ae42e3f33332441c5a6e27d61b515c", "content_id": "aba075c3a131165a30a7e42fc5f0a9391c4adf88", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3633, "license_type": "permissive", "max_line_length": 90, "num_lines": 133, "path": "/NXSpider/utility/media_tag.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/17.\n# email to [email protected]\n#\nimport os\n\nfrom mutagen import id3\nfrom mutagen.id3 import ID3\nfrom mutagen.mp4 import MP4\n\nfrom NXSpider.common.config import Config\nfrom NXSpider.common.encrypt import aes_ecb\nfrom NXSpider.model.export import Mp3Model, Mp4Model\n\nkey_names = ['title', 'artist', 'album', 'album_artist', 'track_num', 'comment']\nencoding_keys = ['title', 'artist', 'album', 'album_artist', 'comment']\nmutagen_idv3_key_map = {\n 'title': 'TIT2',\n 'artist': 'TPE1',\n 'album': 'TALB',\n 'album_artist': 'TPE2',\n 'track_num': 'TRCK',\n 'comment': 'COMM'\n}\n\nmugagen_mp4_key_map = {\n 'title': '\\xa9nam',\n 'artist': '\\xa9ART',\n 'comment': '\\xa9cmt',\n}\naes_code = \"#14ljk_!\\]&0U<'(\"\n\n\n# @tools.ignored(Exception)\ndef attach_mp3_idv3(doc, file):\n \"\"\"\n :type doc: Mp3Model\n :param doc:\n :param file:\n :return:\n \"\"\"\n artists = [('[\"%s\",%d]' % (x['name'], x.id)) for x in doc.artists]\n artists_str = \",\".join(artists)\n\n authors = \",\".join([x['name'] for x in doc.artists])\n data = {\n 'title': doc['name'],\n 'artist': authors,\n 'album': doc['album']['name'],\n 'album_artist': authors,\n 'track_num': str(doc['no']),\n }\n\n if Config().get_media_tag_163():\n comment_plaintext = u'music:{\"musicId\":%d,\"musicName\":\"%s\",\"bitrate\":320000,' \\\n u'\"albumId\":%d,\"album\":\"%s\", \"artist\":[%s]}' \\\n % (doc.id, doc['name'], doc.album.id, doc.album['name'],\n artists_str)\n comment = \"163 key(Don't modify):\" + aes_ecb(comment_plaintext, aes_code).decode()\n data['comment'] = comment\n\n try:\n mp3 = ID3(file, v2_version=3)\n for k, v in data.items():\n if k not in mutagen_idv3_key_map:\n continue\n\n if k == 'comment':\n mp3.add(id3.COMM(lang='XXX', text=v))\n continue\n\n attr_type = getattr(id3, mutagen_idv3_key_map[k], None)\n if attr_type:\n mp3.add(attr_type(text=v))\n\n mp3.save(v2_version=3)\n except Exception as e:\n return False\n return True\n\n\n# @tools.ignored(Exception)\ndef attach_mp4_tag(doc, file):\n \"\"\"\n :type doc: Mp4Model\n :param doc:\n :param file:\n :return:\n \"\"\"\n authors = u\",\".join([x['name'] for x in doc.artists])\n data = {\n 'title': doc['name'],\n 'artist': authors,\n }\n\n if Config().get_media_tag_163():\n comment_plaintext = u'mv:{\"title\":\"%s\",\"mvId\":%d,\"artistId\":%d,' \\\n u'\"artistName\":\"%s\",\"pubTime\":\"%s\",\"bitrate\":%d}' \\\n % (doc['name'], doc.id, doc.artists[0].id,\n doc.artists[0]['name'], doc['publishTime'],\n doc['download_video_r'])\n comment = \"163 key(Don't modify):\" + aes_ecb(comment_plaintext, aes_code).decode()\n data['comment'] = comment\n\n try:\n mp4 = MP4(file)\n for k, v in data.items():\n if k not in mugagen_mp4_key_map:\n continue\n mp4[mugagen_mp4_key_map[k]] = v\n\n mp4.save()\n except Exception as e:\n return False\n return True\n\n\ndef attach_media_tag(doc, file):\n \"\"\"\n attach 
a media idv3\n :param doc:\n :param file:\n :return:\n \"\"\"\n if not os.path.exists(file):\n return False\n\n if isinstance(doc, Mp3Model):\n return attach_mp3_idv3(doc, file)\n elif isinstance(doc, Mp4Model):\n return attach_mp4_tag(doc, file)\n" }, { "alpha_fraction": 0.5641250610351562, "alphanum_fraction": 0.6272276043891907, "avg_line_length": 30.69444465637207, "blob_id": "e804681beff3131412ea21c32748de816d189470", "content_id": "31c87e97a223c09fe5a8e2246260cb3c186e3234", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3445, "license_type": "permissive", "max_line_length": 115, "num_lines": 108, "path": "/NXSpider/debug_test/crawl.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/11.\n# email to [email protected]\n#\nimport hashlib\n\nimport requests\n\nfrom NXSpider.common import tools, log\nfrom NXSpider.common.constant import all_download_type\nfrom NXSpider.spider.api import get_playlist_detail, get_mp3_links, get_album_detail, get_top_playlists, \\\n get_playlist_classes, \\\n top_artists, get_artists_songs, get_artist_album, get_mp3_details, search, phone_login, login, user_playlist, \\\n get_playlist_detail_v3, get_playlist_catelogs, hot_mvs, all_mvs, top_mvs\nfrom NXSpider.spider.playlist import Playlist\n\n# def playlist_by_id(link, download_type=all_download_type,\n# save=True, file_check=True):\n# playlist_detail = get_playlist_detail(link)\n# with tools.ignored(Exception):\n# log.print_info(\"%s author:%s\" % (\n# \"<\" + playlist_detail['name'] + \">\",\n# playlist_detail['creator']['nickname'],\n# ))\n#\n# playlist_mo = Playlist()\n# playlist_mo.parse_model(playlist_detail,\n# save=save, download_type=download_type,\n# file_check=file_check)\n# pass\n\n\n# def playlist_by_page(page, type=u\"全部\"):\n# play_url = constant.play_url.format(type, page * 35)\n# titles = []\n# try:\n# log.print_info(\"begin crawl playlist page: {}, type: {}\".format(page, type))\n# acmsk = {'class': 'msk'}\n# scnb = {'class': 'nb'}\n# dcu = {'class': 'u-cover u-cover-1'}\n# ucm = {'class': 'm-cvrlst f-cb'}\n# data = tools.curl(play_url, constant.header, type=constant.RETURE_HTML)\n# lst = data.find('ul', ucm)\n# for play in lst.find_all('div', dcu):\n# title = tools.encode(play.find('a', acmsk)['title'])\n# link = tools.encode(play.find('a', acmsk)['href']).replace(\"/playlist?id=\", \"\")\n# # playlist_by_id(link)\n# print(\"link: {}, title: {}\".format(link, title))\n# except Exception as e:\n# log.print_err(\"playlist page error:{} type:{} page:{}\".format(e, type, page))\n# log.print_err(e)\n\n\n# def playlist_all_page():\n# cf = \"全部\"\n# for i in range(36):\n# playlist_by_page(i + 1, cf)\n\n# test = get_mv_details([5322493,239037])\ntest = top_mvs()\ntest = all_mvs()\ntest = hot_mvs()\ntest = get_playlist_catelogs()\ntest = get_playlist_classes()\ntest = get_top_playlists()\ntest = user_playlist(48872048)\ntest = get_playlist_detail(92024088)\ntest = get_playlist_detail(2246057871)\ntest = get_playlist_detail(107020750)\ntest = get_playlist_detail_v3(107020750)\n\np = hashlib.md5('000'.encode('utf-8')).hexdigest()\ns = requests.Session()\n\ntest = user_playlist(92024088, session=s)\ntest = get_playlist_detail(107020750)\n\n\ntest = search('周杰伦', stype=100)\ntest = top_artists()\ntest = get_playlist_classes()\ntest = get_mp3_details([412902496,412902496,412902496])\n\ntest = get_artists_songs(9621)\ntest = 
get_artist_album(9621)\n\n\ntest = top_artists()\n# test = playlist_classes()\ntest = get_top_playlists()\n\ntest = get_album_detail(32324)\n\n# test = get_mp3_link(412902496)\n# playlist_by_id(466225104)\ntests = get_mp3_links([412902496, 326904])\n# http://music.163.com/api/song/detail?ids=[412902496,326904] work\n# http://music.163.com/api/album/32324 don't work\n# http://music.163.com/#/artist?id=9621\n# http://music.163.com/api/artist/9621 don't work\n# playlist_all_page()\n\n\n\n\nprint('a')\n" }, { "alpha_fraction": 0.5634615421295166, "alphanum_fraction": 0.5788461565971375, "avg_line_length": 23.186046600341797, "blob_id": "e418f50b90ec3ae27156087529e48aa0ed53a751", "content_id": "4cdbcafe5489408d8cfb6b9d805c0596cfa7b2ba", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "permissive", "max_line_length": 60, "num_lines": 43, "path": "/NXSpider/spider/playlist.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/20.\n# email to [email protected]\n#\nimport os\n\nfrom NXSpider.spider.mp3 import Mp3\nfrom NXSpider.spider.base_driver import *\nfrom NXSpider.spider.user import User\n\n\nclass Playlist(Music163Obj):\n __model_name__ = PlaylistModel\n __model_rfilter__ = {\n # garbage properties\n 'coverImgId_str',\n 'coverImgId_str',\n }\n __parse_recursion__ = {\n 'mp3': Mp3(),\n 'creator': User(),\n }\n\n @attr_replace(attr_name='tracks', new_name='mp3')\n def replace_tracks(self, obj):\n return obj\n\n @attr_replace(attr_name='creator', new_name='creator')\n def replace_creator(self, obj):\n if 'userId' in obj and 'id' not in obj:\n obj['id'] = obj['userId']\n return obj\n\n def shortcut_relative_name(self, doc):\n \"\"\"\n implement pls, not force\n return short cut relative path\n :param doc:\n :return:\n \"\"\"\n return os.path.join(self.__file_type__, doc['name'])\n" }, { "alpha_fraction": 0.5814639925956726, "alphanum_fraction": 0.5897284746170044, "avg_line_length": 27.711864471435547, "blob_id": "35db5a3db506d34cba4fb78ae0fd15ce29c7f519", "content_id": "f07c1f1fd00e7607fa1c5609301c8cccff46621f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1694, "license_type": "permissive", "max_line_length": 89, "num_lines": 59, "path": "/NXSpider/bin/base_ctrl.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/20.\n# email to [email protected]\n#\nimport sys\n\nfrom cement.core.controller import CementBaseController\n\nfrom NXSpider.common import log, PYTHON2\nfrom NXSpider.common.constant import all_download_type\n\n\ndef py2_decoding(string):\n if PYTHON2:\n return string.decode(sys.getfilesystemencoding())\n return string\n\n\ndef py2_encoding(string):\n if PYTHON2:\n return string.encode('utf8')\n\n\nclass NXSpiderBaseController(CementBaseController):\n def parse_download(self):\n \"\"\"\n lost of spider function will parse -dw param, this will do it\n :return:\n \"\"\"\n if self.app.pargs.download is None:\n download_type = []\n else:\n download_type = self.app.pargs.download.split(',') # type: list\n download_type = list(filter(lambda x: x in all_download_type, download_type))\n return download_type\n\n def param_check(self, params, func_name):\n \"\"\"\n this will check param inputted and require is complete or 
not, and print help\n help will be in expose(help='...'), and got by function name\n :param params:\n :param func_name:\n :return:\n \"\"\"\n help = None\n fun = getattr(self, func_name, None)\n if fun and getattr(fun, '__cement_meta__', None):\n help = fun.__cement_meta__['help']\n\n for p in params:\n param = getattr(self.app.pargs, p, None)\n if param is None:\n log.print_err(\"param {} miss, see help:\".format(p))\n if help:\n print(help)\n return False\n return True\n" }, { "alpha_fraction": 0.5121457576751709, "alphanum_fraction": 0.5206477642059326, "avg_line_length": 31.5, "blob_id": "fb53946dc8c617a632c9e4d281f7d5375a9ddd72", "content_id": "67e06b7e8ccd3518a1b90b290589bf4b3114fcc6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2470, "license_type": "permissive", "max_line_length": 87, "num_lines": 76, "path": "/NXSpider/bin/print_as_table.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/23.\n# email to [email protected]\n#\nfrom terminaltables import AsciiTable\n\n\ndef print_playlist(pls):\n table = AsciiTable([[\"ID\", \"Name\", \"User\", \"PlayCount\"]])\n table_data = [[str(item['id']), item['name'],\n item['creator']['nickname'],\n str(item['playCount']),\n ] for item in pls]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n\n\ndef print_albums(abs):\n from terminaltables import AsciiTable\n table = AsciiTable([[\"ID\", \"Album\", \"Artist\", \"ArtistID\"]])\n table_data = [[str(item['id']), item['name'],\n ','.join([ar['name'] for ar in item['artists']]),\n ','.join([str(ar['id']) for ar in item['artists']]),\n ] for item in abs]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n\n\ndef print_mp3s(mp3s):\n table = AsciiTable([[\"ID\", \"Name\", \"Album\", \"AlbumID\", \"Artist\", \"ArtistID\"]])\n table_data = [[str(item['id']), item['name'],\n item['album']['name'], item['album']['id'],\n ','.join([ar['name'] for ar in item['artists']]),\n ','.join([str(ar['id']) for ar in item['artists']]),\n ] for item in mp3s]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n\n\ndef print_users(users):\n table = AsciiTable([[\"ID\", \"Name\", \"Signature\"]])\n table_data = [[str(item['userId']), item['nickname'],\n item['signature'],\n ] for item in users]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n\n\ndef print_artists(artists):\n table = AsciiTable([[\"ID\", \"Name\", \"AlbumNum\", \"MVNum\"]])\n table_data = [[str(item['id']), item['name'],\n str(item['albumSize']),\n str(item['mvSize'])\n ] for item in artists]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n\n\ndef print_mvs(mvs):\n table = AsciiTable([[\"ID\", \"Name\", \"Artist\", \"ArtistID\", \"Duration\", \"PlayCount\"]])\n table_data = [[str(item['id']), item['name'],\n item['artistName'],\n item['artistId'],\n '%02d:%02d' % divmod(int(item['duration'] / 1000), 60),\n item['playCount'],\n ] for item in mvs]\n table.table_data.extend(table_data)\n print(table.table)\n pass\n" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 44, "blob_id": "f18016d1d2f1a3befd4f983276bfe2c29e51ccdf", "content_id": "e33b80f3decd4f6314ca782a57810ba4599a45f1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "permissive", 
"max_line_length": 44, "num_lines": 1, "path": "/TODO.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "readme modify(add shortcut and add password)" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6558333039283752, "avg_line_length": 21.641510009765625, "blob_id": "c28537a0d84c9a8c488b4bd9ae7b490d5b88fd61", "content_id": "2e89f6728ad634a408cb5dd6f10a37e24d1533bf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1200, "license_type": "permissive", "max_line_length": 74, "num_lines": 53, "path": "/NXSpider/bin/cli.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/6/5.\n# email to [email protected]\n#\nimport datetime\n\nfrom cement.core.controller import CementBaseController, expose\nfrom cement.core.foundation import CementApp\n\nfrom NXSpider.bin.config_ctrl import ConfigController\nfrom NXSpider import version\nfrom colorama import init\n\nfrom NXSpider.bin.show_ctrl import ShowController\nfrom NXSpider.bin.spider_ctrl import SpiderController\nfrom NXSpider.common.config import Config\n\n\nBANNER = \"\"\"\n{} Application v{}\nCopyright (c) {} {}\n\"\"\".format(version.__title__, version.__version__ ,\n version.__author__, datetime.datetime.now().year)\n\ninit(autoreset=True)\n\n\nclass VersionController(CementBaseController):\n class Meta:\n label = 'base'\n description = ''\n arguments = [\n (['-v', '--version'], dict(action='version', version=BANNER)),\n ]\n\n\nclass App(CementApp):\n class Meta:\n label = \"NXSpider\"\n base_controller = \"base\"\n handlers = [VersionController, ConfigController,\n SpiderController, ShowController]\n\n\ndef main():\n with App() as app:\n app.run()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6171428561210632, "alphanum_fraction": 0.6628571152687073, "avg_line_length": 18.44444465637207, "blob_id": "f46c816fb50f98028b417b1ab5d853cf55857d7b", "content_id": "fd4f494187a4ef59c4370e5e325ca2af10a99728", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "permissive", "max_line_length": 41, "num_lines": 9, "path": "/NXSpider/debug_test/config.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/20.\n# email to [email protected]\n#\nfrom NXSpider.common.config import Config\n\npaths = Config().get_paths()\n" }, { "alpha_fraction": 0.6215596199035645, "alphanum_fraction": 0.6567278504371643, "avg_line_length": 26.82978630065918, "blob_id": "35e02726e2ce107392c9a82d8b6571f88488883d", "content_id": "37b99b1acbb50bb205229dbc611993566182e44a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "permissive", "max_line_length": 99, "num_lines": 47, "path": "/NXSpider/debug_test/idv3.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/16.\n# email to [email protected]\n#\nimport locale\n\n# import eyed3\nfrom mutagen.mp4 import MP4\n\nfrom NXSpider.utility.idv3_base import delete_163comment\nfrom NXSpider.utility.modify_idv3 import attach_media_tag_by_path, attach_media_tag_by_conf_path\n\nfrom mutagen.id3 import ID3, TALB, COMM\nfrom 
mutagen.easyid3 import EasyID3\nTALB\nCOMM\nMP4\n# ['TIT2', 'TPE1', 'TRCK', 'TALB', 'TPOS', 'TSSE', 'APIC:', 'COMM::XXX']\n# 名称 歌手 trackNum 专辑 pos 编码 comments\n# 'TPE2' 'aART' album_artist\n\nattach_media_tag_by_path(u'E:\\\\test')\nattach_media_tag_by_conf_path()\n\n# a = ID3(u'E:\\\\薛之谦 - 丑八怪.mp3', v2_version=3)\n#\n# a = MP4(u'E:\\\\(G)I-DLE - LATATA.mp4')\n#\n# a = eyed3.load(u'E:\\\\薛之谦 - 丑八怪.mp3')\n# a.tag.comments.remove('')\n# a = eyed3.load(u'E:\\\\(G)I-DLE - LATATA.mp3')\n# a = eyed3.load(u'E:\\\\(G)I-DLE - LATATA.mp4')\n# a.tag.title(\"test\")\n# a.tag.comments.remove('')\n\nlocale.getlocale(locale.LC_ALL)\nfile = u\"E:/新建文件夹/Billion - Dancing Alone_wapi - 副本.mp3\"\nfile = u\"D:\\\\Project\\\\python\\\\music.crawl\\\\download_files\\\\mp3\\\\1호선 뮤직\\\\1호선 뮤직 - 열대야.mp3\"\nfile = u\"D:\\\\Project\\\\python\\\\music.crawl\\\\download_files\\\\mp3\\\\Dal★shabet\\\\Dal★shabet - JOKER.mp3\"\ndelete_163comment(file)\nprint('a')\n\nattach_media_tag_by_conf_path()\n\nprint('a')\n" }, { "alpha_fraction": 0.5739436745643616, "alphanum_fraction": 0.5850487351417542, "avg_line_length": 27.604650497436523, "blob_id": "b6815bacf89ab5be051803d8579230d0e2a05f31", "content_id": "3394e3a705dc9c092f4303b337cab3e3e6f6d43e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3692, "license_type": "permissive", "max_line_length": 91, "num_lines": 129, "path": "/NXSpider/utility/modify_idv3.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/17.\n# email to [email protected]\n#\nimport os\nimport shutil\n\nfrom NXSpider.common.config import Config\nfrom NXSpider.model.mongo_model import Mp3Model, Mp4Model\nfrom NXSpider.spider.mp3 import Mp3\nfrom NXSpider.spider.mv import MV\nfrom NXSpider.common import tools, log, constant\nfrom NXSpider.spider.base_driver import Music163Obj\nfrom NXSpider.utility.media_tag import attach_media_tag\n\nmedia_types = {\n 'mp3': [Mp3Model, Mp3],\n 'mp4': [Mp4Model, MV],\n}\npaths = Config().get_paths()\n\n\ndef attach_media_idv3_by_db():\n \"\"\"\n attach media idv3 by paths in config\n :return:\n \"\"\"\n for suffix, type_setting in media_types:\n model, driver = type_setting # type: Mp3Model or Mp4Model, Music163Obj\n objs = model.objects(downloaded=True)\n for obj in objs:\n file_path = driver.download_check(obj, check_file=True)\n if not file_path:\n continue\n\n if is_latin1(file_path):\n res = attach_media_tag(obj, file_path)\n else:\n res = attach_shadow(file_path, suffix, obj)\n\n log.print_info('idv3 attach %s, file: %s'\n % ('success' if res else 'failed', file_path))\n\n\[email protected](Exception)\ndef load_media_obj(media_type, download_name):\n return media_types[media_type][0].objects(download_file_name=download_name).first()\n\n\ndef create_tmp_dir():\n path = os.path.join(constant.main_dir, 'tmp')\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\ndef delete_tmp_dir():\n path = os.path.join(constant.main_dir, 'tmp')\n if os.path.exists(path):\n os.removedirs(path)\n\n\ndef is_latin1(s):\n try:\n s.encode(\"iso-8859-1\")\n return True\n except UnicodeEncodeError:\n return False\n\n\ndef attach_media_tag_by_path(path):\n \"\"\"\n please run in python3 if your os is windows, cause os.walk has a encoding bug\n :type path: str\n :param path:\n :return:\n \"\"\"\n\n for root, dirs, files in os.walk(path):\n for file in files: # type: str\n suffix_i = file.rfind('.')\n 
suffix = file[suffix_i + 1:]\n if suffix not in media_types:\n continue\n\n split_txt = ' - '\n split_i = file.find(split_txt)\n if split_i == -1:\n continue\n\n artist = file[:split_i]\n download_file_name = os.path.join(artist, file)\n obj = load_media_obj(suffix, download_file_name)\n file_path = os.path.join(root, file)\n\n # fuck!!!! although i fix python-magic encoding bug of Chinese str in windows,\n # the fuck libmagic.dll doesn't recognize Korean. so, i am really pissed off to\n # change file name and change back\n '''\n if is_latin1(file_path):\n res = attach_media_tag(obj, file_path)\n else:\n res = attach_shadow(file_path, suffix, obj)\n '''\n\n # Dobby is free, thanks to mutagen\n res = attach_media_tag(obj, file_path)\n\n log.print_info('idv3 attach %s, file: %s'\n % ('success' if res else 'failed', file_path))\n\n\ndef attach_shadow(file, suffix, obj):\n tmp_name = 'abc'\n tmp_file = os.path.join(create_tmp_dir(), tmp_name + '.' + suffix)\n shutil.copy(file, tmp_file)\n res = attach_media_tag(obj, tmp_file)\n if res:\n os.remove(file)\n shutil.copy(tmp_file, file)\n os.remove(tmp_file)\n return res\n\n\ndef attach_media_tag_by_conf_path():\n for path in paths:\n attach_media_tag_by_path(path)\n\n\n" }, { "alpha_fraction": 0.5217254161834717, "alphanum_fraction": 0.5456548929214478, "avg_line_length": 22.701492309570312, "blob_id": "7be0b87f30c2310ff4afd9df912a7f946d96a6f7", "content_id": "1844da35769a2acbc61697b96657038cfe5d963c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3192, "license_type": "permissive", "max_line_length": 80, "num_lines": 134, "path": "/NXSpider/utility/idv3_base.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/16.\n# email to [email protected]\n#\nimport os\n\nimport eyed3\nfrom eyed3.id3 import ID3_V2_3\nfrom mutagen.id3 import ID3, Encoding\nfrom mutagen.mp4 import MP4\nfrom mutagen import id3\n\nkey_names = ['title', 'artist', 'album', 'album_artist', 'track_num', 'comment']\nencoding_keys = ['title', 'artist', 'album', 'album_artist', 'comment']\n# ['TIT2', 'TPE1', 'TRCK', 'TALB', 'TPOS', 'TSSE', 'APIC:', 'COMM::XXX']\n# 名称 歌手 trackNum 专辑 pos 编码 comments\n# 'TPE2' 'aART' album_artist\nmutagen_idv3_key_map = {\n 'title': 'TIT2',\n 'artist': 'TPE1',\n 'album': 'TALB',\n 'album_artist': 'TPE2',\n 'track_num': 'TRCK',\n 'commemt': 'COMM'\n}\n\nmugagen_mp4_key_map = {\n 'title': '\\xa9nam',\n 'artist': '\\xa9ART',\n 'comment': '\\xa9cmt',\n}\n\n\ndef delete_163comment(file):\n \"\"\"\n :type file: str\n :param file:\n :return:\n \"\"\"\n if not os.path.exists(file):\n return\n suffix = file[file.rfind('.'):]\n if suffix not in ['.mp3', '.mp4']:\n return\n\n try:\n id3_file = eyed3.load(file)\n id3_file.initTag(version=ID3_V2_3)\n id3_file.tag.comments.remove('', lang=b'XXX')\n id3_file.tag.save()\n except Exception as e:\n raise e\n\n\ndef attach_mp3_idv3(filename, data):\n\n\n try:\n mp3 = ID3(filename, v2_version=3)\n for k, v in data.items():\n if k not in mutagen_idv3_key_map:\n continue\n\n if k == 'comment':\n mp3.add(id3.COMM(lang='XXX', text=v))\n continue\n\n attr_type = getattr(id3, mutagen_idv3_key_map[k], None)\n if attr_type:\n mp3.add(attr_type(text=v))\n\n mp3.save()\n except Exception as e:\n return False\n return True\n\n\ndef attach_mp4_tag(filename, data):\n if not os.path.exists(filename):\n return False\n\n try:\n mp4 = MP4(filename)\n for k, v in 
data.items:\n if k not in mugagen_mp4_key_map:\n continue\n mp4[mugagen_mp4_key_map[k]] = v\n\n mp4.save()\n except Exception as e:\n return False\n return True\n\n\ndef attach_media_idv3(filename, data, encoding_func=None):\n \"\"\"\n attach data to idv3 of a file\n :param encoding_func: change encoding of values\n :type encoding_func: function | None\n :type filename: str\n :param filename:\n :type data: dict\n :param data:\n :return:\n \"\"\"\n if not os.path.exists(filename):\n return\n suffix = filename[filename.rfind('.'):]\n if suffix not in ['.mp3', '.mp4']:\n return\n\n try:\n id3_file = eyed3.load(filename)\n id3_file.initTag(version=ID3_V2_3)\n\n for k, v in data.items():\n if k not in key_names:\n continue\n\n if encoding_func and k in encoding_keys:\n v = encoding_func(v)\n\n if k == 'comment':\n id3_file.tag.comments.set(v, description=u'', lang=b'XXX')\n continue\n\n setattr(id3_file.tag, k, v)\n id3_file.tag.save()\n except Exception as e:\n return False\n\n return True\n" }, { "alpha_fraction": 0.5893169641494751, "alphanum_fraction": 0.5980735421180725, "avg_line_length": 12.75903606414795, "blob_id": "b9312747257c76e58e1a96dd24c26b58a4bc54db", "content_id": "00024bb729b062fe31c7b5f136b605884c043357", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1142, "license_type": "permissive", "max_line_length": 42, "num_lines": 83, "path": "/NXSpider/model/dict_model.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\n\n\nclass BaseDoc(dict):\n def save(self):\n pass\n\n def __getattr__(self, item):\n if item not in self:\n return None\n\n value = self[item]\n if isinstance(value, dict):\n value = BaseDoc(value)\n return value\n\n\ndef get_one_model_by_key(model, model_id):\n \"\"\"\n load or create a model by id\n :param model:\n :param model_id:\n :return: doc, is_new\n :rtype: (DynamicDocument, boolean)\n \"\"\"\n\n return model(id=model_id), True\n\n\ndef update_dynamic_doc(doc, data):\n \"\"\"\n :type doc: dict\n :type data: dict\n :param doc:\n :param data:\n :return:\n \"\"\"\n doc.update(data)\n\n\nclass ConfigModel(BaseDoc):\n pass\n\n\nclass AuthorModel(BaseDoc):\n pass\n\n\nclass Mp3Model(BaseDoc):\n pass\n\n\nclass ArtistModel(BaseDoc):\n pass\n\n\nclass Mp4Model(BaseDoc):\n pass\n\n\nclass VideoModel(BaseDoc):\n pass\n\n\nclass PlaylistModel(BaseDoc):\n pass\n\n\nclass AlbumModel(BaseDoc):\n pass\n\n\nclass UserModel(BaseDoc):\n pass\n\n\nclass Downloaded(BaseDoc):\n pass\n" }, { "alpha_fraction": 0.6626602411270142, "alphanum_fraction": 0.71875, "avg_line_length": 25.02083396911621, "blob_id": "c9fc4189718da3f0c1194de25751a5e944384b8e", "content_id": "18c8965992575f8a0b1a8f57ffc742cfa213ea2f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2290, "license_type": "permissive", "max_line_length": 109, "num_lines": 48, "path": "/VERSION.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "NXSpider\n=================\n\nNXSpider, NetEase X Spider,一个强大的(网易云音乐)mp3,mv爬虫,可以下载和收集mp3,mv信息,同时附带多媒体标签信息。采用python编写,mongo数据库(非必须),递归算法核心实现\n\n[![Software License](https://img.shields.io/pypi/l/Django.svg)](LICENSE.md)\n![platform](https://img.shields.io/badge/python-2.7-green.svg)\n![platform](https://img.shields.io/badge/python-3.5-green.svg)\n\n[新手先看这里](SIMPLE_USE.md) | 
[New users start here](SIMPLE_USE.md) | [Version history](VERSION.md) | [Developer details](DEV.md)\n\n## Update History\n\n#### 2018-12-17 v0.0.5\n- Added: build a playlist/artist/album directory tree (shortcuts) via configuration\n- Added: crawl songs by song id\n- Added (failed): crawl favourited videos (non-MV) after login; unusable due to API problems\n- Improved the docs and install scripts to make installing and updating easier\n\n#### 2018-06-23 v0.0.4\n- Added: crawl the user's favourited mvs after login\n- Added: crawl all of the user's playlists (favourites included) after login\n- Added: crawl the hottest playlists by playlist category\n- Added: print the playlist categories\n- Added: crawl the hottest mvs\n- Refactored code, polished all docs, improved output\n- Fixed a small bug: a code exception when an mv id exists but its detail cannot be fetched\n- mongo is now unused by default and its python dependencies are not installed automatically; install them via `pip+req.txt`\n- Added a few bat files for Windows users' convenience\n- The project now covers the author's own needs, so development appetite is low for the moment\n\n#### 2018-06-21 v0.0.3\n- Added: fetch playlists by user id, then crawl playlist info, mp3 and mv\n- Added: crawl all album info by artist id and download every album's mp3 and mv\n- Added `VERSION.md`\n\n#### 2018-06-20 v0.0.2\n- mongodb became optional, off by default, with matching configuration\n- Added: crawl the hottest playlist info for a given category and download mp3 and mv\n- Added `SIMPLE_USE.md` with the simplest possible getting-started guide\n- Polished part of the code\n\n#### 2018-06-19 v0.0.1\n- First release: mongodb + recursion + python(3) + tags\n- Search playlists, artists, albums, users and so on\n- Download mp3 and mv by playlist, artist or album id\n- All information is saved in mongodb\n- Highly configurable; every mp3/mv can get media tag info (artist, album, disc, 163comment!!!) via configuration" }, { "alpha_fraction": 0.5491606593132019, "alphanum_fraction": 0.5755395889282227, "avg_line_length": 19.85714340209961, "blob_id": "530e7a9bfccc5dadbacf91fd88393e1996a9f8f8", "content_id": "4b47fcab01e8332da4b185ae8a9d4749d9b4ca5f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "permissive", "max_line_length": 41, "num_lines": 21, "path": "/NXSpider/spider/user.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/25.\n# email to [email protected]\n#\nfrom NXSpider.spider.base_driver import *\n\n\nclass User(Music163Obj):\n    __model_name__ = UserModel\n    __model_rfilter__ = {\n        'userId',\n\n        # garbage properties\n        'avatarImgId',\n        'backgroundImgId',\n        'avatarImgIdStr',\n        'backgroundImgIdStr',\n        'avatarImgId_str',\n    }\n" }, { "alpha_fraction": 0.5626780390739441, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 24.962963104248047, "blob_id": "04697374363e500c53ab8b0b11ee5d1d0825f70c", "content_id": "710eb2425b4a0a8a046e80e1606da6b350d00bbe", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "permissive", "max_line_length": 78, "num_lines": 27, "path": "/NXSpider/debug_test/change_mp3_to_mp4.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/15.\n# email to [email protected]\n#\nimport os\nimport shutil\n\npaths = ['mv']\nproject_path = os.path.dirname(os.path.realpath(__file__))\nproject_path = os.path.dirname(project_path)\ndownload_path = os.path.join(project_path, 'download_files')\n\n\ndef rename_file(path):\n    for root, dirs, files in os.walk(path):\n        for f in files:\n            f = f  # type: str\n            suffix = f[f.rfind('.'):]\n            if suffix == '.mp3':\n                new_name = f[:f.rfind('.')] + \".mp4\"\n                os.rename(os.path.join(root, f), os.path.join(root, new_name))\n\n\nfor p in paths:\n    rename_file(os.path.join(download_path, p))\n\n" }, { "alpha_fraction": 0.439113050699234, "alphanum_fraction": 0.6412213444709778, "avg_line_length": 51.903846740722656, "blob_id": "4d9428d2ea1a15e718af14fa2680b79d4b04a47c", "content_id": "96fe0f6ea777e48aff68fb75b1c0244e67f8bd11", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2751, "license_type": "permissive", "max_line_length": 314, "num_lines": 52, "path": "/NXSpider/debug_test/aes_decode.py", 
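> spider/user.py above declares a __model_rfilter__ set so that noisy crawled keys never reach storage; the filtering itself is a plain dict comprehension (the same idea reappears in model/mongo_model.py later in this dump). A toy illustration with a made-up payload — none of these values are real API output:

```python
# Illustrative only: pruning unwanted keys from a crawled payload before it
# is merged into a model, mirroring the __model_rfilter__ convention above.
rfilter = {'userId', 'avatarImgId', 'backgroundImgId', 'avatarImgIdStr'}

crawled = {'id': 42, 'nickname': 'demo', 'userId': 42, 'avatarImgId': 123}
kept = {k: v for k, v in crawled.items() if k not in rfilter}

print(kept)  # {'id': 42, 'nickname': 'demo'}
```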
"repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/15.\n# email to [email protected]\n#\nimport base64\n\nfrom NXSpider.common.encrypt import aes_ecb, aes_ecb_decode\n\nBS = 16\npad = lambda s: s + (BS - len(s) % BS) * chr(0)\nunpad = lambda s: s[0:-ord(s[-1])]\n\naes_code = \"#14ljk_!\\]&0U<'(\"\n\nciphertext = \"Nl7KBB481H1s6KDr+0LDleV3BNgun+wbkH1VhgrfqMPZzOZVsT3BLFqC2uS7YY09/lrsyew/bRgFclEd6cIwSf8yaE4qkrunWhZhBK3vjD32WJrHnBWhpnlUpA3YcTOyE2l8UxMYHthHFfSfhsJyxJVU9FEPNLs1RUUrVY1rXw4IunfAgYR2xZjzgDJt7pqtnJSSlrBJD8OSAiA2vrMj89KpXOSxs0dSxmwlJThnZwBgYvC+9NFwnikG+yf3We9GyTtXQGipFKgTkus+RnK/jyAEizPAO7HO5h+EgqCp1Mlddnob0oxyI0AeAhvf38ZbdnuvfvqG4XG9IhvI2Npr3J1nirpvoP5RpapGO+R42raX8fINZaS3q5nKQf8zAhjRNj8a9HHlL6sdqQfU5KaSJEyHlNeASUquFE5lsByFK9kXERkXSn1iJncZUUahRgTj6xpbVZOUCmlj8mp1VksKC/ixTXlVoSbGs8xwxuO79DkyuGeYx+K6zXUmOvlsvfAVb6Po37vhZbqZ9YEyZxPVRAH1JCPIpkJTQJA4CCJPTT+vBJ846IwwL6gjK/AIW3exkOKuKgyUb0yddP4zJixj2DRYfzUs1x+EOYBo+PYI7EgT/RQTTeYLAJwU9rbChG5Mt6IpHFxmlikMjvEgmKnnzlw0/imiqUiNFXvxEwLm6m0O/6XjmNtshdQKbROB1N5LUjt+vhxJtUsiDb2CLfDLOA==\"\nciphertext = \"L64FU3W4YxX3ZFTmbZ+8/W7h/5Kqax2VT/58oRU5NjAZFPGpbk27t43eUWK5m1z8cvMlvA6anlBJkkYZjrBytF6xUIqji5RKJ1g6kFzVz/lIGDPfTePng5yRmh3s77Ndg7GRhTKI+EHmY1hk5ZjJ0Q6BQ4z3zqU5O60jvv+tQI7XlsOyzC7gW/qmwCAqG4Vg1LD+PEPx+/Fj5tbgs2eAef3rWvT+Jb15HmAGq7Yz698PX9R8pRfVLUCUBFoUaJKK2O0zrblFbCEVaM4gwwiGITV2OP00qM7dYhEp5js+CInCq8C4IrncXu6V9ZBmjkyeobbi44TO9VA/oujHbF5zEDW1EVpNsillUe/zSDUXIGUWzXdYGFcz42YlJuM+VnSWL4RPI9UQUVTj30ZZPuBgmTmHmWVB8U3CKAgEV5PDiEAzyrPZ6eobHbVcmA5HsbXY0Phxvj7usONl3wOt9Yc+AaeHZyc3hzG4lVVapoXl/U2cvfVgIqVtnuJE4XM4NuUd\"\nciphertext = \"L64FU3W4YxX3ZFTmbZ+8/deQY+6D2ll716ETGIclcTJUqCR71zqAGY3e9XRujlth+uXXI4cnOHzmPXJ/TJUxdY5nJA95aGn5DTQPEDtkaxzlSeq5qUmAkyFZv0Z+QStPVJky2QfcU1MiYX/iVUD7SC5543s8XzFBw8XL74d8yuh8mbzaGUSMt/FMzXqV0fWQXVJexSPn18Ql8RyzN9n+lLMRsbxONXHoaum/bTgCWM5hg+4/VR1xTL465Jqb847PYODL4Uhi44BxaRz4hHmaalQ7mmw9aHiQ3v9qsrSfiQH8zKeRPNDBZLyH+E3GBt4l472tQFm0hLXa57JaskZ8aqK7wyoWkgyG7SJu8tk72hkngn99EIDE2oHy4l1iMs7iiQV2KmcPDGI1buYZoT7waSJaK9BTl7sOA86iQ1f39o5+fchTXEhl9WMyBBeS5sokCS61iuBenz5t3IGEZQ0Ci22d6mk+5joa7pefLlnOtRkSA7FZ0L7VIITat2apwn5M\"\nciphertext = \"wUxNl9+oaCX1+TGUoqXxPMW1W97D0rB/UWviTQG1qGf7VPc9xScpSdO78dNI29kCFN9qWTWmEEe2JvA0TgeZBdmmiikLL8RQ6zvuAfKjzTOJhL6Ff1X4hw6syUlw+wqQwRL0WQvY6ysW/OlDmqa/GHaZOirjQv580Xplu9P2wBfxW3T8HrjzPdtS05xs2Pa6JgoErDzGhmlv7ny0yYWkHqwfFtlDDlwUTAAEOXhEFI/yNE5qt4jaQABURo9ZCBoircTF92wi2BSkQHoTzDgarpOnvzmafSOR+46gt3OolILhXrtef/wPpXih9gkLrKY0\"\nciphertext = \"I7ZTqvekU9UjuYios3KeugpKIryhEim4OTsU7t3xaN2ZBcRHRQ0XzpsDmVa76Sh8aJ4JOmBcD37oN9sLBCkqOpoGAVROkWpNfEsYTsLz13E4Q4OIT+BBn9Acvw9CNtviGXBUyDkeec3x4RGuI6T/efUn+MntkU2ZkyyIt7VhcoV7XTFGyFx9A2UB72Tw/vn2iRXeTeS0wuAJqxJN6b9GINrizT8a+QORT7FoyPl7e4Zf8Sg4E51UnhuOGtulNc8n7MnOkbyfJSZWfHD2sHRi6g==\"\nplaintext = 'music:{\"albumId\":2084576,\"alias\":[],\"mMusic\":\"{\\\\\"fid\\\\\":2007708232328038,\\\\\"br\\\\\":160000,' \\\n '\\\\\"size\\\\\":10701396}\",\"album\":\"Eternal Light\",\"musicId\":22712173,\"musicName\":\"Refrain\",\"mvId\":0,' \\\n '\"duration\":532000,\"lMusic\":\"{\\\\\"fid\\\\\":2008807743955814,\\\\\"br\\\\\":96000,\\\\\"size\\\\\":6437585}\",' \\\n '\"bitrate\":320000,\"albumPic\":\"http://p2.music.126.net/fNtMX44fvaGByURP0AbOZQ==/836728348761063.jpg\",' \\\n '\"mp3DocId\":1981319953261764,\"artist\":[[\"Anan Ryoko\",16069]],\"hMusic\":\"{\\\\\"fid\\\\\":1981319953261764,' \\\n 
'\\\\\"br\\\\\":320000,\\\\\"size\\\\\":21359877}\",\"albumPicDocId\":836728348761063}\"'\nplaintext = 'music:{\"musicId\":28341255,\"musicName\":\"Dancing Alone\",\"albumId\":2774171,\"album\":\"Remove Before Flight\",\"albumPicDocId\":6025323720389239,\"albumPic\":\"http://p1.music.126.net/w0uwPgaqDCJYYZJjzQfa7w==/6025323720389239.jpg\",\"bitrate\":320000,\"mp3DocId\":\"69bb414114182d8e7b8a5214168bc991\",\"duration\":211200}'\nplaintext = 'music:{\"musicId\":28341255,\"musicName\":\"Dancing Alone\",' \\\n '\"albumId\":2774171,\"album\":\"Remove Before Flight\",\"albumPicDocId\":6025323720389239,' \\\n '\"albumPic\":\"http://p1.music.126.net/w0uwPgaqDCJYYZJjzQfa7w==/6025323720389239.jpg\",' \\\n '\"bitrate\":320000,\"mp3DocId\":\"69bb414114182d8e7b8a5214168bc991\",\"duration\":211200}'\nplaintext = 'music:{\"musicId\":28341255,\"musicName\":\"Dancing Alone\",' \\\n '\"albumId\":2774171,\"album\":\"haha\",\"albumPicDocId\":6025323720389239,' \\\n '\"bitrate\":320000,\"mp3DocId\":\"69bb414114182d8e7b8a5214168bc991\",\"duration\":211200}'\n# plaintext = 'music:{\"musicId\":28341255,\"musicName\":\"Dancing Alone\",' \\\n# '\"bitrate\":320000}'\n# plaintext = 'mv:{\"title\":\"LATATA\",\"mvId\":5906035,\"artistId\":127524,' \\\n# '\"artistName\":\"Rainbow\",\"picId\":5991238859751539,\"pubTime\":\"2013-05-17\",' \\\n# '\"bitrate\":240,\"duration\":258000,\"briefIntro\":\"\",\"detailIntro\":\"\"}'\n# plaintext = 'mv:{\"title\":\"LATATA\",\"mvId\":5906035,\"artistId\":14055085,' \\\n# '\"artistName\":\"(G)I-DLE\",\"picId\":109951163279867003,\"pubTime\":\"2018-05-02\",' \\\n# '\"bitrate\":720,\"duration\":221000,\"briefIntro\":\"\",\"detailIntro\":\"\"}'\n\nb = aes_code.encode('utf-8')\na = base64.b64decode(ciphertext)\na = aes_ecb_decode(ciphertext, aes_code)\nc = aes_ecb(plaintext, aes_code)\nd = c.decode()\n\nprint(d)\nprint(d)\n" }, { "alpha_fraction": 0.5011028051376343, "alphanum_fraction": 0.507278323173523, "avg_line_length": 26.64634132385254, "blob_id": "6c8d9a50fde0455f8c3b2dcacce2089db3fcd7f6", "content_id": "1fda559ba5125955eedbcab03ba9b7182cde17ff", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2267, "license_type": "permissive", "max_line_length": 87, "num_lines": 82, "path": "/NXSpider/spider/mv.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/20.\n# email to [email protected]\n#\nimport os\nimport re\n\nfrom NXSpider.common.config import Config\nfrom NXSpider.spider.api import get_mv_link\nfrom NXSpider.spider.artist import Artist\nfrom NXSpider.spider.base_driver import *\n\n\ndef get_target_r(obj, limit_r=Config().get_mv_resolution()):\n max_valid = max([x['br'] for x in obj['brs']])\n return limit_r if max_valid > limit_r else max_valid\n\n\nclass MV(Music163Obj):\n __model_name__ = Mp4Model\n __model_rfilter__ = {\n }\n __parse_recursion__ = {\n 'artists': Artist(),\n }\n\n def download_filename(self, doc):\n \"\"\"\n implement pls\n get a name to save file\n need be complete by child\n :param doc:\n :return:\n \"\"\"\n authors = \",\".join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mv_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = \"%s - %s.mp4\" % (author, mv_name)\n return name\n\n def download_filename_full(self, doc):\n \"\"\"\n implement pls\n get a path to save file, by relative path\n need be complete by child\n 
:param doc:\n :return:\n :rtype: str\n \"\"\"\n authors = \",\".join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mv_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = os.path.join(author, \"%s - %s.mp4\" % (author, mv_name))\n return name\n\n def url_load(self, doc):\n \"\"\"\n implement pls\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n try:\n target_r = get_target_r(doc, Config().get_mv_resolution())\n doc['download_video_r'] = target_r\n return get_mv_link(doc['id'], target_r)\n except:\n return None\n\n def shortcut_self_path(self, doc):\n \"\"\"\n implement pls, not force\n return self short cut path\n :param doc:\n :return:\n \"\"\"\n result = []\n result.extend([os.path.join(\"artist\", re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', x['name']))\n for x in doc.artists])\n return result\n" }, { "alpha_fraction": 0.5629921555519104, "alphanum_fraction": 0.586614191532135, "avg_line_length": 20.16666603088379, "blob_id": "5c3dea037165cd937978439b869b6f853a5d2094", "content_id": "00b7c92bb06e4132dbbd5c12886224d6a47cd901", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "permissive", "max_line_length": 52, "num_lines": 24, "path": "/NXSpider/spider/album.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/26.\n# email to [email protected]\n#\n\nfrom NXSpider.spider.artist import Artist\nfrom NXSpider.spider.base_driver import *\n\n\nclass Album(Music163Obj):\n __model_name__ = AlbumModel\n __model_rfilter__ = {\n # 'song', # TODO need delete???\n 'artist',\n }\n __parse_recursion__ = {\n 'artists': Artist(),\n }\n\n @attr_replace(attr_name='songs', new_name='mp3')\n def replace_song(self, obj):\n return obj\n" }, { "alpha_fraction": 0.6957504749298096, "alphanum_fraction": 0.7034357786178589, "avg_line_length": 23.842697143554688, "blob_id": "3211e6d00cef7ff32038c21e5ffda2e4202fc108", "content_id": "5b0a6e0dcda0ff66346e7a48dd3460c21e78492a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2212, "license_type": "permissive", "max_line_length": 78, "num_lines": 89, "path": "/NXSpider/common/tools.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMIT License\n\nCopyright (c) 2017 Cheng YuMeng\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport codecs\nimport contextlib\nimport hashlib\nimport locale\n\nfrom NXSpider.common import PYTHON3\n\nRETURN_JSON = \"return json data\"\nRETURE_HTML = \"return html data\"\n\nlang, sys_encoding = locale.getdefaultlocale()\n\[email protected]\ndef ignored(*exceptions):\n try:\n yield\n except exceptions:\n pass\n\n\ndef encode(s):\n if PYTHON3 is True:\n return codecs.encode(s, \"utf-8\").decode(\"utf-8\")\n else:\n return s.encode(\"utf-8\")\n\n\ndef encode_sys(s):\n s.encode(sys_encoding)\n\n\ndef hex(s):\n if PYTHON3 is True:\n return codecs.encode(bytes(s, encoding=\"utf8\"), 'hex')\n else:\n return s.encode(\"hex\")\n\n\ndef md5(s):\n m = hashlib.md5()\n m.update(s.encode(\"utf-8\"))\n return m.hexdigest()\n\n\ndef is_unicode(obj):\n if PYTHON3:\n return isinstance(obj, str)\n else:\n from __builtin__ import unicode\n return isinstance(obj, unicode)\n\n\ndef input_format(string):\n \"\"\"\n :type string: str\n :param string:\n :return:\n \"\"\"\n if PYTHON3:\n return string\n else:\n return string.encode()\n\n" }, { "alpha_fraction": 0.5391644835472107, "alphanum_fraction": 0.5489556193351746, "avg_line_length": 25.413793563842773, "blob_id": "122dc14c3630ef281f91bcdf46677b4d920a59d1", "content_id": "154f9f791c283ba1c48c05f04736b6da3361597c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "permissive", "max_line_length": 71, "num_lines": 58, "path": "/NXSpider/spider/video.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/12/12.\n# email to [email protected]\n#\n\nimport os\nimport re\n\nfrom NXSpider.common.config import Config\nfrom NXSpider.spider.api import get_video_link\nfrom NXSpider.spider.artist import Artist\nfrom NXSpider.spider.base_driver import *\n\n\ndef get_target_r(obj, limit_r=Config().get_mv_resolution()):\n max_valid = max([x['resolution'] for x in obj['resolutions']])\n return limit_r if max_valid > limit_r else max_valid\n\n\nclass Video(Music163Obj):\n __model_name__ = VideoModel\n __model_rfilter__ = {\n }\n __parse_recursion__ = {\n 'artists': Artist(),\n }\n\n def download_filename_full(self, doc):\n \"\"\"\n implement pls\n get a path to save file, by relative path\n need be complete by child\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n # todo modify\n authors = \",\".join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mp3_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = os.path.join(author, \"%s - %s.mp4\" % (author, mp3_name))\n return name\n\n def url_load(self, doc):\n \"\"\"\n implement pls\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n try:\n target_r = get_target_r(doc, Config().get_mv_resolution())\n doc['download_video_r'] = target_r\n return get_video_link(doc['id'], target_r)\n except:\n return None\n" }, { "alpha_fraction": 0.49380558729171753, "alphanum_fraction": 0.5022236108779907, "avg_line_length": 30.01477813720703, "blob_id": "a1184863c446c795f067e5b8a110fb09ef6a9da3", "content_id": "e80ff9c0e9f50356db461db01d919055b25003ec", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, 
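> The base64 ciphertexts in debug_test/aes_decode.py above are exercised through aes_ecb / aes_ecb_decode from NXSpider.common.encrypt (reproduced later in this dump), which zero-pad the payload and run AES in ECB mode. A self-contained round-trip sketch of that scheme follows; the 16-byte key is a dummy value, not the client's real key, and the cryptography package is assumed to be installed.

```python
# Standalone round-trip of the zero-padded AES-ECB scheme used by the repo's
# encrypt helpers; key and plaintext below are made-up sample values.
import base64

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = b"0123456789abcdef"                      # dummy 16-byte AES key
plaintext = b'music:{"musicId":28341255}'

pad = 16 - len(plaintext) % 16                 # pad with NUL bytes to a full block
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend())

enc = cipher.encryptor()
token = base64.b64encode(enc.update(plaintext + b"\0" * pad) + enc.finalize())

dec = cipher.decryptor()
restored = dec.update(base64.b64decode(token)) + dec.finalize()
assert restored.rstrip(b"\0") == plaintext     # strip the zero padding again
```

> Note the design consequence: zero padding is ambiguous when the plaintext itself ends in NUL bytes, which is why the repo's decode helper returns the padded bytes untouched.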
"is_vendor": false, "language": "Python", "length_bytes": 6296, "license_type": "permissive", "max_line_length": 104, "num_lines": 203, "path": "/NXSpider/common/config.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\nimport json\nimport os\n\nfrom NXSpider.common import log\nfrom NXSpider.common.constant import config_path, default_download_dir\nfrom NXSpider.common.singleton import Singleton\n\ndefault_path_key = 'default'\nmv_resolutions = [240, 480, 720, 1080]\n\n\ndef utf8_data_to_file(f, data):\n if hasattr(data, 'decode'):\n f.write(data.decode('utf-8'))\n else:\n f.write(data)\n\n\nclass Config(Singleton):\n def __init__(self):\n if hasattr(self, '_init'):\n return\n self._init = True\n self.config_file_path = config_path\n\n # the deep level must be less than 2\n self.default_config = {\n 'mongo': {\n 'name': 'nxspider',\n 'host': 'localhost',\n 'port': 27017,\n 'username': 'None',\n 'password': 'None',\n },\n 'download_path': [default_download_dir],\n 'mv_def_resolution': 720,\n 'media_tag': True,\n 'media_tag_163': True,\n 'download_file_check': True,\n 'debug_log': True,\n 'no_mongo': True,\n 'shortcut': False,\n }\n self.config = {}\n\n need_save = False\n try:\n f = open(self.config_file_path, 'r')\n self.config = json.loads(f.read())\n f.close()\n except IOError or ValueError:\n self.__generate_config_file()\n need_save = True\n\n # merge default\n for k, v in self.default_config.items():\n if k not in self.config:\n self.config[k] = v\n need_save = True\n\n paths = self.config['download_path'] # type: list\n if len(paths) == 0:\n paths.append(default_download_dir)\n need_save = True\n\n if need_save:\n self.save_config_file()\n\n def __getitem__(self, key):\n if key in self.config:\n return self.config[key]\n return None\n\n def __generate_config_file(self):\n f = open(self.config_file_path, 'w')\n utf8_data_to_file(f, json.dumps(self.default_config, indent=2))\n f.close()\n\n def save_config_file(self):\n f = open(self.config_file_path, 'w')\n utf8_data_to_file(f, json.dumps(self.config, indent=2))\n f.close()\n\n def get_shortcut(self):\n return self.config['shortcut']\n\n def get_mongo(self):\n return self.config['mongo']\n\n def get_path(self):\n return self.config['download_path'][0]\n\n def get_paths(self):\n return self.config['download_path']\n\n def get_mv_resolution(self):\n return self.config['mv_def_resolution']\n\n def get_media_tag(self):\n return self.config['media_tag']\n\n def get_media_tag_163(self):\n return self.config['media_tag_163']\n\n def get_file_check(self):\n return self.config['download_file_check']\n\n def get_no_mongo(self):\n return self.config['no_mongo']\n\n def save_config_dict(self, obj):\n \"\"\"\n :type obj: dict\n :param obj:\n :return:\n \"\"\"\n for k, v in obj.items():\n if isinstance(v, dict) and isinstance(self.config[k], dict):\n self.config[k].update(v)\n else:\n self.config[k] = v\n\n def config_test(self):\n result = True\n try:\n\n # check mongodb config\n if self.config['no_mongo'] is False:\n log.print_info('check mongodb config')\n mongo = self.config['mongo']\n for k in ['name', 'host', 'port']:\n if k not in mongo:\n log.print_err(\"mongo config error, key mongo.{} is not set yet\".format(k))\n result = False\n\n # try import model, which will connect to server and exit if server config wrong\n import NXSpider.model.mongo_model\n\n for k in ['download_path', 'mv_def_resolution', 
'media_tag', 'media_tag_163']:\n if k not in self.config:\n log.print_err(\"config error, key {} is not set yet\".format(k))\n result = False\n\n # check type\n type_check = {\n 'download_path': list,\n 'mv_def_resolution': int,\n 'media_tag': bool,\n 'media_tag_163': bool,\n 'download_file_check': bool,\n 'no_mongo': bool,\n }\n\n need_save = False\n for k, v in type_check.items():\n if not isinstance(self.config[k], v):\n log.print_err(\"config error, {} is not a require type, \"\n \"and is reset to default value: {}\".format(k, self.default_config[k]))\n self.config[k] = self.default_config[k]\n need_save = True\n result = False\n\n # download path check\n final_paths = []\n for p in self.config['download_path']:\n try:\n # some error need pass\n if os.path.isdir(p) is False:\n os.mkdir(p)\n final_paths.append(p)\n except:\n log.print_warn(\"download path may be wrong and be deleted: {}\".format(p))\n need_save = True\n result = False\n pass\n\n # mv resolution check\n if self.config['mv_def_resolution'] not in mv_resolutions:\n log.print_warn(\"mv_def_resolution will be reset to default: {}\"\n .format(self.default_config['mv_def_resolution']))\n self.config['mv_def_resolution'] = self.default_config['mv_def_resolution']\n need_save = True\n result = False\n\n if need_save:\n self.config['download_path'] = final_paths\n self.save_config_file()\n\n return result\n except Exception as e:\n log.print_err(e)\n\n return False\n\n def config_reset(self):\n self.config = self.default_config\n self.save_config_file()\n" }, { "alpha_fraction": 0.6059637665748596, "alphanum_fraction": 0.6448349356651306, "avg_line_length": 25.828571319580078, "blob_id": "dd33d4596c0fc2bbcee9abb15c27b21f8b82e59b", "content_id": "abbdf977aa3e6baca5a2db2c08a58a98e54d815f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1878, "license_type": "permissive", "max_line_length": 80, "num_lines": 70, "path": "/NXSpider/common/encrypt.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\n\nimport base64\n\nfrom cryptography.hazmat.primitives.ciphers import (\n Cipher, algorithms, modes\n)\nfrom cryptography.hazmat.backends import default_backend\n\n\ndef aes(text, sec_key):\n backend = default_backend()\n pad = 16 - len(text) % 16\n text_t = text + pad * chr(pad)\n cipher = Cipher(\n algorithms.AES(sec_key.encode('utf-8')),\n modes.CBC(b'0102030405060708'),\n backend=backend\n )\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(text_t.encode('utf-8')) + encryptor.finalize()\n ciphertext = base64.b64encode(ciphertext)\n return ciphertext\n\n\ndef aes_decode(text, sec_key):\n backend = default_backend()\n ciphertext = base64.b64decode(text)\n cipher = Cipher(\n algorithms.AES(sec_key.encode('utf-8')),\n modes.CBC(b'0102030405060708'),\n backend=backend\n )\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(ciphertext) + decryptor.finalize()\n return plaintext\n\n\ndef aes_ecb_decode(text, sec_key):\n backend = default_backend()\n ciphertext = base64.b64decode(text)\n cipher = Cipher(\n algorithms.AES(sec_key.encode('utf-8')),\n modes.ECB(),\n backend=backend\n )\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(ciphertext) + decryptor.finalize()\n return plaintext\n\n\ndef aes_ecb(text, sec_key):\n backend = default_backend()\n text = text.encode('utf-8')\n pad = 16 - 
len(text) % 16\n text_t = text + (b'\\0' * pad)\n cipher = Cipher(\n algorithms.AES(sec_key.encode('utf-8')),\n modes.ECB(),\n backend=backend\n )\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(text_t) + encryptor.finalize()\n ciphertext = base64.b64encode(ciphertext)\n return ciphertext\n" }, { "alpha_fraction": 0.5271945595741272, "alphanum_fraction": 0.5307171940803528, "avg_line_length": 29.856521606445312, "blob_id": "a2ee77697e3d80288bcdabc164de0dbd0148d59c", "content_id": "777289756164b98bb187586869f34e6d4b7e0594", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14196, "license_type": "permissive", "max_line_length": 86, "num_lines": 460, "path": "/NXSpider/spider/base_driver.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/25.\n# email to [email protected]\n#\nimport codecs\nimport json\nimport os\nfrom inspect import isfunction\n\nimport requests\nimport six\n\nfrom NXSpider.common.config import Config\nfrom NXSpider.common import tools, log\nfrom NXSpider.model.export import model_download_path\nfrom NXSpider.utility.media_tag import attach_media_tag\nfrom NXSpider.model.export import *\n\n\nclass Music163ObjException(Exception):\n pass\n\n\nclass Music163ObjMetaClass(type):\n def __new__(mcs, name, bases, attrs):\n \"\"\"\n :param name:\n :type name: str\n :param bases:\n :param attrs:\n :type attrs: dict\n :return:\n \"\"\"\n if name == \"Music163Obj\":\n return type.__new__(mcs, name, bases, attrs)\n if '__model_name__' not in attrs or attrs['__model_name__'] is None:\n attrs['__model_name__'] = name + \"Model\"\n\n # set download level 1 path\n attrs['__file_type__'] = name.lower()\n\n # create download file dir for download path\n # obj_path = os.path.join(get_download_path(), attrs['__file_type__'])\n # if not os.path.exists(obj_path):\n # os.makedirs(obj_path)\n\n # create a empty dict\n attrs['__attrs_replace_fucs__'] = dict()\n attrs['__attrs_replace_map__'] = dict()\n\n # set replace\n replace_map = dict()\n for k, v in attrs.items():\n if isfunction(v):\n if getattr(v, '__attrs_replace__', False):\n org_name = getattr(v, '__attrs_replace_org__')\n replace_map[org_name] = getattr(v, '__attrs_replace_new__')\n attrs['__attrs_replace_fucs__'][org_name] = v\n attrs['__attrs_replace_map__'] = replace_map\n return type.__new__(mcs, name, bases, attrs)\n\n\ndef attr_replace(attr_name=None, new_name=None):\n \"\"\"\n replace attr value\n decorator for fun: (self, obj)->object\n\n :param new_name:\n :param attr_name:\n :return:\n \"\"\"\n\n def de(f):\n \"\"\"\n :type f: function\n :param f:\n :return:\n \"\"\"\n f.__attrs_replace__ = True\n def_name = f.__name__\n f.__attrs_replace_org__ = attr_name if attr_name else def_name\n f.__attrs_replace_new__ = new_name if new_name else def_name\n return f\n\n return de\n\n\n# class Music163Obj(object, metaclass=Music163ObjMetaClass):\nclass Music163Obj(six.with_metaclass(Music163ObjMetaClass)):\n __model_name__ = None\n __model_rfilter__ = set()\n __file_type__ = 'other'\n __parse_recursion__ = {}\n\n __attrs_replace_fucs__ = dict()\n __attrs_replace_map__ = dict()\n\n # __metaclass__ = Music163ObjMetaClass\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n In some case, something want to change def class setting,\n like recursion or filter\n :param args:\n :param kwargs:\n \"\"\"\n for k, v in kwargs.items():\n if k in 
['__parse_recursion__', '__model_rfilter__']:\n setattr(self, k, v)\n\n def download_filename(self, doc):\n \"\"\"\n implement pls\n get a name to save file\n need be complete by child\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n return None\n\n def download_filename_full(self, doc):\n \"\"\"\n implement pls\n get a path to save file, by relative path\n need be complete by child\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n return None\n\n def url_load(self, doc):\n \"\"\"\n implement pls\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n return None\n\n def pre_save(self, doc, obj):\n \"\"\"\n implement pls, not force\n do something before doc save\n get download link here is better\n :param doc:\n :param obj:\n :return:\n \"\"\"\n pass\n\n def shortcut_relative_name(self, doc):\n \"\"\"\n implement pls, not force\n return short cut relative path\n :param doc:\n :return:\n :rtype: str\n \"\"\"\n pass\n\n def shortcut_self_path(self, doc):\n \"\"\"\n implement pls, not force\n return self short cut path\n :param doc:\n :return:\n :rtype: list[str]\n \"\"\"\n return []\n\n @staticmethod\n def request_file(doc):\n \"\"\"\n implement pls\n :param doc:\n :return:\n :rtype: bytes\n \"\"\"\n url = None\n try:\n url = getattr(doc, model_download_url, None)\n if url is None:\n return None\n\n r = requests.get(url)\n if r.status_code != 200:\n return None\n return r.content\n except Exception as e:\n log.print_info(u\"url download failed %s , err: %s\" % (url, e))\n return None\n\n def download_relative_path(self, doc):\n download_file_name = 'download_file_name'\n if download_file_name not in doc:\n doc[download_file_name] = self.download_filename_full(doc)\n filename = doc[download_file_name]\n if filename:\n return os.path.join(self.__file_type__, filename)\n else:\n return None\n\n def download_check(self, doc, check_file=False):\n \"\"\"\n :param doc:\n :param check_file: check dist file to download\n :type doc: DynamicDocument\n :return:\n \"\"\"\n if check_file:\n file_relative_path = self.download_relative_path(doc)\n for path in Config().get_paths():\n file_path = os.path.join(path, file_relative_path)\n if os.path.exists(file_path):\n self.download_log(doc, download_path=file_path)\n return file_path\n self.download_log(doc, downloaded=False)\n return False\n\n return hasattr(doc, model_is_download) and doc[model_is_download]\n\n def create_shortcut(self, doc, shortcuts_stack):\n \"\"\"\n :param doc:\n :param shortcuts_stack:\n :type shortcuts_stack: list[str]\n :return:\n \"\"\"\n from NXSpider.utility.shortcut import symlink\n if not (hasattr(doc, model_is_download) and doc[model_is_download]\n and hasattr(doc, model_download_path)\n and os.path.exists(doc[model_download_path])\n and self.download_filename(doc)):\n return\n\n file_path = doc[model_download_path]\n shortcut_root = os.path.join(Config().get_path(), 'shortcuts')\n file_name = self.download_filename(doc)\n if file_path and file_path is not True:\n if shortcuts_stack:\n path = os.path.join(shortcut_root, *shortcuts_stack)\n os.makedirs(path, exist_ok=True)\n target = os.path.join(path, file_name)\n symlink(file_path, os.path.join(shortcut_root, target))\n\n for k in self.shortcut_self_path(doc):\n path = os.path.join(shortcut_root, k)\n os.makedirs(path, exist_ok=True)\n target = os.path.join(path, file_name)\n symlink(file_path, os.path.join(shortcut_root, target))\n pass\n\n @staticmethod\n def download_log(doc, downloaded=True, download_path=\"\"):\n doc[model_is_download] = downloaded\n doc[model_download_path] = 
download_path\n\n    def download_file_tag(self, filename, doc):\n        if self.__file_type__ in ['mp3', 'mp4', 'mv'] \\\n                and Config().get_media_tag():\n            attach_media_tag(doc, filename)\n\n    def download_file(self, doc):\n        \"\"\"\n        download file from music 163\n        :param doc:\n        :return:\n        \"\"\"\n        file_relative_path = self.download_relative_path(doc)\n        path = Config().get_path()\n        content = self.request_file(doc)\n\n        if content is None:\n            log.print_err(u\"file download failed : %s\" % file_relative_path)\n            return False\n\n        try:\n            file_name = os.path.join(path, file_relative_path)\n\n            # make the directory\n            dir_name = os.path.dirname(file_name)\n            if not os.path.exists(dir_name):\n                os.makedirs(dir_name)\n\n            # file write\n            with open(file_name, \"wb\") as code:\n                code.write(content)\n\n            self.download_file_tag(file_name, doc)\n\n            log.print_info(u\"file download complete: %s\" % file_relative_path)\n            self.download_log(doc)\n            return True\n        except Exception as e:\n            log.print_err(\"file save failed : %s, err: %s\" % (file_relative_path, e))\n            return False\n\n    def debug_save_json(self, obj):\n        with tools.ignored(Exception):\n            relative_path = os.path.join(self.__file_type__ + '.debug',\n                                         self.__file_type__ + \"_\" +\n                                         str(obj['id']) + '.json')\n            file_name = os.path.join(Config().get_path(), relative_path)\n            with codecs.open(file_name, \"wb\", encoding='utf8') as code:\n                test = json.dumps(obj, ensure_ascii=False)\n                code.write(test)\n\n    def debug_print(self, obj):\n        msg = u\"spider {} complete, id: {}\".format(self.__file_type__, obj['id'])\n        if 'name' in obj:\n            msg += u\", name: {}\".format(obj['name'])\n\n        log.print_info(msg)\n\n    def try_download(self, doc, download_type, file_check):\n        if not download_type or self.__file_type__ not in download_type:\n            return True\n\n        # download file and set download flag\n        if not self.download_check(doc, check_file=file_check):\n            # need download, try url which is set first or get new url and download\n            if getattr(doc, model_download_url, None) \\\n                    and self.download_file(doc):\n                return True\n\n            # get download link here\n            doc[model_download_url] = self.url_load(doc)\n            self.download_file(doc)\n        else:\n            name = self.download_relative_path(doc)\n            if name:\n                log.print_info(u\"file already exists or does not need to be downloaded : %s\"\n                               % name)\n\n    def parse_model(self, crawl_dict, download_type=None,\n                    file_check=False, save=True, debug=False,\n                    shortcuts_stack=None):\n        \"\"\"\n        Get a model from db or create it, then update and save.\n        Attributes flagged with @attr_replace(attr_name, new_name) are\n        renamed and parsed into sub-models as well.\n        :param debug:\n        :param file_check:\n        :param download_type:\n        :param save: save doc\n        :param crawl_dict: must have id attr\n        :param shortcuts_stack: stack of shortcuts path\n        :type crawl_dict: dict\n        :type shortcuts_stack: list[str]\n        :return:\n        :rtype: DynamicDocument\n        :type save: bool\n        \"\"\"\n        if debug:\n            # self.debug_save_json(crawl_dict)\n            pass\n\n        # get id\n        if 'id' not in crawl_dict:\n            log.print_err(u\"can not load id by json obj %s\" % json.dumps(crawl_dict))\n            return None\n        doc_id = crawl_dict['id']\n\n        # load a mongo document\n        doc, is_new_doc = get_one_model_by_key(self.__model_name__, doc_id)\n        if doc is None:\n            log.print_err(u\"can not load a doc by obj %s_%d\"\n                          % (self.__file_type__, doc_id))\n            return None\n\n        # shortcuts in stack\n        if shortcuts_stack is not None and isinstance(shortcuts_stack, list) \\\n                and self.shortcut_relative_name(doc):\n            shortcuts_stack.append(self.shortcut_relative_name(doc))\n\n        # if is_new_doc:\n        # replace 
attr or ignore\n obj = dict()\n for k, v in crawl_dict.items():\n if k in self.__model_rfilter__:\n continue\n obj[k] = v\n\n # replace object\n if k not in self.__attrs_replace_fucs__:\n continue\n\n # change attr\n if isinstance(v, list):\n v = [self.__attrs_replace_fucs__[k](self, x) for x in v]\n else:\n v = self.__attrs_replace_fucs__[k](self, v)\n\n # replace key name\n del obj[k]\n obj[self.__attrs_replace_map__[k]] = v\n\n # recursion replace a attr into a model\n for k, v in self.__parse_recursion__.items():\n if k not in obj:\n continue\n\n if isinstance(obj[k], list):\n obj[k] = [v.parse_model(x, save=save, download_type=download_type,\n file_check=file_check, debug=debug,\n shortcuts_stack=shortcuts_stack)\n for x in obj[k]]\n elif isinstance(obj[k], dict):\n obj[k] = v.parse_model(obj[k], save=save, download_type=download_type,\n file_check=file_check, debug=debug,\n shortcuts_stack=shortcuts_stack)\n\n # update json to doc, this must be after recursion\n update_dynamic_doc(doc, obj)\n\n # modify doc and\n self.pre_save(doc, crawl_dict)\n\n # try download\n self.try_download(doc, download_type, file_check)\n\n if shortcuts_stack:\n if self.shortcut_relative_name(doc):\n shortcuts_stack.pop()\n self.create_shortcut(doc, shortcuts_stack)\n\n # save document\n if save and callable(getattr(doc, 'save', None)):\n doc.save()\n\n if debug:\n self.debug_print(crawl_dict)\n\n return doc\n\n\n__all__ = [\n 'Music163Obj',\n 'attr_replace',\n 'Music163ObjException',\n\n 'ConfigModel',\n 'UserModel',\n 'AlbumModel',\n 'PlaylistModel',\n 'Mp4Model',\n 'VideoModel',\n 'ArtistModel',\n 'Mp3Model',\n 'AuthorModel',\n 'update_dynamic_doc',\n 'model_download_url',\n 'model_is_download',\n 'get_one_model_by_key',\n]\n" }, { "alpha_fraction": 0.619271457195282, "alphanum_fraction": 0.6321974396705627, "avg_line_length": 23.314285278320312, "blob_id": "8f211dd3739379317f3b16469266fd74c03aca61", "content_id": "a461ba57bf3615e7e28e78cab6a3f5255be7c356", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "permissive", "max_line_length": 72, "num_lines": 35, "path": "/NXSpider/debug_test/move.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/14.\n# email to [email protected]\n#\nimport os\nimport shutil\nimport sys\nfrom glob import glob\n\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n\npaths = ['mv', 'mp3']\nproject_path = os.path.dirname(os.path.realpath(__file__))\nproject_path = os.path.dirname(project_path)\ndownload_path = os.path.join(project_path, 'download_files')\n\n\ndef move_file(path):\n files = os.listdir(path)\n for f in files:\n if os.path.isdir(os.path.join(path, f)):\n continue\n\n author = f[0: f.find(' - ')].strip()\n author_path = os.path.join(path, author)\n if not os.path.exists(author_path):\n os.makedirs(author_path)\n shutil.move(os.path.join(path, f), os.path.join(author_path, f))\n\n\nfor p in paths:\n move_file(os.path.join(download_path, p))\n" }, { "alpha_fraction": 0.6714165806770325, "alphanum_fraction": 0.6974015235900879, "avg_line_length": 27.759037017822266, "blob_id": "c194360d84bcbb7b02fae99f9ebac455922339a5", "content_id": "3531c5fa50ce0bc70ecfdf790fa506ecc3d56fe4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3794, "license_type": "permissive", "max_line_length": 214, "num_lines": 83, 
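> spider/base_driver.py above wires @attr_replace and a metaclass together: the decorator merely flags a method with a source/target attribute name, and the metaclass scans the class body once to collect the rename map before any instance exists. A toy reduction of that wiring follows; ReplaceMeta and Demo are hypothetical names invented for the sketch, not repo classes.

```python
# Toy reduction of the attr_replace + metaclass registration pattern above.
def attr_replace(attr_name, new_name):
    def de(f):
        f.__attrs_replace__ = True          # flag the method for the metaclass
        f.__attrs_replace_org__ = attr_name
        f.__attrs_replace_new__ = new_name
        return f
    return de


class ReplaceMeta(type):
    def __new__(mcs, name, bases, attrs):
        # build the rename map once, at class-creation time
        attrs['__attrs_replace_map__'] = {
            v.__attrs_replace_org__: v.__attrs_replace_new__
            for v in attrs.values()
            if callable(v) and getattr(v, '__attrs_replace__', False)
        }
        return type.__new__(mcs, name, bases, attrs)


class Demo(metaclass=ReplaceMeta):
    @attr_replace(attr_name='songs', new_name='mp3')
    def replace_song(self, obj):
        return obj


print(Demo.__attrs_replace_map__)  # {'songs': 'mp3'}
```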
"path": "/SIMPLE_USE.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "NXSpider\n=================\n\nNXSpider, NetEase X Spider, A powerful for mp3,mv spider, can help you download mp3 and mv with media tags. Base on python, mongodb, and recursion algorithm\n\n[![Software License](https://img.shields.io/pypi/l/Django.svg)](LICENSE.md)\n![platform](https://img.shields.io/badge/python-2.7-green.svg)\n![platform](https://img.shields.io/badge/python-3.5-green.svg)\n\n[新手先看这里](SIMPLE_USE.md) | [历史版本在这里](VERSION.md) | [开发详情在这里](DEV.md)\n\n这里为非IT同学增加最简单使用方法(请保证电脑能上网),请根据以下流程。注意,所有跳转建议按住ctrl后再点击(或右键新窗口打开)\n\n### 点赞\n- 请给项目点个star\n- 请感谢网易云,感谢上帝,感谢国家,顺便的话感谢作者\n\n## Linux用户\n[摸着自己的良心说,到底是不是IT人员](README.md)\n\n## Mac用户\n待补充\n\n## Windows用户\n\n### 建议\n- 使用python3.6,最好卸载2.7\n- 所有路径(python安装路径,NXSpider路径,下载路径)不要有中文名,空格希望也不要有\n\n### 下载python3(自行跳过)\n- [点击这里下载(32)](https://www.python.org/ftp/python/3.6.5/python-3.6.5.exe) 或 [点击这里下载(64)](https://www.python.org/ftp/python/3.6.5/python-3.6.5-amd64.exe) 或 [自行选择](https://www.python.org/downloads/release/python-365/)\n- 双击你下载的文件,进行安装,跟正常安装软件一样\n- 推荐大概在第一页勾选 ```for all users```\n- 推荐大概在第二页选择自定义安装路径,大概是 ```C:\\Program Files```\n- **强烈要求**大概在第二页勾选 ```Add Python to environment variables```\n![img](img/python_install_2.png)\n- 后面一直按继续即可\n- 安装完毕后按住键盘 ```win+r``` 在开始建附近会弹出个运行框,输入 ```cmd``` 回车\n![img](img/cmd.png)\n- 回车后会弹出个黑色界面,通常我们叫 console(控制台),输入 ```python``` 并回车\n- 看到以下界面则为安装成功\n![img](img/run_python.png)\n\n### 下载本项目\n- [点击这里下载](https://github.com/Grass-CLP/NXSpider/archive/master.zip) 或者点击右上角 ```clone or download``` 选择zip\n- 解压缩到你想要的目录下,比如 ```G:/NXSpider``` 并进入目录\n- 双击目录下文件 ```install.bat``` 会弹出个窗,等待结束(希望没看到error)\n\n### 运行本项目\n1. 双击运行目录下 `start.bat` \n2. 在控制台中就可以输入说明中的命令了(可以ctrl+c ctrl+v或右键复制)\n3. 建议先输入 ```nxspider -h```\n3. 先配置你的下载目录,比如目录是 ```G:/media```, 则输入并回车 ```nxspider config-spider -path G:/media```\n4. 
**推荐**打开你的网易云音乐,点击:本地音乐 -> 选择目录 -> 添加文件夹 -> 选择你的目录 -> 确定\n\n### 通过搜索下载示例\n $ nxspider search -ar \"张学友\"\n $ nxspider sab -ab 19093 -dw mp3,mv \n # -dw mp3,mv表示想要下载mp3和mv,不想下载mv则删除最后的,mv\n \n $ nxspider search -mp3 \"爱我别走\"\n $ nxspider smp3s -mp3 187947 -dw mp3\n \n搜索歌神唱片找到ID:\n\n![img](img/demo_search_xy.png)\n\n爬专辑19093:\n\n![img](img/demo_spider_xy.png)\n\n### 暴力下载最火歌单(不截图了)\n $ nxspider scls-pls -cls 全部 -dw mp3,mv\n\n\n### tips\n- 其他命令见 [README.md](README.md)\n- 目前暂时没做很好的退出,想退出直接把控制台(黑窗)关掉就好\n- 目前暂时没做很好的重试,每次输入命令前复制到txt里面,遇到卡住的话,关掉重新打开个重新输入运行就可以\n- 下载后重新进去网易云音乐,看看你的本地音乐,点击**匹配音乐**,是不是充满惊喜\n\n## 最后\n有其他问题请到issue下留言,或者发邮件给我LipsonChan的雅虎邮箱" }, { "alpha_fraction": 0.6365979313850403, "alphanum_fraction": 0.6597937941551208, "avg_line_length": 23.25, "blob_id": "6bc60273335c06c1aa5e1aecad9a98f96c6683f3", "content_id": "2817592a065e44c071baba87b01fc8b1ec8472bc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 63, "num_lines": 16, "path": "/NXSpider/common/constant.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/5/30.\n# email to [email protected]\n#\nimport os\n\n\nall_download_type = ['mp3', 'mv']\nmain_dir = os.path.join(os.path.expanduser('~'), '.nxspider')\nconfig_path = os.path.join(main_dir, 'config.json')\ndefault_download_dir = os.path.join(main_dir, 'download_files')\n\nif os.path.isdir(main_dir) is False:\n os.mkdir(main_dir)\n" }, { "alpha_fraction": 0.652942419052124, "alphanum_fraction": 0.6816385984420776, "avg_line_length": 26.485713958740234, "blob_id": "c8dae3781978082a219f0600417114f6f8393fef", "content_id": "3fea56e7d126bd3b85739579264105143c1955b6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6745, "license_type": "permissive", "max_line_length": 109, "num_lines": 175, "path": "/README.md", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "NXSpider\n=================\n\nNXSpider, NetEase X Spider,一个强大的(网易云音乐)mp3,mv爬虫,可以下载和收集mp3,mv信息,同时附带多媒体标签信息。采用python编写,mongo数据库(非必须),递归算法核心实现\n\n[![Software License](https://img.shields.io/pypi/l/Django.svg)](LICENSE.md)\n![platform](https://img.shields.io/badge/python-2.7-green.svg)\n![platform](https://img.shields.io/badge/python-3.5-green.svg)\n\n[新手先看这里](SIMPLE_USE.md) | [历史版本在这里](VERSION.md) | [开发详情在这里](DEV.md)\n\n## 灵感来自\n\n[chengyumeng/spider163](https://github.com/chengyumeng/spider163)\n\n[Binaryify/NeteaseCloudMusicApi](https://github.com/Binaryify/NeteaseCloudMusicApi)\n\n[darknessomi/musicbox](https://github.com/darknessomi/musicbox)\n\n[sqaiyan/netmusic-node](https://github.com/sqaiyan/netmusic-node)\n\n## 功能特性\n1. 采用命令行形式运行,配置在个人账号目录下```~/.nxspider/```\n2. 搜索歌单,歌手,专辑,用户等信息\n3. 通过各种方法,如歌单,歌手,专辑等,下载mp3, mv\n4. 所有信息会保存在mongodb中(optional)\n5. 所有mp3, mv可以通过配置,增加多媒体标签信息(歌手,专辑,唱片,163comment!!!)\n\n## 运行依赖\n1. python3(推荐) or python2, 请配置好path\n2. windows or linux\n3. 
[mongodb](https://docs.mongodb.com/manual/installation/)(默认非必需)\n\n## Git clone安装\n $ git clone https://github.com/Grass-CLP/NXSpider.git && cd NXSpider\n $ python(3) setup.py install\n $ pip(3) install -r requirememts.txt # optional, if you use mongodb\n\n## 使用指南\n### 简单示例\n\t$ nxspider -h\n\t# 显示帮助信息\n\n\t$ nxspider -v\n\t# 显示软件版本信息\n\n### 配置, <>内容为值,or为可选\n\t$ nxspider config-spider -path \"<you_download_path,default>\"\n\t# 强烈建议第一次前进行配置,强烈建议加上双引号保证空格等问题\n\t# 配置下载文件的路径,第一个路径为存储路径,其他路径会检查文件是否下载 \n\n\t$ nxspider config-mongo -mh <host> -mp <port> -mn <db name>\n\t# 配置mongodb服务器(非必需),必须pip安装 requirements\n\n\t$ nxspider config-check\n\t# 检查配置内容\n\n### 搜索命令\n\t# 以下为搜索命令,其中<>内文具体的文字内容,可以输入中文,韩文,日语等(注意加上双引号)\n\t$ nxspider search -ar \"<artist>\" [-offset <offset>] [-limit <limit>]\n\t$ nxspider search -pl \"<playlist>\" [-offset <offset>] [-limit <limit>]\n\t$ nxspider search -ur \"<user>\" [-offset <offset>] [-limit <limit>]\n\t$ nxspider search -mp3 \"<song>\" [-offset <offset>] [-limit <limit>]\n\t$ nxspider search -ab \"<album>\" [-offset <offset>] [-limit <limit>]\n\t$ nxspider search -mv \"<mv>\" [-offset <offset>] [-limit <limit>]\n\n### 爬取命令\n\t$ nxspider sab -ab <album_id,id1,id2> [-dw <mv,mp3>]\n $ nxspider sab -ab 3084625,3107072 -dw mv,mp3\n\t# 根据专辑id(按逗号隔开)爬取歌曲,下载mp3,mv(参数配置),所有[]内为可选内容\n\n $ nxspider smp3s -mp3 <mp3_id,id2,id3> [-dw <mv,mp3>]\n\t# 根据歌曲id(按逗号隔开)爬取歌曲\n\n\t$ nxspider spls -pl <playlist_id,id2,id3> [-dw <mv,mp3>]\n\t# 根据歌单id(按逗号隔开)爬取歌曲\n\n\t$ nxspider sar-top-mp3 -ar <artist_id,id1,id2> [-dw <mv,mp3>]\n\t# 根据歌手id爬取该歌手top50首\n\n\t$ nxspider sar-albums -ar <artist_id,id1> [-dw <mv,mp3>] [-offset <offset>] [-limit <limit>]\n\t# 根据歌手id爬取该歌手第<offset>个开始共<limit>个专辑,offset默认0,limit默认50\n\n\t$ nxspider sur-pls -ur <user_id> [-dw <mv,mp3>] [-offset <offset>] [-limit <limit>]\n\t# 根据用户id爬取该用户的所有歌单(包括收藏)\n\n\t$ nxspider scls-pls -cls <class type> [-dw <mv,mp3>] [-offset <offset>] [-limit <limit>]\n\t$ nxspider scls-pls -cls 全部 -dw mv,mp3 -offset 0 -limit 20\n\t# 根据歌单分配,爬取该分类下最火歌单,并下载mp3和mv\n\n\t$ nxspider stop-mvs [-offset <offset>] [-limit <limit>]\n\t# 爬取最火mv\n\n### 其他命令\n\t$ nxspider sw_ur_pl\n\t# 显示所有歌单分类\n\n\t$ nxspider config-mongo -nomongo <1 or 0>\n\t# 配置是否使用mongo存储数据,默认不使用,1为不使用,0为使用\n\n\t$ nxspider config-spider -mvr <240 or 480 or 720 or 1080>\n\t# 配置下载mv最高的分辨率,默认720\n\n $ nxspider config-spider --shortcut <1 or 0>\n\t# 配置是否创建快捷方式(目前只创建歌单,歌手,唱片的快捷方式文件目录),默认0\n\t# 在windows下配置1的话需要右键\"以管理员身份运行\" start.bat\n\n\t$ nxspider config-spider -tag <1 or 0>\n\t# 配置是否为媒体文件增加媒体标签,默认1\n\n\t$ nxspider config-spider -tag163 <1 or 0>\n\t# 配置是否为媒体文件增加网易云音乐app特定信息媒体标签,默认1\n\n\t$ nxspider config-check\n\t# 检查配置正确性,并修复部分错误配置\n\n\t$ nxspider sw-ar-ab -ar <artist_id> [-offset <offset>] [-limit <limit>]\n\t# 通过歌手id打印该歌手的唱片信息\n\n\t$ nxspider login-smv -lu <user name or phone number> [-lp <password>]\n\t# 建议不使用-lp(会显示密码),登录后爬取自己收藏的所有mv\n\n\t$ nxspider login-spls -lu <login user> [-lp <login password>] [-dw <mv,mp3>]\n\t# 建议不使用-lp(会显示密码),登录后爬取自己所有歌单(包括最爱),下载mp3和mv(记得别忘输入 -dw mp3,mv)\n\n\t$ nxspider config_clear\n\t# 恢复所有设置到默认,建议执行后进行配置下载目录\n\n\t$ nxspider -h\n\t# 更多命令请参考这个\n\n### 部分命令效果图\n![img](img/search_ab.png)\n\n![img](img/spider_spls.png)\n\n![img](img/sar-top-mp3.png)\n\n### 小tips\n- 默认配置须关心的为:不使用mongo + 下载路径为 `~/.nxspider/donwload_files/`\n- 使用mongo的话,**强烈要求**执行 `pip(3) install -r requirememts.txt`\n- 配置 ```-tag163 1``` 直接把下载目录添加到PC版某云软件的下载目录下,软件会自动识别已下载\n- 关闭爬取,目前代码是幂等运行,暂时建议直接 ```ctrl + c``` 强制关闭即可\n\n\n### 系统兼容\n| 系统及平台 | 结果 |\n| --- | --- |\n| win7(GBK) + python2 | 字符集问题 |\n| 
win7(GBK) + python3 | 通过 |\n| win10(GBK) + python2 | 字符集问题 |\n| win10(GBK) + python3 | 通过 |\n| centos7.2(utf8) + python2 | 通过 |\n| centos7.2(utf8) + python3 | 通过 |\n| mac + python | 待补充 |\n</table>\n\n### 效果图示例\n- 已下载mv\n\n![img](img/mv_download.png)\n\n- 已下载MP3\n\n![img](img/mp3_download.png)\n\n- 已采集数据\n\n![img](img/mongodb_data.png)\n\n## 最后\n- 程序关闭,建议直接关闭console或 `ctrl+c`\n- 希望能给我加个★\n- 有其他问题请到issue下留言,或者发邮件给我LipsonChan的雅虎邮箱\n- 提交issue的话建议先阅读 [Contributing](CONTRIBUTING.rst)" }, { "alpha_fraction": 0.654574990272522, "alphanum_fraction": 0.659041702747345, "avg_line_length": 26.261993408203125, "blob_id": "fbfb98575aad527e93f7b2ab2fd477c2736426a8", "content_id": "5793123e37e81b8f114e745c96a909ac5a59ac3e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7388, "license_type": "permissive", "max_line_length": 72, "num_lines": 271, "path": "/NXSpider/model/mongo_model.py", "repo_name": "Z-Shuming/NXSpider", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Lipson on 2018/4/19.\n# email to [email protected]\n#\nfrom datetime import datetime\n\nimport pymongo\nfrom mongoengine import connect, DynamicDocument, Document, signals\nfrom mongoengine.fields import *\nfrom pymongo.errors import ServerSelectionTimeoutError\n\nfrom NXSpider.common import log\nfrom NXSpider.common.config import Config\nmongodb_conf = Config().get_mongo()\n\ntry:\n\n client = pymongo.MongoClient(host=mongodb_conf['host'],\n port=mongodb_conf['port'],\n connectTimeoutMS=3000,\n serverSelectionTimeoutMS=3000)\n test_connect = client.database.test.count()\n del client\nexcept ServerSelectionTimeoutError as e:\n log.print_err(\"mongodb server config error\")\n exit()\n\nmodel_download_url = 'download_url'\nmodel_is_download = 'downloaded'\n\n\ndef field_value(field, value):\n \"\"\"\n Converts a supplied value to the type required by the field.\n If the field requires a EmbeddedDocument the EmbeddedDocument\n is created and updated using the supplied data.\n :param field:\n :param value:\n :return:\n \"\"\"\n if field.__class__ in (ListField, SortedListField):\n # return a list of the field values\n return [\n field_value(field.field, item)\n for item in value]\n\n elif field.__class__ in (\n EmbeddedDocumentField,\n GenericEmbeddedDocumentField,\n ReferenceField,\n GenericReferenceField,\n LazyReferenceField):\n\n if isinstance(value, Document):\n return value\n\n if isinstance(value, dict):\n embedded_doc = field.document_type()\n update_doc(embedded_doc, value)\n return embedded_doc\n return None\n else:\n return value\n\n\ndef update_doc(doc, data):\n \"\"\"\n Update an document to match the supplied dictionary.\n :param doc:\n :param data:\n :return:\n \"\"\"\n\n for key, value in data.items():\n if hasattr(doc, key):\n value = field_value(doc._fields[key], value)\n setattr(doc, key, value)\n else:\n # handle invalid key\n pass\n\n return doc\n\n\ndef update_doc_filter(doc, filter_set, *dicts):\n \"\"\"\n update document by filter and dicts\n :param doc:\n :param filter_set: Set\n :param dicts:\n :return:\n \"\"\"\n obj = {}\n for d in dicts:\n obj.update({k: v for k, v in d.items() if k in filter_set})\n\n return update_doc(doc, obj)\n\n\ndef update_dynamic_doc(doc, data):\n for key, value in data.items():\n if key in doc._fields:\n value = field_value(doc._fields[key], value)\n setattr(doc, key, value)\n\n\ndef update_dynamic_doc_filter(doc, filter_set, *dicts):\n \"\"\"\n update document by 
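> The README's config-spider commands above ultimately just rewrite the plain JSON kept by Config (common/config.py earlier in this dump) at ~/.nxspider/config.json. A hedged sketch of the equivalent manual edit follows; the keys mirror Config's default_config, and the "G:/media" value is only an example.

```python
# Hedged sketch: editing the same ~/.nxspider/config.json that
# `nxspider config-spider -path ...` persists via Config.save_config_file().
import json
import os

cfg_file = os.path.join(os.path.expanduser('~'), '.nxspider', 'config.json')
with open(cfg_file) as f:
    cfg = json.load(f)

cfg['download_path'] = ['G:/media']   # first entry is where new files land
cfg['mv_def_resolution'] = 720        # one of 240 / 480 / 720 / 1080

with open(cfg_file, 'w') as f:
    f.write(json.dumps(cfg, indent=2))
```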
filter and dicts\n :param doc:\n :param filter_set: Set\n :param dicts:\n :return:\n \"\"\"\n obj = {}\n for d in dicts:\n obj.update({k: v for k, v in d.items() if k in filter_set})\n\n return update_dynamic_doc(doc, obj)\n\n\ndef update_dynamic_doc_filter(doc, filter_set, *dicts):\n \"\"\"\n update document by filter and dicts\n :param doc:\n :param filter_set: Set\n :param dicts:\n :type dicts: dict\n :type filter_set: list\n :return:\n :rtype: DynamicDocument | None\n \"\"\"\n obj = {}\n for d in dicts:\n obj.update({k: v for k, v in d.items() if k in filter_set})\n\n return update_dynamic_doc(doc, obj)\n\n\ndef update_dynamic_doc_rfilter(doc, rfilter_set, *dicts):\n \"\"\"\n update document by filter and dicts\n :param doc:\n :param rfilter_set: Set\n :param dicts:\n :type dicts: dict\n :type rfilter_set: Set\n :return:\n :rtype: DynamicDocument | None\n \"\"\"\n rfilter_set = {} if rfilter_set is None else rfilter_set\n obj = {}\n for d in dicts:\n obj.update({k: v for k, v in d.items() if k not in rfilter_set})\n\n return update_dynamic_doc(doc, obj)\n\n\ndef get_one_model_by_key(model, model_id):\n \"\"\"\n load or create a model by id\n :type model: DynamicDocument\n :param model:\n :param model_id:\n :return: doc, is_new\n :rtype: (DynamicDocument, boolean)\n \"\"\"\n\n try:\n res = model.objects(id=model_id).first()\n if res is None:\n res = model(id=model_id)\n return res, True\n return res, False\n except Exception as e:\n log.print_err('load a doc err: %s' % e)\n return None, True\n\n\ndef update_dynamic_doc(doc, data):\n for key, value in data.items():\n if key in doc._fields:\n value = field_value(doc._fields[key], value)\n setattr(doc, key, value)\n\n\nclass ConfigModel(DynamicDocument):\n id = StringField(primary_key=True)\n updated_at = DateTimeField(default=datetime.utcnow)\n pass\n\n\nclass AuthorModel(DynamicDocument):\n id = LongField(primary_key=True)\n updated_at = DateTimeField(default=datetime.utcnow)\n pass\n\n\nclass Mp3Model(DynamicDocument):\n id = LongField(primary_key=True)\n artists = ListField(ReferenceField('ArtistModel'))\n album = ReferenceField('AlbumModel')\n mv = ReferenceField('Mp4Model')\n updated_at = DateTimeField(default=datetime.utcnow)\n pass\n\n\nclass ArtistModel(DynamicDocument):\n id = LongField(primary_key=True)\n # mp3s = ListField(ReferenceField(Mp3Model))\n updated_at = DateTimeField(default=datetime.utcnow)\n\n\nclass Mp4Model(DynamicDocument):\n id = LongField(primary_key=True)\n artists = ListField(ReferenceField(ArtistModel))\n updated_at = DateTimeField(default=datetime.utcnow)\n pass\n\n\nclass VideoModel(DynamicDocument):\n id = StringField(primary_key=True)\n artists = ListField(ReferenceField(ArtistModel))\n updated_at = DateTimeField(default=datetime.utcnow)\n pass\n\n\nclass PlaylistModel(DynamicDocument):\n id = LongField(primary_key=True)\n mp3s = ListField(LazyReferenceField(Mp3Model))\n mp4s = ListField(LazyReferenceField(Mp4Model))\n updated_at = DateTimeField(default=datetime.utcnow)\n\n\nclass AlbumModel(DynamicDocument):\n id = LongField(primary_key=True)\n mp3s = ListField(LazyReferenceField(Mp3Model))\n artists = ListField(LazyReferenceField(ArtistModel))\n updated_at = DateTimeField(default=datetime.utcnow)\n\n\nclass UserModel(DynamicDocument):\n id = LongField(primary_key=True)\n # friends = ListField(ReferenceField('self'))\n updated_at = DateTimeField(default=datetime.utcnow)\n\n\nclass Downloaded(DynamicDocument):\n model = StringField(required=True)\n model_id = LongField(required=True)\n updated_at = 
DateTimeField(default=datetime.utcnow)\n\n\ndef update_timestamp(sender, document, **kwargs):\n document.updated_at = datetime.utcnow()\n\n\nsignals.pre_save.connect(update_timestamp, sender=PlaylistModel)\nsignals.pre_save.connect(update_timestamp, sender=VideoModel)\nsignals.pre_save.connect(update_timestamp, sender=Mp4Model)\nsignals.pre_save.connect(update_timestamp, sender=Mp3Model)\nsignals.pre_save.connect(update_timestamp, sender=AuthorModel)\nsignals.pre_save.connect(update_timestamp, sender=ConfigModel)\nsignals.pre_save.connect(update_timestamp, sender=UserModel)\nsignals.pre_save.connect(update_timestamp, sender=ArtistModel)\nsignals.pre_save.connect(update_timestamp, sender=AlbumModel)\n\nconnect(mongodb_conf['name'], host=mongodb_conf['host'],\n port=mongodb_conf['port'], connectTimeoutMS=3000)\n" } ]
48
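> model/mongo_model.py above keys every DynamicDocument on the NetEase id and refreshes updated_at through a pre_save signal. A minimal standalone illustration of that pattern follows, assuming a reachable local MongoDB; DemoTrack and the nxspider_demo database name are hypothetical, and the sample id/name come from song metadata elsewhere in this dump.

```python
# Minimal mongoengine sketch of the DynamicDocument + pre_save-signal pattern.
from datetime import datetime

from mongoengine import DynamicDocument, connect, signals
from mongoengine.fields import DateTimeField, LongField

connect('nxspider_demo', host='localhost', port=27017)


class DemoTrack(DynamicDocument):
    id = LongField(primary_key=True)
    updated_at = DateTimeField(default=datetime.utcnow)


def touch(sender, document, **kwargs):
    # refresh the timestamp on every save, like update_timestamp above
    document.updated_at = datetime.utcnow()


signals.pre_save.connect(touch, sender=DemoTrack)

# load-or-create by primary key, then set a dynamic field (no schema change)
doc = DemoTrack.objects(id=22712173).first() or DemoTrack(id=22712173)
doc.name = 'Refrain'
doc.save()
```

> Keying on the upstream id is what makes the crawl idempotent: re-running a spider simply re-saves the same documents instead of duplicating them.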